]> bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 11 Sep 2010 19:17:02 +0000 (12:17 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 11 Sep 2010 19:17:02 +0000 (12:17 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6:
  [SCSI] fix use-after-free in scsi_init_io()
  [SCSI] sd: fix medium-removal bug
  [SCSI] qla2xxx: Update version number to 8.03.04-k0.
  [SCSI] qla2xxx: Check for empty slot in request queue before posting Command type 6 request.
  [SCSI] qla2xxx: Cover UNDERRUN case where SCSI status is set.
  [SCSI] qla2xxx: Correctly set fw hung and complete only waiting mbx.
  [SCSI] qla2xxx: Reset seconds_since_last_heartbeat correctly.
  [SCSI] qla2xxx: make rport deletions explicit during vport removal
  [SCSI] qla2xxx: Fix vport delete issues
  [SCSI] sd, sym53c8xx: Remove warnings after vsprintf %pV introducation.
  [SCSI] Fix warning: zero-length gnu_printf format string
  [SCSI] hpsa: disable doorbell reset on reset_devices
  [SCSI] be2iscsi: Fix for Login failure
  [SCSI] fix bio.bi_rw handling

405 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/DocBook/tracepoint.tmpl
Documentation/block/cfq-iosched.txt [new file with mode: 0644]
Documentation/cgroups/blkio-controller.txt
Documentation/gpio.txt
Documentation/kernel-parameters.txt
Documentation/mutex-design.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
arch/alpha/include/asm/cache.h
arch/alpha/kernel/err_marvel.c
arch/alpha/kernel/perf_event.c
arch/alpha/kernel/proto.h
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_takara.c
arch/arm/Kconfig
arch/arm/boot/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/common/it8152.c
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/perf_event.h
arch/arm/include/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/perf_event.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/board-sam9261ek.c
arch/arm/mach-at91/clock.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/clock-imx35.c
arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-mx5/clock-mx51.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/include/mach/mfp-pxa300.h
arch/arm/mach-shmobile/Makefile
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/clock.c
arch/arm/mach-shmobile/pm_runtime.c [new file with mode: 0644]
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/eukrea-baseboards.h
arch/arm/plat-mxc/tzic.c
arch/arm/plat-pxa/pwm.c
arch/arm/tools/mach-types
arch/h8300/include/asm/atomic.h
arch/h8300/include/asm/system.h
arch/h8300/kernel/sys_h8300.c
arch/h8300/kernel/traps.c
arch/m68knommu/kernel/vmlinux.lds.S
arch/powerpc/include/asm/fsldma.h
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/time.c
arch/powerpc/platforms/83xx/mpc837x_mds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/qe_lib/qe.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/windows.c
arch/x86/include/asm/iomap.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/pci.h
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/trampoline.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/kvm/i8259.c
arch/x86/kvm/irq.h
arch/x86/mm/iomap_32.c
arch/x86/oprofile/nmi_int.c
block/blk-cgroup.c
block/blk-core.c
block/blk-sysfs.c
block/blk.h
block/cfq-iosched.c
block/elevator.c
crypto/Kconfig
crypto/ahash.c
crypto/algboss.c
crypto/testmgr.c
drivers/acpi/pci_root.c
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-sff.c
drivers/ata/pata_artop.c
drivers/ata/pata_via.c
drivers/ata/sata_mv.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/hw_random/n2-drv.c
drivers/char/tty_io.c
drivers/char/vt.c
drivers/char/vt_ioctl.c
drivers/gpio/sx150x.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_clocks.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/rv770.c
drivers/hwmon/hp_accel.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/input/input.c
drivers/input/mouse/bcm5974.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_wac.c
drivers/md/.gitignore [deleted file]
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/md.h
drivers/mmc/core/sdio.c
drivers/mmc/host/at91_mci.c
drivers/mmc/host/imxmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/tmio_mmc.c
drivers/mmc/host/tmio_mmc.h
drivers/mtd/ubi/Kconfig.debug
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/scan.c
drivers/mtd/ubi/wl.c
drivers/net/3c59x.c
drivers/net/b44.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_hw.h
drivers/net/benet/be_main.c
drivers/net/bonding/bond_main.c
drivers/net/ks8851.c
drivers/net/niu.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/pxa168_eth.c
drivers/net/stmmac/stmmac_main.c
drivers/net/usb/ipheth.c
drivers/net/via-velocity.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/regd.h
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/p54/txrx.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_acpi.c
drivers/pci/hotplug/pciehp_core.c
drivers/pci/pci.h
drivers/pci/pcie/Makefile
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv_acpi.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/pme.c [moved from drivers/pci/pcie/pme/pcie_pme.c with 83% similarity]
drivers/pci/pcie/pme/Makefile [deleted file]
drivers/pci/pcie/pme/pcie_pme.h [deleted file]
drivers/pci/pcie/pme/pcie_pme_acpi.c [deleted file]
drivers/pci/pcie/portdrv.h
drivers/pci/pcie/portdrv_acpi.c [new file with mode: 0644]
drivers/pci/pcie/portdrv_core.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/slot.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-pl031.c
drivers/s390/char/tape_block.c
drivers/serial/bfin_sport_uart.c
drivers/staging/comedi/drivers/das08_cs.c
drivers/staging/hv/netvsc_drv.c
drivers/staging/hv/ring_buffer.c
drivers/staging/hv/storvsc_api.h
drivers/staging/hv/storvsc_drv.c
drivers/staging/octeon/Kconfig
drivers/staging/rt2860/usb_main_dev.c
drivers/staging/spectra/Kconfig
drivers/staging/spectra/ffsport.c
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/zram/zram_drv.c
drivers/usb/atm/cxacru.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/message.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/rndis.h
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/host/ehci-ppc-of.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7840.c
drivers/usb/serial/option.c
drivers/usb/serial/ssu100.c
drivers/vhost/vhost.c
drivers/video/pxa168fb.c
fs/9p/fid.c
fs/binfmt_misc.c
fs/bio-integrity.c
fs/direct-io.c
fs/exec.c
fs/fcntl.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/minix/namei.c
fs/namespace.c
fs/nfsd/nfs4state.c
fs/nilfs2/the_nilfs.c
fs/ocfs2/alloc.c
fs/ocfs2/blockcheck.c
fs/ocfs2/file.c
fs/ocfs2/inode.c
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/proc/page.c
fs/proc/task_mmu.c
fs/sysfs/file.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_fs.h
fs/xfs/xfs_vnodeops.c
include/acpi/acpi_bus.h
include/asm-generic/gpio.h
include/asm-generic/percpu.h
include/linux/acpi.h
include/linux/cgroup.h
include/linux/elevator.h
include/linux/i2c/sx150x.h
include/linux/intel-gtt.h [new file with mode: 0644]
include/linux/io-mapping.h
include/linux/kfifo.h
include/linux/ksm.h
include/linux/lglock.h
include/linux/libata.h
include/linux/mm.h
include/linux/mmc/sdio.h
include/linux/mmzone.h
include/linux/mutex.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/semaphore.h
include/linux/serial.h
include/linux/serial_core.h
include/linux/swap.h
include/linux/vmstat.h
include/linux/workqueue.h
include/net/cls_cgroup.h
include/net/ip_vs.h
include/net/sock.h
include/net/udp.h
kernel/cgroup.c
kernel/debug/kdb/kdb_bp.c
kernel/gcov/fs.c
kernel/groups.c
kernel/hrtimer.c
kernel/mutex.c
kernel/perf_event.c
kernel/power/hibernate.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/sched.c
kernel/sched_fair.c
kernel/sys.c
kernel/sysctl.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/watchdog.c
kernel/workqueue.c
lib/raid6/.gitignore [new file with mode: 0644]
lib/scatterlist.c
mm/Kconfig
mm/backing-dev.c
mm/bounce.c
mm/compaction.c
mm/ksm.c
mm/memory.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmzone.c
mm/page_alloc.c
mm/percpu.c
mm/percpu_up.c
mm/swapfile.c
mm/vmstat.c
net/bridge/br_netfilter.c
net/core/dev.c
net/core/gen_estimator.c
net/core/skbuff.c
net/ipv4/Kconfig
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/datagram.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c
net/ipv6/udp.c
net/irda/af_irda.c
net/irda/irlan/irlan_common.c
net/mac80211/main.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netlink/af_netlink.c
net/sched/act_police.c
net/sched/sch_hfsc.c
net/sctp/sm_statefuns.c
net/unix/af_unix.c
net/wireless/core.c
net/wireless/wext-compat.c
net/wireless/wext-core.c
net/xfrm/xfrm_user.c
security/apparmor/include/resource.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/path.c
security/apparmor/policy.c
security/apparmor/resource.c
security/integrity/ima/ima.h
security/integrity/ima/ima_iint.c
security/integrity/ima/ima_main.c
security/keys/keyctl.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/isa/msnd/msnd_pinnacle.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen.h
sound/pci/oxygen/oxygen_lib.c
sound/pci/oxygen/virtuoso.c
sound/pci/oxygen/xonar_wm87x6.c
sound/usb/card.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/pcm.c
tools/perf/util/callchain.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
virt/kvm/kvm_main.c

index 0b1a3f97f285361a4075c8e267d42b2053747d9a..a0d479d1e1dd872bd1ae7b4d17d4582df03a384c 100644 (file)
@@ -1961,6 +1961,12 @@ machines due to caching.
    </sect1>
   </chapter>
 
+  <chapter id="apiref">
+   <title>Mutex API reference</title>
+!Iinclude/linux/mutex.h
+!Ekernel/mutex.c
+  </chapter>
+
   <chapter id="references">
    <title>Further reading</title>
 
index e8473eae2a2064ecef2afadf2e92551dc327bea3..b57a9ede32249bf51b69988740e455da06b7687c 100644 (file)
    <title>Block IO</title>
 !Iinclude/trace/events/block.h
   </chapter>
+
+  <chapter id="workqueue">
+   <title>Workqueue</title>
+!Iinclude/trace/events/workqueue.h
+  </chapter>
 </book>
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
new file mode 100644 (file)
index 0000000..e578fee
--- /dev/null
@@ -0,0 +1,45 @@
+CFQ ioscheduler tunables
+========================
+
+slice_idle
+----------
+This specifies how long CFQ should idle for next request on certain cfq queues
+(for sequential workloads) and service trees (for random workloads) before
+queue is expired and CFQ selects next queue to dispatch from.
+
+By default slice_idle is a non-zero value. That means by default we idle on
+queues/service trees. This can be very helpful on highly seeky media like
+single spindle SATA/SAS disks where we can cut down on overall number of
+seeks and see improved throughput.
+
+Setting slice_idle to 0 will remove all the idling on queues/service tree
+level and one should see an overall improved throughput on faster storage
+devices like multiple SATA/SAS disks in hardware RAID configuration. The down
+side is that isolation provided from WRITES also goes down and notion of
+IO priority becomes weaker.
+
+So depending on storage and workload, it might be useful to set slice_idle=0.
+In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
+keeping slice_idle enabled should be useful. For any configurations where
+there are multiple spindles behind single LUN (Host based hardware RAID
+controller or for storage arrays), setting slice_idle=0 might end up in better
+throughput and acceptable latencies.
+
+CFQ IOPS Mode for group scheduling
+===================================
+Basic CFQ design is to provide priority based time slices. Higher priority
+process gets bigger time slice and lower priority process gets smaller time
+slice. Measuring time becomes harder if storage is fast and supports NCQ and
+it would be better to dispatch multiple requests from multiple cfq queues in
+request queue at a time. In such scenario, it is not possible to measure time
+consumed by single queue accurately.
+
+What is possible though is to measure number of requests dispatched from a
+single queue and also allow dispatch from multiple cfq queue at the same time.
+This effectively becomes the fairness in terms of IOPS (IO operations per
+second).
+
+If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
+to IOPS mode and starts providing fairness in terms of number of requests
+dispatched. Note that this mode switching takes effect only for group
+scheduling. For non-cgroup users nothing should change.
index 48e0b21b00594dac1971472bef9105e04d2bfa77..6919d62591d97580d3132f539e0edd50d9985449 100644 (file)
@@ -217,6 +217,7 @@ Details of cgroup files
 CFQ sysfs tunable
 =================
 /sys/block/<disk>/queue/iosched/group_isolation
+-----------------------------------------------
 
 If group_isolation=1, it provides stronger isolation between groups at the
 expense of throughput. By default group_isolation is 0. In general that
@@ -243,6 +244,33 @@ By default one should run with group_isolation=0. If that is not sufficient
 and one wants stronger isolation between groups, then set group_isolation=1
 but this will come at cost of reduced throughput.
 
+/sys/block/<disk>/queue/iosched/slice_idle
+------------------------------------------
+On a faster hardware CFQ can be slow, especially with sequential workload.
+This happens because CFQ idles on a single queue and single queue might not
+drive deeper request queue depths to keep the storage busy. In such scenarios
+one can try setting slice_idle=0 and that would switch CFQ to IOPS
+(IO operations per second) mode on NCQ supporting hardware.
+
+That means CFQ will not idle between cfq queues of a cfq group and hence be
+able to driver higher queue depth and achieve better throughput. That also
+means that cfq provides fairness among groups in terms of IOPS and not in
+terms of disk time.
+
+/sys/block/<disk>/queue/iosched/group_idle
+------------------------------------------
+If one disables idling on individual cfq queues and cfq service trees by
+setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
+on the group in an attempt to provide fairness among groups.
+
+By default group_idle is same as slice_idle and does not do anything if
+slice_idle is enabled.
+
+One can experience an overall throughput drop if you have created multiple
+groups and put applications in that group which are not driving enough
+IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
+on individual groups and throughput should improve.
+
 What works
 ==========
 - Currently only sync IO queues are support. All the buffered writes are
index d96a6dba57489bc6bbf3e747d82cd450084e5609..9633da01ff46afb008566ccb53aa381606654e85 100644 (file)
@@ -109,17 +109,19 @@ use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
 If you want to initialize a structure with an invalid GPIO number, use
 some negative number (perhaps "-EINVAL"); that will never be valid.  To
-test if a number could reference a GPIO, you may use this predicate:
+test if such number from such a structure could reference a GPIO, you
+may use this predicate:
 
        int gpio_is_valid(int number);
 
 A number that's not valid will be rejected by calls which may request
 or free GPIOs (see below).  Other numbers may also be rejected; for
-example, a number might be valid but unused on a given board.
-
-Whether a platform supports multiple GPIO controllers is currently a
-platform-specific implementation issue.
+example, a number might be valid but temporarily unused on a given board.
 
+Whether a platform supports multiple GPIO controllers is a platform-specific
+implementation issue, as are whether that support can leave "holes" in the space
+of GPIO numbers, and whether new controllers can be added at runtime.  Such issues
+can affect things including whether adjacent GPIO numbers are both valid.
 
 Using GPIOs
 -----------
@@ -480,12 +482,16 @@ To support this framework, a platform's Kconfig will "select" either
 ARCH_REQUIRE_GPIOLIB or ARCH_WANT_OPTIONAL_GPIOLIB
 and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
 three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
-They may also want to provide a custom value for ARCH_NR_GPIOS.
 
-ARCH_REQUIRE_GPIOLIB means that the gpio-lib code will always get compiled
+It may also provide a custom value for ARCH_NR_GPIOS, so that it better
+reflects the number of GPIOs in actual use on that platform, without
+wasting static table space.  (It should count both built-in/SoC GPIOs and
+also ones on GPIO expanders.
+
+ARCH_REQUIRE_GPIOLIB means that the gpiolib code will always get compiled
 into the kernel on that architecture.
 
-ARCH_WANT_OPTIONAL_GPIOLIB means the gpio-lib code defaults to off and the user
+ARCH_WANT_OPTIONAL_GPIOLIB means the gpiolib code defaults to off and the user
 can enable it and build it into the kernel optionally.
 
 If neither of these options are selected, the platform does not support
index f084af0cb8e01f7a67026367508fef1085bc801b..8dd7248508a9e12ac8908c24fb4a3649f157697a 100644 (file)
@@ -1974,15 +1974,18 @@ and is between 256 and 4096 characters. It is defined in the file
                force   Enable ASPM even on devices that claim not to support it.
                        WARNING: Forcing ASPM on may cause system lockups.
 
+       pcie_ports=     [PCIE] PCIe ports handling:
+               auto    Ask the BIOS whether or not to use native PCIe services
+                       associated with PCIe ports (PME, hot-plug, AER).  Use
+                       them only if that is allowed by the BIOS.
+               native  Use native PCIe services associated with PCIe ports
+                       unconditionally.
+               compat  Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
+                       ports driver.
+
        pcie_pme=       [PCIE,PM] Native PCIe PME signaling options:
-                       Format: {auto|force}[,nomsi]
-               auto    Use native PCIe PME signaling if the BIOS allows the
-                       kernel to control PCIe config registers of root ports.
-               force   Use native PCIe PME signaling even if the BIOS refuses
-                       to allow the kernel to control the relevant PCIe config
-                       registers.
                nomsi   Do not use MSI for native PCIe PME signaling (this makes
-                       all PCIe root ports use INTx for everything).
+                       all PCIe root ports use INTx for all services).
 
        pcmv=           [HW,PCMCIA] BadgePAD 4
 
index c91ccc0720fa97f42a1a616fd83e23628ae85ab1..38c10fd7f4110448facd7089b985c4776d264d85 100644 (file)
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
 mutex semantics are sufficient for your code, then there are a couple
 of advantages of mutexes:
 
- - 'struct mutex' is smaller on most architectures: .e.g on x86,
+ - 'struct mutex' is smaller on most architectures: E.g. on x86,
    'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
    A smaller structure size means less RAM footprint, and better
    CPU-cache utilization.
@@ -136,3 +136,4 @@ the APIs of 'struct mutex' have been streamlined:
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int  mutex_lock_interruptible_nested(struct mutex *lock,
                                       unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
index ce46fa1e643e876344071d480e1e6d6d54b89856..37c6aad5e590ac1375944b8edf0656aa7b106607 100644 (file)
@@ -296,6 +296,7 @@ Conexant 5051
 Conexant 5066
 =============
   laptop       Basic Laptop config (default)
+  hp-laptop    HP laptops, e g G60
   dell-laptop  Dell laptops
   dell-vostro  Dell Vostro
   olpc-xo-1_5  OLPC XO 1.5
index c36f5d76e1a2e659db6c4f2e6f243df640c088d2..e7c528ff1013048cd4c6dadc0dbfc23f11204685 100644 (file)
@@ -1445,6 +1445,16 @@ S:       Maintained
 F:     Documentation/video4linux/cafe_ccic
 F:     drivers/media/video/cafe_ccic*
 
+CAIF NETWORK LAYER
+M:     Sjur Braendeland <sjur.brandeland@stericsson.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/networking/caif/
+F:     drivers/net/caif/
+F:     include/linux/caif/
+F:     include/net/caif/
+F:     net/caif/
+
 CALGARY x86-64 IOMMU
 M:     Muli Ben-Yehuda <muli@il.ibm.com>
 M:     "Jon D. Mason" <jdmason@kudzu.us>
@@ -2201,6 +2211,12 @@ L:       linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/ehca/
 
+EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
+M:     Breno Leitao <leitao@linux.vnet.ibm.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ehea/
+
 EMBEDDED LINUX
 M:     Paul Gortmaker <paul.gortmaker@windriver.com>
 M:     Matt Mackall <mpm@selenic.com>
@@ -2781,11 +2797,6 @@ S:       Maintained
 F:     arch/x86/kernel/hpet.c
 F:     arch/x86/include/asm/hpet.h
 
-HPET:  ACPI
-M:     Bob Picco <bob.picco@hp.com>
-S:     Maintained
-F:     drivers/char/hpet.c
-
 HPFS FILESYSTEM
 M:     Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz>
 W:     http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
@@ -3398,7 +3409,7 @@ F:        drivers/s390/kvm/
 
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
-W:     http://ftp.kernel.org/pub/linux/kernel/people/horms/kexec-tools/
+W:     http://kernel.org/pub/linux/utils/kernel/kexec/
 L:     kexec@lists.infradead.org
 S:     Maintained
 F:     include/linux/kexec.h
@@ -3923,8 +3934,7 @@ F:        Documentation/sound/oss/MultiSound
 F:     sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
-M:     Jiri Slaby <jirislaby@gmail.com>
-S:     Maintained
+S:     Orphan
 F:     drivers/char/isicom.c
 F:     include/linux/isicom.h
 
@@ -4604,7 +4614,7 @@ F:        include/linux/preempt.h
 PRISM54 WIRELESS DRIVER
 M:     "Luis R. Rodriguez" <mcgrof@gmail.com>
 L:     linux-wireless@vger.kernel.org
-W:     http://prism54.org
+W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/prism54/
 
@@ -4805,6 +4815,7 @@ RCUTORTURE MODULE
 M:     Josh Triplett <josh@freedesktop.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/rcutorture.c
 
@@ -4829,6 +4840,7 @@ M:        Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:     http://www.rdrop.com/users/paulmck/rclock/
 S:     Supported
+T:     git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
 F:     Documentation/RCU/
 F:     include/linux/rcu*
 F:     include/linux/srcu*
@@ -4836,12 +4848,10 @@ F:      kernel/rcu*
 F:     kernel/srcu*
 X:     kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER
+REAL TIME CLOCK DRIVER (LEGACY)
 M:     Paul Gortmaker <p_gortmaker@yahoo.com>
 S:     Maintained
-F:     Documentation/rtc.txt
-F:     drivers/rtc/
-F:     include/linux/rtc.h
+F:     drivers/char/rtc.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
index f199e69a5d0b0a83fd39e7d5464a5fa439c79265..ad368a93a46a6c5c7e1bf0d25ab17f08ebc19fd8 100644 (file)
@@ -17,7 +17,6 @@
 # define L1_CACHE_SHIFT     5
 #endif
 
-#define L1_CACHE_ALIGN(x)  (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 #define SMP_CACHE_BYTES    L1_CACHE_BYTES
 
 #endif
index 52a79dfc13c6774ec1f88f2e288af1a42f774f51..5c905aaaeccd82861ea62d9186517475f0d7019c 100644 (file)
@@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc)
 #define IO7__ERR_CYC__CYCLE__M (0x7)
 
        printk("%s        Packet In Error: %s\n"
-              "%s        Error in %s, cycle %ld%s%s\n",
+              "%s        Error in %s, cycle %lld%s%s\n",
               err_print_prefix, 
               packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
               err_print_prefix,
@@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
        }
 
        printk("%s      Up Hose Garbage Symptom:\n"
-              "%s        Source Port: %ld - Dest PID: %ld - OpCode: %s\n", 
+              "%s        Source Port: %lld - Dest PID: %lld - OpCode: %s\n",
               err_print_prefix,
               err_print_prefix, 
               EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
@@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
 #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M   (0xfff)
 
        printk("%s      Split Completion Error:\n"      
-              "%s         Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+              "%s         Source (Bus:Dev:Func): %lld:%lld:%lld\n",
               err_print_prefix,
               err_print_prefix,
               EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
index 51c39fa41693bcbcbc880130a8af6e0ce411c2dc..85d8e4f58c83ce612269162b9635bd49059c39dd 100644 (file)
@@ -241,20 +241,20 @@ static inline unsigned long alpha_read_pmc(int idx)
 static int alpha_perf_event_set_period(struct perf_event *event,
                                struct hw_perf_event *hwc, int idx)
 {
-       long left = atomic64_read(&hwc->period_left);
+       long left = local64_read(&hwc->period_left);
        long period = hwc->sample_period;
        int ret = 0;
 
        if (unlikely(left <= -period)) {
                left = period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
 
        if (unlikely(left <= 0)) {
                left += period;
-               atomic64_set(&hwc->period_left, left);
+               local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
@@ -269,7 +269,7 @@ static int alpha_perf_event_set_period(struct perf_event *event,
        if (left > (long)alpha_pmu->pmc_max_period[idx])
                left = alpha_pmu->pmc_max_period[idx];
 
-       atomic64_set(&hwc->prev_count, (unsigned long)(-left));
+       local64_set(&hwc->prev_count, (unsigned long)(-left));
 
        alpha_write_pmc(idx, (unsigned long)(-left));
 
@@ -300,10 +300,10 @@ static unsigned long alpha_perf_event_update(struct perf_event *event,
        long delta;
 
 again:
-       prev_raw_count = atomic64_read(&hwc->prev_count);
+       prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = alpha_read_pmc(idx);
 
-       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
 
@@ -316,8 +316,8 @@ again:
                delta += alpha_pmu->pmc_max_period[idx] + 1;
        }
 
-       atomic64_add(delta, &event->count);
-       atomic64_sub(delta, &hwc->period_left);
+       local64_add(delta, &event->count);
+       local64_sub(delta, &hwc->period_left);
 
        return new_raw_count;
 }
@@ -636,7 +636,7 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!hwc->sample_period) {
                hwc->sample_period = alpha_pmu->pmc_max_period[0];
                hwc->last_period = hwc->sample_period;
-               atomic64_set(&hwc->period_left, hwc->sample_period);
+               local64_set(&hwc->period_left, hwc->sample_period);
        }
 
        return 0;
index 3d2627ec986006e956f161b2bedcfa1c2ec32272..d3e52d3fd59299771ed3e90d0fc13f0aca9faa6f 100644 (file)
@@ -156,9 +156,6 @@ extern void SMC669_Init(int);
 /* es1888.c */
 extern void es1888_init(void);
 
-/* ns87312.c */
-extern void ns87312_enable_ide(long ide_base);
-
 /* ../lib/fpreg.c */
 extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
 extern unsigned long alpha_read_fp_reg (unsigned long reg);
index affd0f3f25df09b147c4a1e06146ff2f57e550ce..14c8898d19ec86fb8d4273dc527e7d860bcb8132 100644 (file)
@@ -33,7 +33,7 @@
 #include "irq_impl.h"
 #include "pci_impl.h"
 #include "machvec_impl.h"
-
+#include "pc873xx.h"
 
 /* Note mask bit is true for DISABLED irqs.  */
 static unsigned long cached_irq_mask = ~0UL;
@@ -235,18 +235,31 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
        return COMMON_TABLE_LOOKUP;
 }
 
+static inline void __init
+cabriolet_enable_ide(void)
+{
+       if (pc873xx_probe() == -1) {
+               printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+        } else {
+               printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+                       pc873xx_get_model(), pc873xx_get_base());
+
+               pc873xx_enable_ide();
+       }
+}
+
 static inline void __init
 cabriolet_init_pci(void)
 {
        common_init_pci();
-       ns87312_enable_ide(0x398);
+       cabriolet_enable_ide();
 }
 
 static inline void __init
 cia_cab_init_pci(void)
 {
        cia_init_pci();
-       ns87312_enable_ide(0x398);
+       cabriolet_enable_ide();
 }
 
 /*
index 230464885b5cbe1a7683a2c76acdf1e46fbb9bac..4da596b6adbbb408ee9bb1e918b8891d3b148a29 100644 (file)
@@ -29,7 +29,7 @@
 #include "irq_impl.h"
 #include "pci_impl.h"
 #include "machvec_impl.h"
-
+#include "pc873xx.h"
 
 /* Note mask bit is true for DISABLED irqs.  */
 static unsigned long cached_irq_mask[2] = { -1, -1 };
@@ -264,7 +264,14 @@ takara_init_pci(void)
                alpha_mv.pci_map_irq = takara_map_irq_srm;
 
        cia_init_pci();
-       ns87312_enable_ide(0x26e);
+
+       if (pc873xx_probe() == -1) {
+               printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
+       } else {
+               printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
+                       pc873xx_get_model(), pc873xx_get_base());
+               pc873xx_enable_ide();
+       }
 }
 
 
index a7ed21f0136a3e5e8f4ee863b3659dd9a8fc8027..553b7cf17bfb0bac057eaedf0402a0bca2aa47e8 100644 (file)
@@ -1576,96 +1576,6 @@ config AUTO_ZRELADDR
          0xf8000000. This assumes the zImage being placed in the first 128MB
          from start of memory.
 
-config ZRELADDR
-       hex "Physical address of the decompressed kernel image"
-       depends on !AUTO_ZRELADDR
-       default 0x00008000 if ARCH_BCMRING ||\
-               ARCH_CNS3XXX ||\
-               ARCH_DOVE ||\
-               ARCH_EBSA110 ||\
-               ARCH_FOOTBRIDGE ||\
-               ARCH_INTEGRATOR ||\
-               ARCH_IOP13XX ||\
-               ARCH_IOP33X ||\
-               ARCH_IXP2000 ||\
-               ARCH_IXP23XX ||\
-               ARCH_IXP4XX ||\
-               ARCH_KIRKWOOD ||\
-               ARCH_KS8695 ||\
-               ARCH_LOKI ||\
-               ARCH_MMP ||\
-               ARCH_MV78XX0 ||\
-               ARCH_NOMADIK ||\
-               ARCH_NUC93X ||\
-               ARCH_NS9XXX ||\
-               ARCH_ORION5X ||\
-               ARCH_SPEAR3XX ||\
-               ARCH_SPEAR6XX ||\
-               ARCH_U8500 ||\
-               ARCH_VERSATILE ||\
-               ARCH_W90X900
-       default 0x08008000 if ARCH_MX1 ||\
-               ARCH_SHARK
-       default 0x10008000 if ARCH_MSM ||\
-               ARCH_OMAP1 ||\
-               ARCH_RPC
-       default 0x20008000 if ARCH_S5P6440 ||\
-               ARCH_S5P6442 ||\
-               ARCH_S5PC100 ||\
-               ARCH_S5PV210
-       default 0x30008000 if ARCH_S3C2410 ||\
-               ARCH_S3C2400 ||\
-               ARCH_S3C2412 ||\
-               ARCH_S3C2416 ||\
-               ARCH_S3C2440 ||\
-               ARCH_S3C2443
-       default 0x40008000 if ARCH_STMP378X ||\
-               ARCH_STMP37XX ||\
-               ARCH_SH7372 ||\
-               ARCH_SH7377 ||\
-               ARCH_S5PV310
-       default 0x50008000 if ARCH_S3C64XX ||\
-               ARCH_SH7367
-       default 0x60008000 if ARCH_VEXPRESS
-       default 0x80008000 if ARCH_MX25 ||\
-               ARCH_MX3 ||\
-               ARCH_NETX ||\
-               ARCH_OMAP2PLUS ||\
-               ARCH_PNX4008
-       default 0x90008000 if ARCH_MX5 ||\
-               ARCH_MX91231
-       default 0xa0008000 if ARCH_IOP32X ||\
-               ARCH_PXA ||\
-               MACH_MX27
-       default 0xc0008000 if ARCH_LH7A40X ||\
-               MACH_MX21
-       default 0xf0008000 if ARCH_AAEC2000 ||\
-               ARCH_L7200
-       default 0xc0028000 if ARCH_CLPS711X
-       default 0x70008000 if ARCH_AT91 && (ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0x20008000 if ARCH_AT91 && !(ARCH_AT91CAP9 || ARCH_AT91SAM9G45)
-       default 0xc0008000 if ARCH_DAVINCI && ARCH_DAVINCI_DA8XX
-       default 0x80008000 if ARCH_DAVINCI && !ARCH_DAVINCI_DA8XX
-       default 0x00008000 if ARCH_EP93XX && EP93XX_SDCE3_SYNC_PHYS_OFFSET
-       default 0xc0008000 if ARCH_EP93XX && EP93XX_SDCE0_PHYS_OFFSET
-       default 0xd0008000 if ARCH_EP93XX && EP93XX_SDCE1_PHYS_OFFSET
-       default 0xe0008000 if ARCH_EP93XX && EP93XX_SDCE2_PHYS_OFFSET
-       default 0xf0008000 if ARCH_EP93XX && EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-       default 0x00008000 if ARCH_GEMINI && GEMINI_MEM_SWAP
-       default 0x10008000 if ARCH_GEMINI && !GEMINI_MEM_SWAP
-       default 0x70008000 if ARCH_REALVIEW && REALVIEW_HIGH_PHYS_OFFSET
-       default 0x00008000 if ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET
-       default 0xc0208000 if ARCH_SA1100 && SA1111
-       default 0xc0008000 if ARCH_SA1100 && !SA1111
-       default 0x30108000 if ARCH_S3C2410 && PM_H1940
-       default 0x28E08000 if ARCH_U300 && MACH_U300_SINGLE_RAM
-       default 0x48008000 if ARCH_U300 && !MACH_U300_SINGLE_RAM
-       help
-         ZRELADDR is the physical address where the decompressed kernel
-         image will be placed. ZRELADDR has to be specified when the
-         assumption of AUTO_ZRELADDR is not valid, or when ZBOOT_ROM is
-         selected.
-
 endmenu
 
 menu "CPU Power Management"
index f705213caa881af9c07e181c0d2a3a1a26a5d98a..4a590f4113e2af044ea1764aeb0681ad7f50a74c 100644 (file)
 MKIMAGE         := $(srctree)/scripts/mkuboot.sh
 
 ifneq ($(MACHINE),)
--include $(srctree)/$(MACHINE)/Makefile.boot
+include $(srctree)/$(MACHINE)/Makefile.boot
 endif
 
 # Note: the following conditions must always be true:
+#   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
 #   PARAMS_PHYS must be within 4MB of ZRELADDR
 #   INITRD_PHYS must be in RAM
+ZRELADDR    := $(zreladdr-y)
 PARAMS_PHYS := $(params_phys-y)
 INITRD_PHYS := $(initrd_phys-y)
 
-export INITRD_PHYS PARAMS_PHYS
+export ZRELADDR INITRD_PHYS PARAMS_PHYS
 
 targets := Image zImage xipImage bootpImage uImage
 
@@ -65,7 +67,7 @@ quiet_cmd_uimage = UIMAGE  $@
 ifeq ($(CONFIG_ZBOOT_ROM),y)
 $(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
 else
-$(obj)/uImage: LOADADDR=$(CONFIG_ZRELADDR)
+$(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
 ifeq ($(CONFIG_THUMB2_KERNEL),y)
index 68775e33476c2fafb4c20d88f7f676c836a8edc1..b23f6bc46cfa1dd1029bb53dc7009c3c790f1088 100644 (file)
@@ -79,6 +79,10 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8
 endif
index 6af9907c3b5ccad2ae2d73e37f5470c5b2f6b897..6825c34646d4e02f24b0eefe7ab4e012bca05208 100644 (file)
@@ -177,7 +177,7 @@ not_angel:
                and     r4, pc, #0xf8000000
                add     r4, r4, #TEXT_OFFSET
 #else
-               ldr     r4, =CONFIG_ZRELADDR
+               ldr     r4, =zreladdr
 #endif
                subs    r0, r0, r1              @ calculate the delta offset
 
index 6c091356245593b87860d2ccb6221650fc62855b..7974baacafcea74ec055a46ee0f6cea496f24e6f 100644 (file)
@@ -263,6 +263,14 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
        return 0;
 }
 
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+               __func__, dma_addr, size);
+       return (dev->bus == &pci_bus_type) &&
+               ((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
+}
+
 int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
 {
        it8152_io.start = IT8152_IO_BASE + 0x12000;
index c226fe10553e2952ec982ef3ec5fcaf8538586e0..c568da7dcae45e60e8630e2b3060599f561d6555 100644 (file)
@@ -288,15 +288,7 @@ extern void dmabounce_unregister_dev(struct device *);
  * DMA access and 1 if the buffer needs to be bounced.
  *
  */
-#ifdef CONFIG_SA1111
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-#else
-static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
-                                  size_t size)
-{
-       return 0;
-}
-#endif
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
index 48837e6d888722dc96f594247a80026bf9b75e29..b5799a3b7117d5480eec6456008f79e01d1064ae 100644 (file)
@@ -17,7 +17,7 @@
  * counter interrupts are regular interrupts and not an NMI. This
  * means that when we receive the interrupt we can call
  * perf_event_do_pending() that handles all of the work with
- * interrupts enabled.
+ * interrupts disabled.
  */
 static inline void
 set_perf_event_pending(void)
index d02cfb683487eeafea4ef407a1a4e6f2d4ce4112..c891eb76c0e313406847e7b9fbe968bb1b8fa459 100644 (file)
 #define __NR_perf_event_open           (__NR_SYSCALL_BASE+364)
 #define __NR_recvmmsg                  (__NR_SYSCALL_BASE+365)
 #define __NR_accept4                   (__NR_SYSCALL_BASE+366)
+#define __NR_fanotify_init             (__NR_SYSCALL_BASE+367)
+#define __NR_fanotify_mark             (__NR_SYSCALL_BASE+368)
+#define __NR_prlimit64                 (__NR_SYSCALL_BASE+369)
 
 /*
  * The following SWIs are ARM private.
index afeb71fa72cb81fc0e2fb5652c653ef34e7258bb..5c26eccef9982665b1e1672416b9bc996f3b2dae 100644 (file)
                CALL(sys_perf_event_open)
 /* 365 */      CALL(sys_recvmmsg)
                CALL(sys_accept4)
+               CALL(sys_fanotify_init)
+               CALL(sys_fanotify_mark)
+               CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 417c392ddf1cb55066fa5f99e83e77514bd89901..ecbb0288e5dd95c80b420635dffee6ef600f86a8 100644 (file)
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu && event->pmu != &pmu)
-               return 0;
+       if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+               return 1;
 
        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
        /*
         * Handle the pending perf events.
         *
-        * Note: this call *must* be run with interrupts enabled. For
-        * platforms that can have the PMU interrupts raised as a PMI, this
+        * Note: this call *must* be run with interrupts disabled. For
+        * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        perf_event_do_pending();
index 753c0d31a3d3f0b407cfe20b68f5ba66189cd1a7..c67b47f1c0fd805751cf226a65315337a2507942 100644 (file)
@@ -121,8 +121,8 @@ static struct clk ssc1_clk = {
        .pmc_mask       = 1 << AT91SAM9G45_ID_SSC1,
        .type           = CLK_TYPE_PERIPHERAL,
 };
-static struct clk tcb_clk = {
-       .name           = "tcb_clk",
+static struct clk tcb0_clk = {
+       .name           = "tcb0_clk",
        .pmc_mask       = 1 << AT91SAM9G45_ID_TCB,
        .type           = CLK_TYPE_PERIPHERAL,
 };
@@ -192,6 +192,14 @@ static struct clk ohci_clk = {
        .parent         = &uhphs_clk,
 };
 
+/* One additional fake clock for second TC block */
+static struct clk tcb1_clk = {
+       .name           = "tcb1_clk",
+       .pmc_mask       = 0,
+       .type           = CLK_TYPE_PERIPHERAL,
+       .parent         = &tcb0_clk,
+};
+
 static struct clk *periph_clocks[] __initdata = {
        &pioA_clk,
        &pioB_clk,
@@ -208,7 +216,7 @@ static struct clk *periph_clocks[] __initdata = {
        &spi1_clk,
        &ssc0_clk,
        &ssc1_clk,
-       &tcb_clk,
+       &tcb0_clk,
        &pwm_clk,
        &tsc_clk,
        &dma_clk,
@@ -221,6 +229,7 @@ static struct clk *periph_clocks[] __initdata = {
        &mmc1_clk,
        // irq0
        &ohci_clk,
+       &tcb1_clk,
 };
 
 /*
index 809114d5a5a6690ec3c4f510bb6cb88fc7fede99..5e71ccd5e7d381ca211d1f73653351bdca814c6b 100644 (file)
@@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = {
                .end    = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1,
                .flags  = IORESOURCE_MEM,
        },
-       [2] = {
+       [1] = {
                .start  = AT91SAM9G45_ID_DMA,
                .end    = AT91SAM9G45_ID_DMA,
                .flags  = IORESOURCE_IRQ,
@@ -835,9 +835,9 @@ static struct platform_device at91sam9g45_tcb1_device = {
 static void __init at91_add_device_tc(void)
 {
        /* this chip has one clock and irq for all six TC channels */
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
+       at91_clock_associate("tcb0_clk", &at91sam9g45_tcb0_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb0_device);
-       at91_clock_associate("tcb_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
+       at91_clock_associate("tcb1_clk", &at91sam9g45_tcb1_device.dev, "t0_clk");
        platform_device_register(&at91sam9g45_tcb1_device);
 }
 #else
index c4c8865d52d7bfd84f5a22416ef7fe6bc1c472aa..65eb0943194f4b0ea4ea65613435ceef4d875c75 100644 (file)
@@ -93,11 +93,12 @@ static struct resource dm9000_resource[] = {
                .start  = AT91_PIN_PC11,
                .end    = AT91_PIN_PC11,
                .flags  = IORESOURCE_IRQ
+                       | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE,
        }
 };
 
 static struct dm9000_plat_data dm9000_platdata = {
-       .flags          = DM9000_PLATF_16BITONLY,
+       .flags          = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
 };
 
 static struct platform_device dm9000_device = {
@@ -167,17 +168,6 @@ static struct at91_udc_data __initdata ek_udc_data = {
 };
 
 
-/*
- * MCI (SD/MMC)
- */
-static struct at91_mmc_data __initdata ek_mmc_data = {
-       .wire4          = 1,
-//     .det_pin        = ... not connected
-//     .wp_pin         = ... not connected
-//     .vcc_pin        = ... not connected
-};
-
-
 /*
  * NAND flash
  */
@@ -246,6 +236,10 @@ static void __init ek_add_device_nand(void)
        at91_add_device_nand(&ek_nand_data);
 }
 
+/*
+ * SPI related devices
+ */
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
 
 /*
  * ADS7846 Touchscreen
@@ -356,6 +350,19 @@ static struct spi_board_info ek_spi_devices[] = {
 #endif
 };
 
+#else /* CONFIG_SPI_ATMEL_* */
+/* spi0 and mmc/sd share the same PIO pins: cannot be used at the same time */
+
+/*
+ * MCI (SD/MMC)
+ * det_pin, wp_pin and vcc_pin are not connected
+ */
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .wire4          = 1,
+};
+
+#endif /* CONFIG_SPI_ATMEL_* */
+
 
 /*
  * LCD Controller
index 7f7da439341fabc4e85b6febeb8b1f3e2cd6f4ee..7525cee3983f7252fac0542be5955a490ca6869b 100644 (file)
@@ -501,7 +501,8 @@ postcore_initcall(at91_clk_debugfs_init);
 int __init clk_register(struct clk *clk)
 {
        if (clk_is_peripheral(clk)) {
-               clk->parent = &mck;
+               if (!clk->parent)
+                       clk->parent = &mck;
                clk->mode = pmc_periph_mode;
                list_add_tail(&clk->node, &clocks);
        }
index 8bf3cec98cfadba46d8ca1816c7aff5b8a871bbf..4566bd1c8660b3fe7ac0cff28473746fd3d34c82 100644 (file)
@@ -560,4 +560,4 @@ static int __init ep93xx_clock_init(void)
        clkdev_add_table(clocks, ARRAY_SIZE(clocks));
        return 0;
 }
-arch_initcall(ep93xx_clock_init);
+postcore_initcall(ep93xx_clock_init);
index 91931dcb068997d540dd2a133001eed458cbc81b..4aaadc753d3e6ff4e60b88c17e62e5b3c0cfb812 100644 (file)
@@ -215,7 +215,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd25_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c..e064bb3d69197b8ddee286eda06dee980052712d 100644 (file)
@@ -147,8 +147,8 @@ static void __init eukrea_cpuimx25_init(void)
        if (!otg_mode_host)
                mxc_register_device(&otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD
+       eukrea_mbimxsd25_baseboard_init();
 #endif
 }
 
index d3af0fdf8475f7ef0d67b3afbb080df739c36431..7a62e744a8b0fcbc5eef1da9645bf663199608f8 100644 (file)
@@ -155,7 +155,7 @@ static unsigned long get_rate_arm(void)
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
        if (aad->sel)
-               fref = fref * 2 / 3;
+               fref = fref * 3 / 4;
 
        return fref / aad->arm;
 }
@@ -164,7 +164,7 @@ static unsigned long get_rate_ahb(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        struct arm_ahb_div *aad;
-       unsigned long fref = get_rate_mpll();
+       unsigned long fref = get_rate_arm();
 
        aad = &clk_consumer[(pdr0 >> 16) & 0xf];
 
@@ -176,16 +176,11 @@ static unsigned long get_rate_ipg(struct clk *clk)
        return get_rate_ahb(NULL) >> 1;
 }
 
-static unsigned long get_3_3_div(unsigned long in)
-{
-       return (((in >> 3) & 0x7) + 1) * ((in & 0x7) + 1);
-}
-
 static unsigned long get_rate_uart(struct clk *clk)
 {
        unsigned long pdr3 = __raw_readl(CCM_BASE + CCM_PDR3);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div = get_3_3_div(pdr4 >> 10);
+       unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
 
        if (pdr3 & (1 << 14))
                return get_rate_arm() / div;
@@ -216,7 +211,7 @@ static unsigned long get_rate_sdhc(struct clk *clk)
                break;
        }
 
-       return rate / get_3_3_div(div);
+       return rate / (div + 1);
 }
 
 static unsigned long get_rate_mshc(struct clk *clk)
@@ -270,7 +265,7 @@ static unsigned long get_rate_csi(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr2 >> 16) & 0x3f);
+       return rate / (((pdr2 >> 16) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_otg(struct clk *clk)
@@ -283,25 +278,51 @@ static unsigned long get_rate_otg(struct clk *clk)
        else
                rate = get_rate_ppll();
 
-       return rate / get_3_3_div((pdr4 >> 22) & 0x3f);
+       return rate / (((pdr4 >> 22) & 0x3f) + 1);
 }
 
 static unsigned long get_rate_ipg_per(struct clk *clk)
 {
        unsigned long pdr0 = __raw_readl(CCM_BASE + CCM_PDR0);
        unsigned long pdr4 = __raw_readl(CCM_BASE + CCM_PDR4);
-       unsigned long div1, div2;
+       unsigned long div;
 
        if (pdr0 & (1 << 26)) {
-               div1 = (pdr4 >> 19) & 0x7;
-               div2 = (pdr4 >> 16) & 0x7;
-               return get_rate_arm() / ((div1 + 1) * (div2 + 1));
+               div = (pdr4 >> 16) & 0x3f;
+               return get_rate_arm() / (div + 1);
        } else {
-               div1 = (pdr0 >> 12) & 0x7;
-               return get_rate_ahb(NULL) / div1;
+               div = (pdr0 >> 12) & 0x7;
+               return get_rate_ahb(NULL) / (div + 1);
        }
 }
 
+static unsigned long get_rate_hsp(struct clk *clk)
+{
+       unsigned long hsp_podf = (__raw_readl(CCM_BASE + CCM_PDR0) >> 20) & 0x03;
+       unsigned long fref = get_rate_mpll();
+
+       if (fref > 400 * 1000 * 1000) {
+               switch (hsp_podf) {
+               case 0:
+                       return fref >> 2;
+               case 1:
+                       return fref >> 3;
+               case 2:
+                       return fref / 3;
+               }
+       } else {
+               switch (hsp_podf) {
+               case 0:
+               case 2:
+                       return fref / 3;
+               case 1:
+                       return fref / 6;
+               }
+       }
+
+       return 0;
+}
+
 static int clk_cgr_enable(struct clk *clk)
 {
        u32 reg;
@@ -359,7 +380,7 @@ DEFINE_CLOCK(i2c1_clk,   0, CCM_CGR1, 10, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c2_clk,   1, CCM_CGR1, 12, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(i2c3_clk,   2, CCM_CGR1, 14, get_rate_ipg_per, NULL);
 DEFINE_CLOCK(iomuxc_clk, 0, CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_ahb, NULL);
+DEFINE_CLOCK(ipu_clk,    0, CCM_CGR1, 18, get_rate_hsp, NULL);
 DEFINE_CLOCK(kpp_clk,    0, CCM_CGR1, 20, get_rate_ipg, NULL);
 DEFINE_CLOCK(mlb_clk,    0, CCM_CGR1, 22, get_rate_ahb, NULL);
 DEFINE_CLOCK(mshc_clk,   0, CCM_CGR1, 24, get_rate_mshc, NULL);
@@ -485,10 +506,10 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int ll = 0;
+       unsigned int cgr2 = 3 << 26, cgr3 = 0;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
-       ll = (3 << 16);
+       cgr2 |= 3 << 16;
 #endif
 
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
@@ -499,8 +520,20 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
-       __raw_writel((3 << 26) | ll, CCM_BASE + CCM_CGR2);
-       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       /*
+        * Check if we came up in internal boot mode. If yes, we need some
+        * extra clocks turned on, otherwise the MX35 boot ROM code will
+        * hang after a watchdog reset.
+        */
+       if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
+               /* Additionally turn on UART1, SCC, and IIM clocks */
+               cgr2 |= 3 << 16 | 3 << 4;
+               cgr3 |= 3 << 2;
+       }
+
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
 
        mxc_timer_init(&gpt_clk,
                        MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
index 1dc5004df866d4e9df7d817aca27e658573f8c6c..f8f15e3ac7a0e82a6cde9f6bd021c85a546bbae7 100644 (file)
@@ -216,7 +216,7 @@ struct imx_ssi_platform_data eukrea_mbimxsd_ssi_pdata = {
  * Add platform devices present on this baseboard and init
  * them from CPU side as far as required to use them later on
  */
-void __init eukrea_mbimxsd_baseboard_init(void)
+void __init eukrea_mbimxsd35_baseboard_init(void)
 {
        if (mxc_iomux_v3_setup_multiple_pads(eukrea_mbimxsd_pads,
                        ARRAY_SIZE(eukrea_mbimxsd_pads)))
index 9770a6a973be561fdfb67cfdda1f8cdf36381e8e..2a4f8b781ba4c60a4d66f554d860953fcafe1cf0 100644 (file)
@@ -201,8 +201,8 @@ static void __init mxc_board_init(void)
        if (!otg_mode_host)
                mxc_register_device(&mxc_otg_udc_device, &otg_device_pdata);
 
-#ifdef CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD
-       eukrea_mbimxsd_baseboard_init();
+#ifdef CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD
+       eukrea_mbimxsd35_baseboard_init();
 #endif
 }
 
index 6af69def357f92d2f177d19d8fc7bce330ff5666..57c10a9926cc5056668645ff39fe3ac07fed9969 100644 (file)
@@ -56,7 +56,7 @@ static void _clk_ccgr_disable(struct clk *clk)
 {
        u32 reg;
        reg = __raw_readl(clk->enable_reg);
-       reg &= ~(MXC_CCM_CCGRx_MOD_OFF << clk->enable_shift);
+       reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
        __raw_writel(reg, clk->enable_reg);
 
 }
index 268a9bc6be8a22a4ca0ac4fed742d035753581a3..50d5939a78f1bdc8f318f08360243626a14a6105 100644 (file)
@@ -398,7 +398,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 {
        int i;
        unsigned int freq;
index 27fa329d9a8b7a5677c2cf75e25900797130eb46..0a0d0fe99220d7f450e61dc04495d8dfe4492be3 100644 (file)
@@ -204,7 +204,7 @@ static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy,
        return 0;
 }
 
-static __init int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret = -EINVAL;
 
index 7139e0dc26d16062304bd72beca1f339e61899e7..4e1287070d219c32235aebadfbf2d97df3d059fb 100644 (file)
 #define GPIO46_CI_DD_7         MFP_CFG_DRV(GPIO46, AF0, DS04X)
 #define GPIO47_CI_DD_8         MFP_CFG_DRV(GPIO47, AF1, DS04X)
 #define GPIO48_CI_DD_9         MFP_CFG_DRV(GPIO48, AF1, DS04X)
-#define GPIO52_CI_HSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
-#define GPIO51_CI_VSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
 #define GPIO49_CI_MCLK         MFP_CFG_DRV(GPIO49, AF0, DS04X)
 #define GPIO50_CI_PCLK         MFP_CFG_DRV(GPIO50, AF0, DS04X)
+#define GPIO51_CI_HSYNC                MFP_CFG_DRV(GPIO51, AF0, DS04X)
+#define GPIO52_CI_VSYNC                MFP_CFG_DRV(GPIO52, AF0, DS04X)
 
 /* KEYPAD */
 #define GPIO3_KP_DKIN_6                MFP_CFG_LPM(GPIO3,   AF2, FLOAT)
index 5e16b4c692222a4a45d5728cec5b4970cae495e0..ae416fe7daf2e61b09f683c9be36ebc57c105c62 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 # Common objects
-obj-y                          := timer.o console.o clock.o
+obj-y                          := timer.o console.o clock.o pm_runtime.o
 
 # CPU objects
 obj-$(CONFIG_ARCH_SH7367)      += setup-sh7367.o clock-sh7367.o intc-sh7367.o
index 23d472f9525e6a160c97cbf8adc21c505819fb05..95935c83c30654ee94d301e94086680475a6ed67 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -39,6 +40,7 @@
 #include <linux/sh_clk.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
 
@@ -307,6 +309,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
        .dma_slave_tx   = SHDMA_SLAVE_SDHI1_TX,
        .dma_slave_rx   = SHDMA_SLAVE_SDHI1_RX,
        .tmio_ocr_mask  = MMC_VDD_165_195,
+       .tmio_flags     = TMIO_MMC_WRPROTECT_DISABLE,
 };
 
 static struct resource sdhi1_resources[] = {
@@ -558,7 +561,7 @@ static struct resource fsi_resources[] = {
 
 static struct platform_device fsi_device = {
        .name           = "sh_fsi2",
-       .id             = 0,
+       .id             = -1,
        .num_resources  = ARRAY_SIZE(fsi_resources),
        .resource       = fsi_resources,
        .dev    = {
@@ -650,7 +653,44 @@ static struct platform_device hdmi_device = {
        },
 };
 
+static struct gpio_led ap4evb_leds[] = {
+       {
+               .name                   = "led4",
+               .gpio                   = GPIO_PORT185,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led2",
+               .gpio                   = GPIO_PORT186,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led3",
+               .gpio                   = GPIO_PORT187,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       },
+       {
+               .name                   = "led1",
+               .gpio                   = GPIO_PORT188,
+               .default_state  = LEDS_GPIO_DEFSTATE_ON,
+       }
+};
+
+static struct gpio_led_platform_data ap4evb_leds_pdata = {
+       .num_leds = ARRAY_SIZE(ap4evb_leds),
+       .leds = ap4evb_leds,
+};
+
+static struct platform_device leds_device = {
+       .name = "leds-gpio",
+       .id = 0,
+       .dev = {
+               .platform_data  = &ap4evb_leds_pdata,
+       },
+};
+
 static struct platform_device *ap4evb_devices[] __initdata = {
+       &leds_device,
        &nor_flash_device,
        &smc911x_device,
        &sdhi0_device,
@@ -840,20 +880,6 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_FN_CS5A,      NULL);
        gpio_request(GPIO_FN_IRQ6_39,   NULL);
 
-       /* enable LED 1 - 4 */
-       gpio_request(GPIO_PORT185, NULL);
-       gpio_request(GPIO_PORT186, NULL);
-       gpio_request(GPIO_PORT187, NULL);
-       gpio_request(GPIO_PORT188, NULL);
-       gpio_direction_output(GPIO_PORT185, 1);
-       gpio_direction_output(GPIO_PORT186, 1);
-       gpio_direction_output(GPIO_PORT187, 1);
-       gpio_direction_output(GPIO_PORT188, 1);
-       gpio_export(GPIO_PORT185, 0);
-       gpio_export(GPIO_PORT186, 0);
-       gpio_export(GPIO_PORT187, 0);
-       gpio_export(GPIO_PORT188, 0);
-
        /* enable Debug switch (S6) */
        gpio_request(GPIO_PORT32, NULL);
        gpio_request(GPIO_PORT33, NULL);
index fb4e9b1d788e464922ba2345d60fb43b8e1173d2..759468992ad287ff3f40b2f2e92e19d99734c94a 100644 (file)
@@ -286,7 +286,6 @@ static struct clk_ops pllc2_clk_ops = {
 
 struct clk pllc2_clk = {
        .ops            = &pllc2_clk_ops,
-       .flags          = CLK_ENABLE_ON_INIT,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
        .parent_table   = pllc2_parent,
@@ -395,7 +394,7 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
 
 enum { MSTP001,
        MSTP131, MSTP130,
-       MSTP129, MSTP128,
+       MSTP129, MSTP128, MSTP127, MSTP126,
        MSTP118, MSTP117, MSTP116,
        MSTP106, MSTP101, MSTP100,
        MSTP223,
@@ -413,6 +412,8 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
        [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
        [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* VEU0 */
+       [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU */
+       [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2 */
        [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX */
        [MSTP117] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
        [MSTP116] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -428,7 +429,7 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
        [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
        [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, CLK_ENABLE_ON_INIT), /* FSIA */
+       [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSIA */
        [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
        [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
        [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
@@ -498,6 +499,8 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
        CLKDEV_DEV_ID("uio_pdrv_genirq.1", &mstp_clks[MSTP128]), /* VEU0 */
+       CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU */
+       CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2 */
        CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
        CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), /* LCDC1 */
        CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* IIC0 */
index b7c705a213a2a1400e180df83d9e5a6f67649db4..6b7c7c42bc8fc529678fe7e77d01878299d049c4 100644 (file)
@@ -1,8 +1,10 @@
 /*
- * SH-Mobile Timer
+ * SH-Mobile Clock Framework
  *
  * Copyright (C) 2010  Magnus Damm
  *
+ * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
new file mode 100644 (file)
index 0000000..94912d3
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * arch/arm/mach-shmobile/pm_runtime.c
+ *
+ * Runtime PM support code for SuperH Mobile ARM
+ *
+ *  Copyright (C) 2009-2010 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/sh_clk.h>
+#include <linux/bitmap.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#define BIT_ONCE 0
+#define BIT_ACTIVE 1
+#define BIT_CLK_ENABLED 2
+
+struct pm_runtime_data {
+       unsigned long flags;
+       struct clk *clk;
+};
+
+static void __devres_release(struct device *dev, void *res)
+{
+       struct pm_runtime_data *prd = res;
+
+       dev_dbg(dev, "__devres_release()\n");
+
+       if (test_bit(BIT_CLK_ENABLED, &prd->flags))
+               clk_disable(prd->clk);
+
+       if (test_bit(BIT_ACTIVE, &prd->flags))
+               clk_put(prd->clk);
+}
+
+static struct pm_runtime_data *__to_prd(struct device *dev)
+{
+       return devres_find(dev, __devres_release, NULL, NULL);
+}
+
+static void platform_pm_runtime_init(struct device *dev,
+                                    struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) {
+               prd->clk = clk_get(dev, NULL);
+               if (!IS_ERR(prd->clk)) {
+                       set_bit(BIT_ACTIVE, &prd->flags);
+                       dev_info(dev, "clocks managed by runtime pm\n");
+               }
+       }
+}
+
+static void platform_pm_runtime_bug(struct device *dev,
+                                   struct pm_runtime_data *prd)
+{
+       if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags))
+               dev_err(dev, "runtime pm suspend before resume\n");
+}
+
+int platform_pm_runtime_suspend(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_suspend()\n");
+
+       platform_pm_runtime_bug(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_disable(prd->clk);
+               clear_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_resume(struct device *dev)
+{
+       struct pm_runtime_data *prd = __to_prd(dev);
+
+       dev_dbg(dev, "platform_pm_runtime_resume()\n");
+
+       platform_pm_runtime_init(dev, prd);
+
+       if (prd && test_bit(BIT_ACTIVE, &prd->flags)) {
+               clk_enable(prd->clk);
+               set_bit(BIT_CLK_ENABLED, &prd->flags);
+       }
+
+       return 0;
+}
+
+int platform_pm_runtime_idle(struct device *dev)
+{
+       /* suspend synchronously to disable clocks immediately */
+       return pm_runtime_suspend(dev);
+}
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct pm_runtime_data *prd;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       if (action == BUS_NOTIFY_BIND_DRIVER) {
+               prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL);
+               if (prd)
+                       devres_add(dev, prd);
+               else
+                       dev_err(dev, "unable to alloc memory for runtime pm\n");
+       }
+
+       return 0;
+}
+
+#else /* CONFIG_PM_RUNTIME */
+
+static int platform_bus_notify(struct notifier_block *nb,
+                              unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct clk *clk;
+
+       dev_dbg(dev, "platform_bus_notify() %ld !\n", action);
+
+       switch (action) {
+       case BUS_NOTIFY_BIND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_enable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced on\n");
+               }
+               break;
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               clk = clk_get(dev, NULL);
+               if (!IS_ERR(clk)) {
+                       clk_disable(clk);
+                       clk_put(clk);
+                       dev_info(dev, "runtime pm disabled, clock forced off\n");
+               }
+               break;
+       }
+
+       return 0;
+}
+
+#endif /* CONFIG_PM_RUNTIME */
+
+static struct notifier_block platform_bus_notifier = {
+       .notifier_call = platform_bus_notify
+};
+
+static int __init sh_pm_runtime_init(void)
+{
+       bus_register_notifier(&platform_bus_type, &platform_bus_notifier);
+       return 0;
+}
+core_initcall(sh_pm_runtime_init);
index 33c3f570aaa06c2a56f6a6d70eb7f558a883b0b1..a0a2928ae4dd7670a1342863040791833b57dbab 100644 (file)
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
        bool "Support ARM V6K processor extensions" if !SMP
-       depends on CPU_V6
+       depends on CPU_V6 || CPU_V7
        default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
        help
          Say Y here if your ARMv6 processor supports the 'K' extension.
index c704eed63c5ddba4c5f849f7ab7b6420008cef21..4bc43e535d3baadc657df9af504078ff1ae00570 100644 (file)
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
                        }
                } while (size -= PAGE_SIZE);
 
+               dsb();
+
                return (void *)c->vm_start;
        }
        return NULL;
index 0527e65318f4a647b5b00192ce08ba47368ce5f8..6785db4179b84ccd925f9112cd48e9be71668e7c 100644 (file)
@@ -43,6 +43,7 @@ config ARCH_MXC91231
 config ARCH_MX5
        bool "MX5-based"
        select CPU_V7
+       select ARM_L1_CACHE_SHIFT_6
        help
          This enables support for systems based on the Freescale i.MX51 family
 
index 634e3f4c454df222728aa678bdcd657967286dbc..656acb45d434b7333e0864798fb6b69538387701 100644 (file)
@@ -37,9 +37,9 @@
  * mach-mx5/eukrea_mbimx51-baseboard.c for cpuimx51
  */
 
-extern void eukrea_mbimx25_baseboard_init(void);
+extern void eukrea_mbimxsd25_baseboard_init(void);
 extern void eukrea_mbimx27_baseboard_init(void);
-extern void eukrea_mbimx35_baseboard_init(void);
+extern void eukrea_mbimxsd35_baseboard_init(void);
 extern void eukrea_mbimx51_baseboard_init(void);
 
 #endif
index b3da9aad4295704ef9ea8a4a43c95127d74e6d9e..3703ab28257fbbb55d3db89bee56877eebb38345 100644 (file)
@@ -164,8 +164,9 @@ int tzic_enable_wake(int is_idle)
                return -EAGAIN;
 
        for (i = 0; i < 4; i++) {
-               v = is_idle ? __raw_readl(TZIC_ENSET0(i)) : wakeup_intr[i];
-               __raw_writel(v, TZIC_WAKEUP0(i));
+               v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
+                       wakeup_intr[i];
+               __raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
        }
 
        return 0;
index 0732c6c8d511979e354cced2cd6987889702a1e7..ef32686feef9431ab00f42e4a2a0e2d7656af783 100644 (file)
@@ -176,7 +176,7 @@ static inline void __add_pwm(struct pwm_device *pwm)
 
 static int __devinit pwm_probe(struct platform_device *pdev)
 {
-       struct platform_device_id *id = platform_get_device_id(pdev);
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        struct pwm_device *pwm, *secondary = NULL;
        struct resource *r;
        int ret = 0;
index 48cbdcb6bbd4288929f31bef94da3691b160a489..55590a4d87c932984404d1df13ca4c296c9d7117 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Mon Jul 12 21:10:14 2010
+# Last update: Thu Sep 9 22:43:01 2010
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -2622,7 +2622,7 @@ kraken                    MACH_KRAKEN             KRAKEN                  2634
 gw2388                 MACH_GW2388             GW2388                  2635
 jadecpu                        MACH_JADECPU            JADECPU                 2636
 carlisle               MACH_CARLISLE           CARLISLE                2637
-lux_sf9                        MACH_LUX_SFT9           LUX_SFT9                2638
+lux_sf9                        MACH_LUX_SF9            LUX_SF9                 2638
 nemid_tb               MACH_NEMID_TB           NEMID_TB                2639
 terrier                        MACH_TERRIER            TERRIER                 2640
 turbot                 MACH_TURBOT             TURBOT                  2641
@@ -2950,3 +2950,97 @@ davinci_dm365_dvr        MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
 netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
 wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
+lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
+spica                  MACH_SPICA              SPICA                   2968
+gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
+bipnet                 MACH_BIPNET             BIPNET                  2970
+overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
+davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
+pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
+ptx7545                        MACH_PTX7545            PTX7545                 2974
+tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
+omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
+flyer                  MACH_FLYER              FLYER                   2978
+tornado3240            MACH_TORNADO3240        TORNADO3240             2979
+soli_01                        MACH_SOLI_01            SOLI_01                 2980
+omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
+helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
+netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
+ssc                    MACH_SSC                SSC                     2984
+premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
+wasabi                 MACH_WASABI             WASABI                  2986
+vivow                  MACH_VIVOW              VIVOW                   2987
+mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
+universal              MACH_UNIVERSAL          UNIVERSAL               2989
+real6410               MACH_REAL6410           REAL6410                2990
+spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
+ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
+omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
+thebe                  MACH_THEBE              THEBE                   2994
+rv082                  MACH_RV082              RV082                   2995
+armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
+tjinc1000              MACH_TJINC1000          TJINC1000               2997
+dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
+ax8008                 MACH_AX8008             AX8008                  2999
+gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
+pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
+ea20                   MACH_EA20               EA20                    3002
+awm2                   MACH_AWM2               AWM2                    3003
+ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
+tegra_seaboard         MACH_TEGRA_SEABOARD     TEGRA_SEABOARD          3005
+linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
+tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
+rubys                  MACH_RUBYS              RUBYS                   3008
+aquarius               MACH_AQUARIUS           AQUARIUS                3009
+mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
+mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
+lswxl                  MACH_LSWXL              LSWXL                   3012
+dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
+sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
+jocpu550               MACH_JOCPU550           JOCPU550                3015
+msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
+msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
+yanomami               MACH_YANOMAMI           YANOMAMI                3018
+gta04                  MACH_GTA04              GTA04                   3019
+cm_a510                        MACH_CM_A510            CM_A510                 3020
+omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
+kx33xx                 MACH_KX33XX             KX33XX                  3022
+ptx7510                        MACH_PTX7510            PTX7510                 3023
+top9000                        MACH_TOP9000            TOP9000                 3024
+teenote                        MACH_TEENOTE            TEENOTE                 3025
+ts3                    MACH_TS3                TS3                     3026
+a0                     MACH_A0                 A0                      3027
+fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
+fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
+frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
+remus                  MACH_REMUS              REMUS                   3031
+at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
+at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
+kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
+oratisrouter           MACH_ORATISROUTER       ORATISROUTER            3035
+armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
+spdm                   MACH_SPDM               SPDM                    3037
+gtib                   MACH_GTIB               GTIB                    3038
+dgm3240                        MACH_DGM3240            DGM3240                 3039
+atlas_i_lpe            MACH_ATLAS_I_LPE        ATLAS_I_LPE             3040
+htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
+tricorder              MACH_TRICORDER          TRICORDER               3042
+tx28                   MACH_TX28               TX28                    3043
+bstbrd                 MACH_BSTBRD             BSTBRD                  3044
+pwb3090                        MACH_PWB3090            PWB3090                 3045
+idea6410               MACH_IDEA6410           IDEA6410                3046
+qbc9263                        MACH_QBC9263            QBC9263                 3047
+borabora               MACH_BORABORA           BORABORA                3048
+valdez                 MACH_VALDEZ             VALDEZ                  3049
+ls9g20                 MACH_LS9G20             LS9G20                  3050
+mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
+s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
+controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
+tin307                 MACH_TIN307             TIN307                  3054
+tin510                 MACH_TIN510             TIN510                  3055
+bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
+tem3x30                        MACH_TEM3X30            TEM3X30                 3058
+harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
+msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
+spear900               MACH_SPEAR900           SPEAR900                3061
+pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
index e936804b7508758112b853bfca42f9992ebadb44..984221abb66d3d470483f0f14a7271ef944dce21 100644 (file)
@@ -18,7 +18,8 @@
 
 static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        ret = v->counter += i;
        local_irq_restore(flags);
@@ -30,7 +31,8 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 
 static __inline__ int atomic_sub_return(int i, atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        ret = v->counter -= i;
        local_irq_restore(flags);
@@ -42,7 +44,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 static __inline__ int atomic_inc_return(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        v->counter++;
        ret = v->counter;
@@ -64,7 +67,8 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 
 static __inline__ int atomic_dec_return(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
@@ -76,7 +80,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
-       int ret,flags;
+       unsigned long flags;
+       int ret;
        local_irq_save(flags);
        --v->counter;
        ret = v->counter;
index d98d97685f068da986c8639bd29953d94a8ceb8d..16bf1560ff680c87fad71fa4f06af4a0b2622213 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/linkage.h>
 
+struct pt_regs;
+
 /*
  * switch_to(n) should switch tasks to task ptr, first checking that
  * ptr isn't the current task, in which case it does nothing.  This
@@ -155,6 +157,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #define arch_align_stack(x) (x)
 
-void die(char *str, struct pt_regs *fp, unsigned long err);
+extern void die(const char *str, struct pt_regs *fp, unsigned long err);
 
 #endif /* _H8300_SYSTEM_H */
index dc1ac0243b78d0532640c55517753cdbdeacc8f0..aaf5e5a48f93167c2513a00e56155b166ee631b8 100644 (file)
@@ -56,8 +56,8 @@ int kernel_execve(const char *filename,
                  const char *const envp[])
 {
        register long res __asm__("er0");
-       register char *const *_c __asm__("er3") = envp;
-       register char *const *_b __asm__("er2") = argv;
+       register const char *const *_c __asm__("er3") = envp;
+       register const char *const *_b __asm__("er2") = argv;
        register const char * _a __asm__("er1") = filename;
        __asm__ __volatile__ ("mov.l %1,er0\n\t"
                        "trapa  #0\n\t"
index 3c0b66bc669eca40ced007cccb5d2ce5d6695904..dfa05bd908b6b22139c388ebbe983721c39b6100 100644 (file)
@@ -96,7 +96,7 @@ static void dump(struct pt_regs *fp)
        printk("\n\n");
 }
 
-void die(char *str, struct pt_regs *fp, unsigned long err)
+void die(const char *str, struct pt_regs *fp, unsigned long err)
 {
        static int diecount;
 
index a91b2713451da0e5290a3889e1bcebbb530c0e4b..ef332136f96dababcbba1e574d2143847f7b368c 100644 (file)
@@ -150,6 +150,8 @@ SECTIONS {
                _sdata = . ;
                DATA_DATA
                CACHELINE_ALIGNED_DATA(32)
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+               *(.data..shared_aligned)
                INIT_TASK_DATA(THREAD_SIZE)
                _edata = . ;
        } > DATA
index a67aeed17d405fbc37e3a44a860c67a3a1dd9fcf..debc5ed96d6e087a2e241e47421f537a13bf1feb 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
 #define __ARCH_POWERPC_ASM_FSLDMA_H__
 
+#include <linux/slab.h>
 #include <linux/dmaengine.h>
 
 /*
index 4d6681dce8163c98f2f2f48828f67cf985dc49f6..c571cd3c14532db29777c07977582e736be5a9aa 100644 (file)
@@ -575,13 +575,19 @@ __secondary_start:
        /* Initialize the kernel stack.  Just a repeat for iSeries.      */
        LOAD_REG_ADDR(r3, current_set)
        sldi    r28,r24,3               /* get current_set[cpu#]         */
-       ldx     r1,r3,r28
-       addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
-       std     r1,PACAKSAVE(r13)
+       ldx     r14,r3,r28
+       addi    r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD
+       std     r14,PACAKSAVE(r13)
 
        /* Do early setup for that CPU (stab, slb, hash table pointer) */
        bl      .early_setup_secondary
 
+       /*
+        * setup the new stack pointer, but *don't* use this until
+        * translation is on.
+        */
+       mr      r1, r14
+
        /* Clear backchain so we get nice backtraces */
        li      r7,0
        mtlr    r7
index 6bbd7a604d243c9f8f9f906d683a9a20df2f9a43..a7a570dcdd571c8fb861562e549dbf094147fb32 100644 (file)
@@ -810,6 +810,9 @@ relocate_new_kernel:
        isync
        sync
 
+       mfspr   r3, SPRN_PIR /* current core we are running on */
+       mr      r4, r5 /* load physical address of chunk called */
+
        /* jump to the entry point, usually the setup routine */
        mtlr    r5
        blrl
index ce53dfa7130dfc74f551c2360746f206154cf925..8533b3b83f5d0e35b50fb39eefb663e6d81975b0 100644 (file)
@@ -577,20 +577,11 @@ void timer_interrupt(struct pt_regs * regs)
         * some CPUs will continuue to take decrementer exceptions */
        set_dec(DECREMENTER_MAX);
 
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
 #endif
 
-       now = get_tb_or_rtc();
-       if (now < decrementer->next_tb) {
-               /* not time for this event yet */
-               now = decrementer->next_tb - now;
-               if (now <= DECREMENTER_MAX)
-                       set_dec((int)now);
-               trace_timer_interrupt_exit(regs);
-               return;
-       }
        old_regs = set_irq_regs(regs);
        irq_enter();
 
@@ -606,8 +597,16 @@ void timer_interrupt(struct pt_regs * regs)
                get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-       if (evt->event_handler)
-               evt->event_handler(evt);
+       now = get_tb_or_rtc();
+       if (now >= decrementer->next_tb) {
+               decrementer->next_tb = ~(u64)0;
+               if (evt->event_handler)
+                       evt->event_handler(evt);
+       } else {
+               now = decrementer->next_tb - now;
+               if (now <= DECREMENTER_MAX)
+                       set_dec((int)now);
+       }
 
 #ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
index f9751c8905be5db684a25555975a60b3b574d881..83068322abd154004325093e1db9a7b24b9da8d7 100644 (file)
@@ -48,8 +48,10 @@ static int mpc837xmds_usb_cfg(void)
                return -1;
 
        np = of_find_node_by_name(NULL, "usb");
-       if (!np)
-               return -ENODEV;
+       if (!np) {
+               ret = -ENODEV;
+               goto out;
+       }
        phy_type = of_get_property(np, "phy_type", NULL);
        if (phy_type && !strcmp(phy_type, "ulpi")) {
                clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN);
@@ -65,8 +67,9 @@ static int mpc837xmds_usb_cfg(void)
        }
 
        of_node_put(np);
+out:
        iounmap(bcsr_regs);
-       return 0;
+       return ret;
 }
 
 /* ************************************************************************
index da64be19d0997b75518ea1cdde2ecd6a16cbf884..aa34cac4eb5cc5db47db0e233c32ea9dcf0c09f6 100644 (file)
@@ -357,6 +357,7 @@ static void __init mpc85xx_mds_setup_arch(void)
 {
 #ifdef CONFIG_PCI
        struct pci_controller *hose;
+       struct device_node *np;
 #endif
        dma_addr_t max = 0xffffffff;
 
index e1467c937450791af1ca1355176e780e22b6b86b..34e00902ce86229bf7aafe28eb5e5c9ae4a632e0 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/pci.h>
 #include <linux/of_platform.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
@@ -97,7 +97,7 @@ static void __init p1022_ds_setup_arch(void)
 #endif
 
 #ifdef CONFIG_SWIOTLB
-       if (lmb_end_of_DRAM() > max) {
+       if (memblock_end_of_DRAM() > max) {
                ppc_swiotlb_enable = 1;
                set_pci_dma_ops(&swiotlb_dma_ops);
                ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
index 227c1c3d585e028874cc895afc3241afff0b3caf..72d8054fa739055ddc03521aaa6e0ff961ca3670 100644 (file)
@@ -129,20 +129,35 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
+       char *data_buf;
        int cc_token;
-       int rc;
+       int rc = -1;
 
        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;
 
-       spin_lock(&rtas_data_buf_lock);
-       ccwa = (struct cc_workarea *)&rtas_data_buf[0];
+       data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+       if (!data_buf)
+               return NULL;
+
+       ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;
 
-       rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
-       while (rc) {
+       do {
+               /* Since we release the rtas_data_buf lock between configure
+                * connector calls we want to re-populate the rtas_data_buffer
+                * with the contents of the previous call.
+                */
+               spin_lock(&rtas_data_buf_lock);
+
+               memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
+               rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+               memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+               spin_unlock(&rtas_data_buf_lock);
+
                switch (rc) {
                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa);
@@ -197,18 +212,19 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
+       } while (rc);
 
-               rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
+cc_error:
+       kfree(data_buf);
+
+       if (rc) {
+               if (first_dn)
+                       dlpar_free_cc_nodes(first_dn);
+
+               return NULL;
        }
 
-       spin_unlock(&rtas_data_buf_lock);
        return first_dn;
-
-cc_error:
-       if (first_dn)
-               dlpar_free_cc_nodes(first_dn);
-       spin_unlock(&rtas_data_buf_lock);
-       return NULL;
 }
 
 static struct device_node *derive_parent(const char *path)
index 209384b6e0396e232be5c0a53ed0519d3b5e6a3b..4ae933225251e0ae7312ea55f4e00732fc899911 100644 (file)
@@ -399,6 +399,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021E, quirk_fsl_pcie_header);
+DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1021, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header);
 DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header);
index 6425abe5b7dbfe809cd41d8a06634a56367d9d1d..3017532319c8d0df73ba36e3858c5368c17cccaf 100644 (file)
@@ -240,12 +240,13 @@ struct rio_priv {
 
 static void __iomem *rio_regs_win;
 
+#ifdef CONFIG_E500
 static int (*saved_mcheck_exception)(struct pt_regs *regs);
 
 static int fsl_rio_mcheck_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *entry = NULL;
-       unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
+       unsigned long reason = mfspr(SPRN_MCSR);
 
        if (reason & MCSR_BUS_RBERR) {
                reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
@@ -269,6 +270,7 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
        else
                return cur_cpu_spec->machine_check(regs);
 }
+#endif
 
 /**
  * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
@@ -1517,8 +1519,10 @@ int fsl_rio_setup(struct platform_device *dev)
        fsl_rio_doorbell_init(port);
        fsl_rio_port_write_init(port);
 
+#ifdef CONFIG_E500
        saved_mcheck_exception = ppc_md.machine_check_exception;
        ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
+#endif
        /* Ensure that RFXE is set */
        mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
 
index 3da8014931c9c7a99afcebf52814b50d03d02f35..90020de4dcf2c7570e371569997506e5642c8c5f 100644 (file)
@@ -640,6 +640,7 @@ unsigned int qe_get_num_of_snums(void)
                if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
                        /* No QE ever has fewer than 28 SNUMs */
                        pr_err("QE: number of snum is invalid\n");
+                       of_node_put(qe);
                        return -EINVAL;
                }
        }
index 50794137d710d71bfa197cbb055d14377f2cd770..675c9e11ada5541085e2fd6e6129272c05713df4 100644 (file)
@@ -166,7 +166,6 @@ sparc_breakpoint (struct pt_regs *regs)
 {
        siginfo_t info;
 
-       lock_kernel();
 #ifdef DEBUG_SPARC_BREAKPOINT
         printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
 #endif
@@ -180,7 +179,6 @@ sparc_breakpoint (struct pt_regs *regs)
 #ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
 #endif
-       unlock_kernel();
 }
 
 asmlinkage int
index f8514e291e1559ecda6994ff5be883cc6805128c..12b9f352595f44e26c3f5730d3a84e8557d8cca8 100644 (file)
@@ -323,7 +323,6 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        enum direction dir;
 
-       lock_kernel();
        if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
           (((insn >> 30) & 3) != 3))
                goto kill_user;
@@ -377,5 +376,5 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 kill_user:
        user_mna_trap_fault(regs, insn);
 out:
-       unlock_kernel();
+       ;
 }
index f24d298bda29d1ca0457b0b7060a366ace463756..b351770cbdd6aded05ca53554b30c8716a24a4ce 100644 (file)
@@ -112,7 +112,6 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
        struct thread_info *tp = current_thread_info();
        int window;
 
-       lock_kernel();
        flush_user_windows();
        for(window = 0; window < tp->w_saved; window++) {
                unsigned long sp = tp->rwbuf_stkptrs[window];
@@ -123,5 +122,4 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
                        do_exit(SIGILL);
        }
        tp->w_saved = 0;
-       unlock_kernel();
 }
index f35eb45d6576258e7dba242934d76376380b531c..c4191b3b7056c6ad16563375f529d1480668a26e 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr, enum km_type type);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
index 51cfd730ac5d145ed9f184f643c254d441dbeaab..1f99ecfc48e178312860f177160b735c2655946f 100644 (file)
@@ -152,9 +152,14 @@ struct x86_emulate_ops {
 struct operand {
        enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
        unsigned int bytes;
-       unsigned long orig_val, *ptr;
+       union {
+               unsigned long orig_val;
+               u64 orig_val64;
+       };
+       unsigned long *ptr;
        union {
                unsigned long val;
+               u64 val64;
                char valptr[sizeof(unsigned long) + 2];
        };
 };
index 404a880ea325510ab2f659e65c02b54314948cfb..d395540ff8948f112a8de16774f2b17bfb1dd695 100644 (file)
@@ -27,6 +27,9 @@ extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
                                            int node);
 extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
 
+#ifdef CONFIG_PCI
+
+#ifdef CONFIG_PCI_DOMAINS
 static inline int pci_domain_nr(struct pci_bus *bus)
 {
        struct pci_sysdata *sd = bus->sysdata;
@@ -37,13 +40,12 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 {
        return pci_domain_nr(bus);
 }
-
+#endif
 
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
 
-#ifdef CONFIG_PCI
 extern unsigned int pcibios_assign_all_busses(void);
 extern int pci_legacy_init(void);
 # ifdef CONFIG_ACPI
index 224392d8fe8c095390439a10ea45af2e21bc0930..5e975298fa819ff3ca648e647f531700edb1f408 100644 (file)
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
-       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
                kfree(b);
                err = -ENOMEM;
                goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, c->llc_shared_map);
+       cpumask_set_cpu(cpu, b->cpus);
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
index c2a8b26d4feacf4ac6b6b022c0a9c590fbbcef85..d9368eeda3090eb9f53482704e000e600b6237e3 100644 (file)
@@ -202,10 +202,11 @@ static int therm_throt_process(bool new_event, int event, int level)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
+                               unsigned int cpu)
 {
        int err;
-       struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
        if (err)
@@ -251,7 +252,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
-               err = thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
@@ -287,7 +288,7 @@ static __init int thermal_throttle_init_device(void)
 #endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
-               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
                WARN_ON(err);
        }
 #ifdef CONFIG_HOTPLUG_CPU
index f2da20fda02ddf6fcd449a88ba399fe4ed44af2a..3efdf2870a3572263add749326aa5925df7c461f 100644 (file)
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
@@ -1200,12 +1200,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1222,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back nmi. Two events
+                * trigger 'simultaneously' raising two back-to-back
+                * NMIs. If the first NMI handles both, the latter
+                * will be empty and daze the CPU. So, we drop it to
+                * avoid false-positive 'unknown nmi' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back nmis: The
+                * first handles more than one counter, the 2nd
+                * handles only one counter and the 3rd handles no
+                * counter.
+                *
+                * This is the 2nd nmi because the previous was
+                * handling more than one counter. We will mark the
+                * next (3rd) and then drop it if unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
index d8d86d01400866c6320001fb715301276747cd52..ee05c90012d269e66de9ffb6a5952d1434317c1e 100644 (file)
@@ -712,7 +712,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
-       u64 ack, status;
+       u64 status;
+       int handled = 0;
 
        perf_sample_data_init(&data, 0);
 
@@ -728,6 +729,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
        loops = 0;
 again:
+       intel_pmu_ack_status(status);
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
@@ -736,19 +738,22 @@ again:
        }
 
        inc_irq_stat(apic_perf_irqs);
-       ack = status;
 
        intel_pmu_lbr_read();
 
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
-       if (__test_and_clear_bit(62, (unsigned long *)&status))
+       if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+               handled++;
                x86_pmu.drain_pebs(regs);
+       }
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
 
+               handled++;
+
                if (!test_bit(bit, cpuc->active_mask))
                        continue;
 
@@ -761,8 +766,6 @@ again:
                        x86_pmu_stop(event);
        }
 
-       intel_pmu_ack_status(ack);
-
        /*
         * Repeat if there is more work to be done:
         */
@@ -772,7 +775,7 @@ again:
 
 done:
        intel_pmu_enable_all(0);
-       return 1;
+       return handled;
 }
 
 static struct event_constraint *
index 7e578e9cc58bd5062d30776d431dabdcf724ac67..b560db3305be16ff954fc416137d17b17189b5e7 100644 (file)
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                inc_irq_stat(apic_perf_irqs);
        }
 
-       return handled > 0;
+       return handled;
 }
 
 /*
index a874495b3673baeb27467d144995d885f2f94ebc..e2a5952573905b2eeac3d18060be54532dbfdfde 100644 (file)
@@ -45,8 +45,7 @@ void __init setup_trampoline_page_table(void)
        /* Copy kernel address range */
        clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS,
-                             KERNEL_PGD_BOUNDARY));
+                       KERNEL_PGD_PTRS);
 
        /* Initialize low mappings */
        clone_pgd_range(trampoline_pg_dir,
index d632934cb6386947352650f262745eb3c93c68ce..26a863a9c2a815ec797b6211cd3de5e0b9cbed8e 100644 (file)
@@ -655,7 +655,7 @@ void restore_sched_clock_state(void)
 
        local_irq_save(flags);
 
-       get_cpu_var(cyc2ns_offset) = 0;
+       __get_cpu_var(cyc2ns_offset) = 0;
        offset = cyc2ns_suspend - sched_clock();
 
        for_each_possible_cpu(cpu)
index b38bd8b92aa6c84ed9a00295eeaac73bade26458..66ca98aafdd6a73d7eea8834d1f1c930384b8268 100644 (file)
@@ -1870,17 +1870,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       u64 old = c->dst.orig_val;
+       u64 old = c->dst.orig_val64;
 
        if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
            ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
-
                c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
                c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
                ctxt->eflags &= ~EFLG_ZF;
        } else {
-               c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
-                      (u32) c->regs[VCPU_REGS_RBX];
+               c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+                       (u32) c->regs[VCPU_REGS_RBX];
 
                ctxt->eflags |= EFLG_ZF;
        }
@@ -2616,7 +2615,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
                                        c->src.valptr, c->src.bytes);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
-               c->src.orig_val = c->src.val;
+               c->src.orig_val64 = c->src.val64;
        }
 
        if (c->src2.type == OP_MEM) {
index 8d10c063d7f207451b087a11a8d7bf0c888f3695..4b7b73ce209894442eddd9df2b8f076408389b12 100644 (file)
@@ -64,6 +64,9 @@ static void pic_unlock(struct kvm_pic *s)
                if (!found)
                        found = s->kvm->bsp_vcpu;
 
+               if (!found)
+                       return;
+
                kvm_vcpu_kick(found);
        }
 }
index ffed06871c5cf389594244086ecb3685496b4258..63c314502993b33c1a53773d111eb69133668179 100644 (file)
@@ -43,7 +43,6 @@ struct kvm_kpic_state {
        u8 irr;         /* interrupt request register */
        u8 imr;         /* interrupt mask register */
        u8 isr;         /* interrupt service register */
-       u8 isr_ack;     /* interrupt ack detection */
        u8 priority_add;        /* highest irq priority */
        u8 irq_base;
        u8 read_reg_select;
@@ -56,6 +55,7 @@ struct kvm_kpic_state {
        u8 init4;               /* true if 4 byte init */
        u8 elcr;                /* PIIX edge/trigger selection */
        u8 elcr_mask;
+       u8 isr_ack;     /* interrupt ack detection */
        struct kvm_pic *pics_state;
 };
 
index 84e236ce76ba9a8afd624cfc4c506ebaa654b926..72fc70cf6184c756b1157f272b0d5e2b7bcc0609 100644 (file)
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 /*
  * Map 'pfn' using fixed map 'type' and protections 'prot'
  */
-void *
+void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        /*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr, enum km_type type)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
index f6b48f6c595176a59c8e2fe5fa145bc11acca118..cfe4faabb0f6792aebfb8330dd55f02406b148dc 100644 (file)
@@ -568,8 +568,13 @@ static int __init init_sysfs(void)
        int error;
 
        error = sysdev_class_register(&oprofile_sysclass);
-       if (!error)
-               error = sysdev_register(&device_oprofile);
+       if (error)
+               return error;
+
+       error = sysdev_register(&device_oprofile);
+       if (error)
+               sysdev_class_unregister(&oprofile_sysclass);
+
        return error;
 }
 
@@ -580,8 +585,10 @@ static void exit_sysfs(void)
 }
 
 #else
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+
+static inline int  init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
+
 #endif /* CONFIG_PM */
 
 static int __init p4_init(char **cpu_type)
@@ -695,6 +702,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
        char *cpu_type = NULL;
        int ret = 0;
 
+       using_nmi = 0;
+
        if (!cpu_has_apic)
                return -ENODEV;
 
@@ -774,7 +783,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 
        mux_init(ops);
 
-       init_sysfs();
+       ret = init_sysfs();
+       if (ret)
+               return ret;
+
        using_nmi = 1;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
index a6809645d212d9cf970b85473e3d9c05bc4ba652..2fef1ef931a06756d364707c28d97e6c440aa484 100644 (file)
@@ -966,7 +966,7 @@ blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 
        /* Currently we do not support hierarchy deeper than two level (0,1) */
        if (parent != cgroup->top_cgroup)
-               return ERR_PTR(-EINVAL);
+               return ERR_PTR(-EPERM);
 
        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
index ee1a1e7e63ccfc3e735566e3725febee1890bb2e..32a1c123dfb36a5fff24857221a9a4d1b79131bf 100644 (file)
@@ -1198,9 +1198,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = (bio->bi_rw & REQ_SYNC);
-       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
-       const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+       const bool sync = !!(bio->bi_rw & REQ_SYNC);
+       const bool unplug = !!(bio->bi_rw & REQ_UNPLUG);
+       const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
        if ((bio->bi_rw & REQ_HARDBARRIER) &&
index 001ab18078f5ba1b8c6f34e021cc02ef401d98b6..0749b89c68852fc824e4afa862d871814c56ddd8 100644 (file)
@@ -511,6 +511,7 @@ int blk_register_queue(struct gendisk *disk)
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
+               kobject_put(&dev->kobj);
                return ret;
        }
 
index 6e7dc87141e48230d0eb82c2bdcf770b0ac581a9..d6b911ac002cd9ef14c76535a6b3b6814243079b 100644 (file)
@@ -142,14 +142,18 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 
 static inline int blk_cpu_to_group(int cpu)
 {
+       int group = NR_CPUS;
 #ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
-       return cpumask_first(mask);
+       group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-       return cpumask_first(topology_thread_cpumask(cpu));
+       group = cpumask_first(topology_thread_cpumask(cpu));
 #else
        return cpu;
 #endif
+       if (likely(group < NR_CPUS))
+               return group;
+       return cpu;
 }
 
 /*
index eb4086f7dfef9eb7efc6d202fb7a979919a551b8..f65c6f01c47580b7e24e7fc6bda1764668cf4d21 100644 (file)
@@ -30,6 +30,7 @@ static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
+static int cfq_group_idle = HZ / 125;
 static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
@@ -147,6 +148,8 @@ struct cfq_queue {
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        struct cfq_group *orig_cfqg;
+       /* Number of sectors dispatched from queue in single dispatch round */
+       unsigned long nr_sectors;
 };
 
 /*
@@ -198,6 +201,8 @@ struct cfq_group {
        struct hlist_node cfqd_node;
        atomic_t ref;
 #endif
+       /* number of requests that are on the dispatch list or inside driver */
+       int dispatched;
 };
 
 /*
@@ -271,6 +276,7 @@ struct cfq_data {
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
+       unsigned int cfq_group_idle;
        unsigned int cfq_latency;
        unsigned int cfq_group_isolation;
 
@@ -378,6 +384,21 @@ CFQ_CFQQ_FNS(wait_busy);
                        &cfqg->service_trees[i][j]: NULL) \
 
 
+static inline bool iops_mode(struct cfq_data *cfqd)
+{
+       /*
+        * If we are not idling on queues and it is a NCQ drive, parallel
+        * execution of requests is on and measuring time is not possible
+        * in most of the cases until and unless we drive shallower queue
+        * depths and that becomes a performance bottleneck. In such cases
+        * switch to start providing fairness in terms of number of IOs.
+        */
+       if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
+               return true;
+       else
+               return false;
+}
+
 static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
 {
        if (cfq_class_idle(cfqq))
@@ -906,7 +927,6 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
                        slice_used = cfqq->allocated_slice;
        }
 
-       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
        return slice_used;
 }
 
@@ -914,19 +934,21 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                                struct cfq_queue *cfqq)
 {
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
-       unsigned int used_sl, charge_sl;
+       unsigned int used_sl, charge;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;
 
        BUG_ON(nr_sync < 0);
-       used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
+       used_sl = charge = cfq_cfqq_slice_usage(cfqq);
 
-       if (!cfq_cfqq_sync(cfqq) && !nr_sync)
-               charge_sl = cfqq->allocated_slice;
+       if (iops_mode(cfqd))
+               charge = cfqq->slice_dispatch;
+       else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
+               charge = cfqq->allocated_slice;
 
        /* Can't update vdisktime while group is on service tree */
        cfq_rb_erase(&cfqg->rb_node, st);
-       cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
+       cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        __cfq_group_service_tree_add(st, cfqg);
 
        /* This group is being expired. Save the context */
@@ -940,6 +962,9 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 
        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                                        st->min_vdisktime);
+       cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
+                       " sect=%u", used_sl, cfqq->slice_dispatch, charge,
+                       iops_mode(cfqd), cfqq->nr_sectors);
        cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
        cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
@@ -1587,6 +1612,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
                cfqq->allocated_slice = 0;
                cfqq->slice_end = 0;
                cfqq->slice_dispatch = 0;
+               cfqq->nr_sectors = 0;
 
                cfq_clear_cfqq_wait_request(cfqq);
                cfq_clear_cfqq_must_dispatch(cfqq);
@@ -1839,6 +1865,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
        BUG_ON(!service_tree);
        BUG_ON(!service_tree->count);
 
+       if (!cfqd->cfq_slice_idle)
+               return false;
+
        /* We never do for idle class queues. */
        if (prio == IDLE_WORKLOAD)
                return false;
@@ -1863,7 +1892,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
-       unsigned long sl;
+       unsigned long sl, group_idle = 0;
 
        /*
         * SSD device without seek penalty, disable idling. But only do so
@@ -1879,8 +1908,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
        /*
         * idle is disabled, either manually or by past process history
         */
-       if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
-               return;
+       if (!cfq_should_idle(cfqd, cfqq)) {
+               /* no queue idling. Check for group idling */
+               if (cfqd->cfq_group_idle)
+                       group_idle = cfqd->cfq_group_idle;
+               else
+                       return;
+       }
 
        /*
         * still active requests from this queue, don't idle
@@ -1907,13 +1941,21 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                return;
        }
 
+       /* There are other queues in the group, don't do group idle */
+       if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+               return;
+
        cfq_mark_cfqq_wait_request(cfqq);
 
-       sl = cfqd->cfq_slice_idle;
+       if (group_idle)
+               sl = cfqd->cfq_group_idle;
+       else
+               sl = cfqd->cfq_slice_idle;
 
        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
        cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
-       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
+       cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
+                       group_idle ? 1 : 0);
 }
 
 /*
@@ -1929,9 +1971,11 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
        cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
        cfq_remove_request(rq);
        cfqq->dispatched++;
+       (RQ_CFQG(rq))->dispatched++;
        elv_dispatch_sort(q, rq);
 
        cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
+       cfqq->nr_sectors += blk_rq_sectors(rq);
        cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
                                        rq_data_dir(rq), rq_is_sync(rq));
 }
@@ -2198,7 +2242,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
                        cfqq = NULL;
                        goto keep_queue;
                } else
-                       goto expire;
+                       goto check_group_idle;
        }
 
        /*
@@ -2226,8 +2270,23 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
-       if (timer_pending(&cfqd->idle_slice_timer) ||
-           (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
+       if (timer_pending(&cfqd->idle_slice_timer)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+               cfqq = NULL;
+               goto keep_queue;
+       }
+
+       /*
+        * If group idle is enabled and there are requests dispatched from
+        * this group, wait for requests to complete.
+        */
+check_group_idle:
+       if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
+           && cfqq->cfqg->dispatched) {
                cfqq = NULL;
                goto keep_queue;
        }
@@ -3375,6 +3434,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;
+       (RQ_CFQG(rq))->dispatched--;
        cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
                        rq_start_time_ns(rq), rq_io_start_time_ns(rq),
                        rq_data_dir(rq), rq_is_sync(rq));
@@ -3404,7 +3464,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
                 * the queue.
                 */
                if (cfq_should_wait_busy(cfqd, cfqq)) {
-                       cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+                       unsigned long extend_sl = cfqd->cfq_slice_idle;
+                       if (!cfqd->cfq_slice_idle)
+                               extend_sl = cfqd->cfq_group_idle;
+                       cfqq->slice_end = jiffies + extend_sl;
                        cfq_mark_cfqq_wait_busy(cfqq);
                        cfq_log_cfqq(cfqd, cfqq, "will busy wait");
                }
@@ -3850,6 +3913,7 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;
+       cfqd->cfq_group_idle = cfq_group_idle;
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
@@ -3922,6 +3986,7 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
 SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
 SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -3954,6 +4019,7 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
 STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
                UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -3975,6 +4041,7 @@ static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
+       CFQ_ATTR(group_idle),
        CFQ_ATTR(low_latency),
        CFQ_ATTR(group_isolation),
        __ATTR_NULL
@@ -4028,6 +4095,12 @@ static int __init cfq_init(void)
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;
 
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+       if (!cfq_group_idle)
+               cfq_group_idle = 1;
+#else
+               cfq_group_idle = 0;
+#endif
        if (cfq_slab_setup())
                return -ENOMEM;
 
index ec585c9554d33c04b973537f9ac3216e332afa50..205b09a5bd9ead9e64d7bcfb60cec924f1a22cbd 100644 (file)
@@ -1009,18 +1009,19 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
        struct elevator_queue *old_elevator, *e;
        void *data;
+       int err;
 
        /*
         * Allocate new elevator
         */
        e = elevator_alloc(q, new_e);
        if (!e)
-               return 0;
+               return -ENOMEM;
 
        data = elevator_init_queue(q, e);
        if (!data) {
                kobject_put(&e->kobj);
-               return 0;
+               return -ENOMEM;
        }
 
        /*
@@ -1043,7 +1044,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        __elv_unregister_queue(old_elevator);
 
-       if (elv_register_queue(q))
+       err = elv_register_queue(q);
+       if (err)
                goto fail_register;
 
        /*
@@ -1056,7 +1058,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
        blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
 
-       return 1;
+       return 0;
 
 fail_register:
        /*
@@ -1071,17 +1073,19 @@ fail_register:
        queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
        spin_unlock_irq(q->queue_lock);
 
-       return 0;
+       return err;
 }
 
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
-                         size_t count)
+/*
+ * Switch this queue to the given IO scheduler.
+ */
+int elevator_change(struct request_queue *q, const char *name)
 {
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;
 
        if (!q->elevator)
-               return count;
+               return -ENXIO;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
@@ -1092,13 +1096,27 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 
        if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
                elevator_put(e);
-               return count;
+               return 0;
        }
 
-       if (!elevator_switch(q, e))
-               printk(KERN_ERR "elevator: switch to %s failed\n",
-                                                       elevator_name);
-       return count;
+       return elevator_switch(q, e);
+}
+EXPORT_SYMBOL(elevator_change);
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+                         size_t count)
+{
+       int ret;
+
+       if (!q->elevator)
+               return count;
+
+       ret = elevator_change(q, name);
+       if (!ret)
+               return count;
+
+       printk(KERN_ERR "elevator: switch to %s failed\n", name);
+       return ret;
 }
 
 ssize_t elv_iosched_show(struct request_queue *q, char *name)
index 1cd497d7a15aa2cdbd96be5139ca64be8135e528..e573077f1672e06bf159c9a242564c5436fc0a75 100644 (file)
@@ -101,13 +101,13 @@ config CRYPTO_MANAGER2
        select CRYPTO_BLKCIPHER2
        select CRYPTO_PCOMP2
 
-config CRYPTO_MANAGER_TESTS
-       bool "Run algolithms' self-tests"
+config CRYPTO_MANAGER_DISABLE_TESTS
+       bool "Disable run-time self tests"
        default y
        depends on CRYPTO_MANAGER2
        help
-         Run cryptomanager's tests for the new crypto algorithms being
-         registered.
+         Disable run-time self tests that normally take place at
+         algorithm registration.
 
 config CRYPTO_GF128MUL
        tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
index b8c59b889c6ea579e7a6bacd7241bbbc86cbe44c..f669822a7a443c81a04c8fb61d8d382a64546542 100644 (file)
@@ -47,8 +47,11 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
        walk->data = crypto_kmap(walk->pg, 0);
        walk->data += offset;
 
-       if (offset & alignmask)
-               nbytes = alignmask + 1 - (offset & alignmask);
+       if (offset & alignmask) {
+               unsigned int unaligned = alignmask + 1 - (offset & alignmask);
+               if (nbytes > unaligned)
+                       nbytes = unaligned;
+       }
 
        walk->entrylen -= nbytes;
        return nbytes;
index 40bd391f34d90fc0b0619809beb0f09759b307f5..791d194958fa18ac0fb8006e5d565f789c1e4f92 100644 (file)
@@ -206,13 +206,16 @@ err:
        return NOTIFY_OK;
 }
 
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
 static int cryptomgr_test(void *data)
 {
        struct crypto_test_param *param = data;
        u32 type = param->type;
        int err = 0;
 
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+       goto skiptest;
+#endif
+
        if (type & CRYPTO_ALG_TESTED)
                goto skiptest;
 
@@ -267,7 +270,6 @@ err_put_module:
 err:
        return NOTIFY_OK;
 }
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
 
 static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
                            void *data)
@@ -275,10 +277,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
        switch (msg) {
        case CRYPTO_MSG_ALG_REQUEST:
                return cryptomgr_schedule_probe(data);
-#ifdef CONFIG_CRYPTO_MANAGER_TESTS
        case CRYPTO_MSG_ALG_REGISTER:
                return cryptomgr_schedule_test(data);
-#endif
        }
 
        return NOTIFY_DONE;
index abd980c729ebce5a57e833a0937bf233dc8937cc..fa8c8f78c8d4c1838d121e6131b05162858b2802 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "internal.h"
 
-#ifndef CONFIG_CRYPTO_MANAGER_TESTS
+#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 
 /* a perfect nop */
 int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
@@ -2542,6 +2542,6 @@ non_fips_alg:
        return -EINVAL;
 }
 
-#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
+#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
 
 EXPORT_SYMBOL_GPL(alg_test);
index 1f67057af2a571991481bb761d157320b9d3f329..3ba8d1f44a73ed3b234e810f5316f6531e4d6718 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
        return status;
 }
 
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+                                       u32 support,
+                                       u32 *control)
 {
        acpi_status status;
-       u32 support_set, result, capbuf[3];
+       u32 result, capbuf[3];
+
+       support &= OSC_PCI_SUPPORT_MASKS;
+       support |= root->osc_support_set;
 
-       /* do _OSC query for all possible controls */
-       support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
        capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = support_set;
-       capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+       capbuf[OSC_SUPPORT_TYPE] = support;
+       if (control) {
+               *control &= OSC_PCI_CONTROL_MASKS;
+               capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+       } else {
+               /* Run _OSC query for all possible controls. */
+               capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+       }
 
        status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
        if (ACPI_SUCCESS(status)) {
-               root->osc_support_set = support_set;
-               root->osc_control_qry = result;
-               root->osc_queried = 1;
+               root->osc_support_set = support;
+               if (control)
+                       *control = result;
        }
        return status;
 }
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
        if (ACPI_FAILURE(status))
                return status;
        mutex_lock(&osc_lock);
-       status = acpi_pci_query_osc(root, flags);
+       status = acpi_pci_query_osc(root, flags, NULL);
        mutex_unlock(&osc_lock);
        return status;
 }
@@ -365,55 +373,70 @@ out:
 EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 
 /**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits the control of is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req.  If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
  *
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success.  On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
  **/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
+       struct acpi_pci_root *root;
        acpi_status status;
-       u32 control_req, result, capbuf[3];
+       u32 ctrl, capbuf[3];
        acpi_handle tmp;
-       struct acpi_pci_root *root;
 
-       status = acpi_get_handle(handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
+       if (!mask)
+               return AE_BAD_PARAMETER;
 
-       control_req = (flags & OSC_PCI_CONTROL_MASKS);
-       if (!control_req)
+       ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+       if ((ctrl & req) != req)
                return AE_TYPE;
 
        root = acpi_pci_find_root(handle);
        if (!root)
                return AE_NOT_EXIST;
 
+       status = acpi_get_handle(handle, "_OSC", &tmp);
+       if (ACPI_FAILURE(status))
+               return status;
+
        mutex_lock(&osc_lock);
+
+       *mask = ctrl | root->osc_control_set;
        /* No need to evaluate _OSC if the control was already granted. */
-       if ((root->osc_control_set & control_req) == control_req)
+       if ((root->osc_control_set & ctrl) == ctrl)
                goto out;
 
-       /* Need to query controls first before requesting them */
-       if (!root->osc_queried) {
-               status = acpi_pci_query_osc(root, root->osc_support_set);
+       /* Need to check the available controls bits before requesting them. */
+       while (*mask) {
+               status = acpi_pci_query_osc(root, root->osc_support_set, mask);
                if (ACPI_FAILURE(status))
                        goto out;
+               if (ctrl == *mask)
+                       break;
+               ctrl = *mask;
        }
-       if ((root->osc_control_qry & control_req) != control_req) {
-               printk(KERN_DEBUG
-                      "Firmware did not grant requested _OSC control\n");
+
+       if ((ctrl & req) != req) {
                status = AE_SUPPORT;
                goto out;
        }
 
        capbuf[OSC_QUERY_TYPE] = 0;
        capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-       capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
-       status = acpi_pci_run_osc(handle, capbuf, &result);
+       capbuf[OSC_CONTROL_TYPE] = ctrl;
+       status = acpi_pci_run_osc(handle, capbuf, mask);
        if (ACPI_SUCCESS(status))
-               root->osc_control_set = result;
+               root->osc_control_set = *mask;
 out:
        mutex_unlock(&osc_lock);
        return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
        if (flags != base_flags)
                acpi_pci_osc_support(root, flags);
 
-       status = acpi_pci_osc_control_set(root->device->handle,
-                                         OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
-               pcie_no_aspm();
-       }
-
        pci_acpi_add_bus_pm_notifier(device, root->bus);
        if (device->wakeup.flags.run_wake)
                device_set_run_wake(root->bus->bridge, true);
index 013727b20417226249ca942d28708164a497c2dc..ff1c945fba98e39de55b42ce46a5e9af26b34c45 100644 (file)
@@ -253,6 +253,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
index 3971bc0a4838235ea26f4bff0b97ec7dd3335b69..d712675d0a9657d3cc7742b74220aaf05d0fa537 100644 (file)
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
        { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        /* SATA Controller IDE (CPT) */
        { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+       /* SATA Controller IDE (PBG) */
+       { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
        { }     /* terminate list */
 };
 
index 666850d31df2c304b9d1409e21e20355d52f2a65..68dc6785472f86932f769029ae6046a0dffeec47 100644 (file)
@@ -1326,7 +1326,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
        /* issue the first D2H Register FIS */
        msecs = 0;
        now = jiffies;
-       if (time_after(now, deadline))
+       if (time_after(deadline, now))
                msecs = jiffies_to_msecs(deadline - now);
 
        tf.ctl |= ATA_SRST;
index c035b3d041ee1572492d7a17f56323bde0fccacd..932eaee5024527f4e617064726e39db808c17e87 100644 (file)
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
  */
 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
 {
+       unsigned int ehi_flags = ATA_EHI_QUIET;
        int rc;
 
        /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
         */
        ata_lpm_enable(host);
 
-       rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+       /*
+        * On some hardware, device fails to respond after spun down
+        * for suspend.  As the device won't be used before being
+        * resumed, we don't need to touch the device.  Ask EH to skip
+        * the usual stuff and proceed directly to suspend.
+        *
+        * http://thread.gmane.org/gmane.linux.ide/46764
+        */
+       if (mesg.event == PM_EVENT_SUSPEND)
+               ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+
+       rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
        if (rc == 0)
                host->dev->power.power_state = mesg;
        return rc;
index c9ae299b83428d039c13cce845e7acc497bcff13..e48302eae55fcd2ad619564c9cd8237c722efb29 100644 (file)
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
        if (link->flags & ATA_LFLAG_DISABLED)
                return 1;
 
+       /* skip if explicitly requested */
+       if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
+               return 1;
+
        /* thaw frozen port and recover failed devices */
        if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
                return 0;
index 3b82d8ef76f0ffc81e6bb62e9d3ea19d1be57195..e30c537cce32f99ab7fcb07f7f124cd920c4cd4b 100644 (file)
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                if (ioaddr->ctl_addr)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
+               ata_wait_idle(ap);
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                iowrite8(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }
+
+       ata_wait_idle(ap);
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
 
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                     u8 status, int in_wq)
 {
-       struct ata_eh_info *ehi = &ap->link.eh_info;
+       struct ata_link *link = qc->dev->link;
+       struct ata_eh_info *ehi = &link->eh_info;
        unsigned long flags = 0;
        int poll_next;
 
@@ -1298,8 +1302,14 @@ fsm_start:
 }
 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
 
-void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay)
+void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
 {
+       struct ata_port *ap = link->ap;
+
+       WARN_ON((ap->sff_pio_task_link != NULL) &&
+               (ap->sff_pio_task_link != link));
+       ap->sff_pio_task_link = link;
+
        /* may fail if ata_sff_flush_pio_task() in progress */
        queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
                           msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
 {
        struct ata_port *ap =
                container_of(work, struct ata_port, sff_pio_task.work);
+       struct ata_link *link = ap->sff_pio_task_link;
        struct ata_queued_cmd *qc;
        u8 status;
        int poll_next;
 
+       BUG_ON(ap->sff_pio_task_link == NULL); 
        /* qc can be NULL if timeout occurred */
-       qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       if (!qc)
+       qc = ata_qc_from_tag(ap, link->active_tag);
+       if (!qc) {
+               ap->sff_pio_task_link = NULL;
                return;
+       }
 
 fsm_start:
        WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
                msleep(2);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
-                       ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE);
+                       ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
                        return;
                }
        }
 
+       /*
+        * hsm_move() may trigger another command to be processed.
+        * clean the link beforehand.
+        */
+       ap->sff_pio_task_link = NULL;
        /* move the HSM */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);
 
@@ -1376,6 +1395,7 @@ fsm_start:
 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* Use polling pio if the LLD doesn't handle
         * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                ap->hsm_task_state = HSM_ST_LAST;
 
                if (qc->tf.flags & ATA_TFLAG_POLLING)
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                break;
 
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                if (qc->tf.flags & ATA_TFLAG_WRITE) {
                        /* PIO data out protocol */
                        ap->hsm_task_state = HSM_ST_FIRST;
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
 
                        /* always send first data block using the
                         * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                        ap->hsm_task_state = HSM_ST;
 
                        if (qc->tf.flags & ATA_TFLAG_POLLING)
-                               ata_sff_queue_pio_task(ap, 0);
+                               ata_sff_queue_pio_task(link, 0);
 
                        /* if polling, ata_sff_pio_task() handles the
                         * rest.  otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                /* send cdb by polling if no cdb interrupt */
                if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
                    (qc->tf.flags & ATA_TFLAG_POLLING))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
+       struct ata_link *link = qc->dev->link;
 
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 
                /* send cdb by polling if no cdb interrupt */
                if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
-                       ata_sff_queue_pio_task(ap, 0);
+                       ata_sff_queue_pio_task(link, 0);
                break;
 
        default:
index ba43f0f8c880a21aa1b8b1d3b9ae3d8c3487d415..2215632e4b317684a818b0ee5cbb7037fe064079 100644 (file)
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 
        /* Odd numbered device ids are the units with enable bits (the -R cards) */
-       if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
+       if ((pdev->device & 1) &&
+           !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
                return -ENOENT;
 
        return ata_sff_prereset(link, deadline);
index 5e659885de162e7ef3cf38e3e1f8a67098108712..ac8d7d97e4085d4122b33fbd58bde6951f3ef2e7 100644 (file)
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
                        tf->lbam,
                        tf->lbah);
        }
+
+       ata_wait_idle(ap);
 }
 
 static int via_port_start(struct ata_port *ap)
index 81982594a014b603aa2ebd8b5944ed13049c929b..a9fd9709c2627c7514fe74a8ec9ebc26f09b44f9 100644 (file)
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
        }
 
        if (qc->tf.flags & ATA_TFLAG_POLLING)
-               ata_sff_queue_pio_task(ap, 0);
+               ata_sff_queue_pio_task(link, 0);
        return 0;
 }
 
index 31064df1370a96320f548d1ce5f191eaf2e97051..6124c2fd2d33440f022aa45b369bd7fb7a850c8a 100644 (file)
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
+       if (h->Qdepth > h->maxQsinceinit)
+               h->maxQsinceinit = h->Qdepth;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
 }
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
        misc_fw_support = readl(&cfgtable->misc_fw_support);
        use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
+       /* The doorbell reset seems to cause lockups on some Smart
+        * Arrays (e.g. P410, P410i, maybe others).  Until this is
+        * fixed or at least isolated, avoid the doorbell reset.
+        */
+       use_doorbell = 0;
+
        rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
        if (rc)
                goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        h->scatter_list = kmalloc(h->max_commands *
                                                sizeof(struct scatterlist *),
                                                GFP_KERNEL);
+       if (!h->scatter_list)
+               goto clean4;
+
        for (k = 0; k < h->nr_cmds; k++) {
                h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
                                                        h->maxsgentries,
index f3c636d237187df21879c0e8acc4a4c943e59ad2..91797bbbe702f01afc5770f45618487ab21054da 100644 (file)
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
+               bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index b82c5ce5e9dfaf3bcc1d81b68554de8cdff0493c..76fa3deaee84059d4431f7763981311210f16f06 100644 (file)
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
        host->breq->queuedata = host;
 
        /* mflash is random device, thanx for the noop */
-       elevator_exit(host->breq->elevator);
-       err = elevator_init(host->breq, "noop");
+       err = elevator_change(host->breq, "noop");
        if (err) {
                printk(KERN_ERR "%s:%d (elevator_init) fail\n",
                                __func__, __LINE__);
index 710af89b176d0b8a7fcbed56dbb3a56b762e27f8..eab58db5f91cd9cfd6ffa5302761780ac8b3f373 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
+#include <linux/intel-gtt.h>
 
 #include "intel-gtt.c"
 
@@ -815,11 +816,19 @@ static const struct intel_driver_description {
            "HD Graphics", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", NULL, &intel_i965_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+           "Sandybridge", NULL, &intel_gen6_driver },
+       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
            "Sandybridge", NULL, &intel_gen6_driver },
        { 0, 0, NULL, NULL, NULL }
 };
@@ -1044,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
        ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
        ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+       ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
        { }
 };
 
index 08d47532e60590f169f74995007e745623c4cd66..ee189c74d345ea98062ddbb914c87cb206691c59 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * Common Intel AGPGART and GTT definitions.
  */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
 
 /* Intel registers */
 #define INTEL_APSIZE   0xb4
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG          0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG  0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG  0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG  0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB             0x0100  /* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG         0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG         0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG    0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB           0x0104  /* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG       0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG       0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG  0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB           0x0108  /* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG           0x010A
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
 #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
                IS_SNB)
+
+#endif
index d22ffb811bf2cea4001a97d485327d668ff9d85a..75e0a3497888d295d25dd414bda8a2ce69eee724 100644 (file)
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
         .type = INTEL_AGP_CACHED_MEMORY}
 };
 
+#define INTEL_AGP_UNCACHED_MEMORY              0
+#define INTEL_AGP_CACHED_MEMORY_LLC            1
+#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
+#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
+
+static struct gatt_mask intel_gen6_masks[] =
+{
+       {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
+        .type = INTEL_AGP_UNCACHED_MEMORY },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
+       {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
+         .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+};
+
 static struct _intel_private {
        struct pci_dev *pcidev; /* device one */
        u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
 {
        int i, j;
-       u32 cache_bits = 0;
-
-       if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-           agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
-       {
-               cache_bits = GEN6_PTE_LLC_MLC;
-       }
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
                return 0;
 }
 
+static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
+                                       int type)
+{
+       unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
+       unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
+
+       if (type_mask == AGP_USER_UNCACHED_MEMORY)
+               return INTEL_AGP_UNCACHED_MEMORY;
+       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
+               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
+                             INTEL_AGP_CACHED_MEMORY_LLC_MLC;
+       else /* set 'normal'/'cached' to LLC by default */
+               return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
+                             INTEL_AGP_CACHED_MEMORY_LLC;
+}
+
+
 static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
 {
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
                        gtt_entries = 0;
                        break;
                }
-       } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
-                  agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+       } else if (IS_SNB) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
                intel_i915_setup_chipset_flush();
        }
 
-       if (intel_private.ifp_resource.start) {
+       if (intel_private.ifp_resource.start)
                intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
-               if (!intel_private.i9xx_flush_page)
-                       dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
-       }
+       if (!intel_private.i9xx_flush_page)
+               dev_err(&intel_private.pcidev->dev,
+                       "can't ioremap flush page - no chipset flushing\n");
 }
 
 static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 
        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-       if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+       if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
            mask_type != INTEL_AGP_CACHED_MEMORY)
                goto out_err;
 
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
                                            dma_addr_t addr, int type)
 {
-       /* Shift high bits down */
-       addr |= (addr >> 28) & 0xff;
+       /* gen6 has bit11-4 for physical addr bit39-32 */
+       addr |= (addr >> 28) & 0xff0;
 
        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
                break;
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
        case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+       case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
                *gtt_offset = MB(2);
 
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
        .fetch_size             = intel_i9xx_fetch_size,
        .cleanup                = intel_i915_cleanup,
        .mask_memory            = intel_gen6_mask_memory,
-       .masks                  = intel_i810_masks,
+       .masks                  = intel_gen6_masks,
        .agp_enable             = intel_i810_agp_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
-       .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
+       .agp_type_to_mask_type  = intel_gen6_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
 #ifdef USE_PCI_DMA_API
        .agp_map_page           = intel_agp_map_page,
index 1acdb25095112ef538072e8d153a889476d1359f..a3f5e381e74647268517a22c6f65d7c641277f3d 100644 (file)
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
 
 static int n2rng_data_read(struct hwrng *rng, u32 *data)
 {
-       struct n2rng *np = rng->priv;
+       struct n2rng *np = (struct n2rng *) rng->priv;
        unsigned long ra = __pa(&np->test_data);
        int len;
 
index 949067a0bd4743151515382b07ccf7aecad11316..613c852ee0feced279c50f6850e9e30c7c870111 100644 (file)
@@ -355,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
                if (*stp == '\0')
                        stp = NULL;
 
-               if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+               if (tty_line >= 0 && tty_line < p->num && p->ops &&
                    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
                        res = tty_driver_kref_get(p);
                        *line = tty_line;
index 50590c7f2c01ac4c2a71595e94fbebe930081079..281aada7b4a110a2d9f47d5e3e780596a36c298e 100644 (file)
@@ -906,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
                         * bottom of buffer
                         */
                        old_origin += (old_rows - new_rows) * old_row_size;
-                       end = vc->vc_scr_end;
                } else {
                        /*
                         * Cursor is in no man's land, copy 1/2 screenful
                         * from the top and bottom of cursor position
                         */
                        old_origin += (vc->vc_y - new_rows/2) * old_row_size;
-                       end = old_origin + (old_row_size * new_rows);
                }
-       } else
-               /*
-                * Cursor near the top, copy contents from the top of buffer
-                */
-               end = (old_rows > new_rows) ? old_origin +
-                       (old_row_size * new_rows) :
-                       vc->vc_scr_end;
+       }
+
+       end = old_origin + old_row_size * min(old_rows, new_rows);
 
        update_attr(vc);
 
@@ -3075,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
 
                old_was_color = vc->vc_can_do_color;
                vc->vc_sw->con_deinit(vc);
-               if (!vc->vc_origin)
-                       vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+               vc->vc_origin = (unsigned long)vc->vc_screenbuf;
                visual_init(vc, i, 0);
                set_origin(vc);
                update_attr(vc);
index 2bbeaaea46e9b7765ce983374fb125b7dd5422c9..38df8c19e74cc56903d5985cdbee7d52df3dc0d9 100644 (file)
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KIOCSOUND:
                if (!perm)
                        goto eperm;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
+               /*
+                * The use of PIT_TICK_RATE is historic, it used to be
+                * the platform-dependent CLOCK_TICK_RATE between 2.6.12
+                * and 2.6.36, which was a minor but unfortunate ABI
+                * change.
+                */
                if (arg)
-                       arg = CLOCK_TICK_RATE / arg;
+                       arg = PIT_TICK_RATE / arg;
                kd_mksound(arg, 0);
                break;
 
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 */
                ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
                count = ticks ? (arg & 0xffff) : 0;
-               /* FIXME: This is an old broken API but we need to keep it
-                  supported and somehow separate the historic advertised
-                  tick rate from any real one */
                if (count)
-                       count = CLOCK_TICK_RATE / count;
+                       count = PIT_TICK_RATE / count;
                kd_mksound(count, ticks);
                break;
        }
index b42f42ca70c3c9454bb00ff7cf2e8505f74e482d..823559ab0e243610ca8e5ff3b5983aca3d53b7bb 100644 (file)
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
        return err;
 }
 
-static int sx150x_init_hw(struct sx150x_chip *chip,
-                       struct sx150x_platform_data *pdata)
+static int sx150x_reset(struct sx150x_chip *chip)
 {
-       int err = 0;
+       int err;
 
-       err = i2c_smbus_write_word_data(chip->client,
+       err = i2c_smbus_write_byte_data(chip->client,
                                        chip->dev_cfg->reg_reset,
-                                       0x3412);
+                                       0x12);
        if (err < 0)
                return err;
 
+       err = i2c_smbus_write_byte_data(chip->client,
+                                       chip->dev_cfg->reg_reset,
+                                       0x34);
+       return err;
+}
+
+static int sx150x_init_hw(struct sx150x_chip *chip,
+                       struct sx150x_platform_data *pdata)
+{
+       int err = 0;
+
+       if (pdata->reset_during_probe) {
+               err = sx150x_reset(chip);
+               if (err < 0)
+                       return err;
+       }
+
        err = sx150x_i2c_write(chip->client,
                        chip->dev_cfg->reg_misc,
                        0x01);
index 7e31d4348340b10c9c4b39b0612e119335dc444c..d2ab01e90a96315fee72015f4a76f194211d4a81 100644 (file)
@@ -34,6 +34,9 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
 static void drm_mode_validate_flag(struct drm_connector *connector,
                                   int flags)
 {
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                        connector->status = connector_status_disconnected;
                if (connector->funcs->force)
                        connector->funcs->force(connector);
-       } else
+       } else {
                connector->status = connector->funcs->detect(connector);
+               drm_helper_hpd_irq_event(dev);
+       }
 
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
        }
 
        count = (*connector_funcs->get_modes)(connector);
-       if (!count) {
+       if (count == 0 && connector->status == connector_status_connected)
                count = drm_add_modes_noedid(connector, 1024, 768);
-               if (!count)
-                       return 0;
-       }
+       if (count == 0)
+               goto prune;
 
        drm_mode_connector_list_update(connector);
 
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
        enum drm_connector_status old_status, status;
        bool repoll = false, changed = false;
 
+       if (!drm_kms_helper_poll)
+               return;
+
        mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
        bool poll = false;
        struct drm_connector *connector;
 
+       if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+               return;
+
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                if (connector->polled)
                        poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
        if (!dev->mode_config.poll_enabled)
                return;
+
        /* kill timer and schedule immediate execution, this doesn't block */
        cancel_delayed_work(&dev->mode_config.output_poll_work);
-       queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+       if (drm_kms_helper_poll)
+               queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
index 92d5605a34d11a6fcecbbae4149220c44ac86740..5e43d70767896e80400c96c628f2960270e1f2c4 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       unsigned long flags;
+       struct intel_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               const char *pipe = crtc->pipe ? "B" : "A";
+               const char *plane = crtc->plane ? "B" : "A";
+               struct intel_unpin_work *work;
+
+               spin_lock_irqsave(&dev->event_lock, flags);
+               work = crtc->unpin_work;
+               if (work == NULL) {
+                       seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+                                  pipe, plane);
+               } else {
+                       if (!work->pending) {
+                               seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+                                          pipe, plane);
+                       } else {
+                               seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+                                          pipe, plane);
+                       }
+                       if (work->enable_stall_check)
+                               seq_printf(m, "Stall check enabled, ");
+                       else
+                               seq_printf(m, "Stall check waiting for page flip ioctl, ");
+                       seq_printf(m, "%d prepares\n", work->pending);
+
+                       if (work->old_fb_obj) {
+                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+                               if(obj_priv)
+                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                       }
+                       if (work->pending_flip_obj) {
+                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+                               if(obj_priv)
+                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                       }
+               }
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -777,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+       {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
index a7ec93e62f811800003dcc655a7580473d748d33..9d67b485303005771a090ea7a3c1e1c6f8b74e9e 100644 (file)
@@ -620,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0)
+               if (ret != 0) {
+                       ret = -EFAULT;
                        goto fail_free;
+               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -662,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-       if (ret != 0)
+       if (ret != 0) {
+               ret = -EFAULT;
                goto fail_batch_free;
+       }
 
        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -676,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
-               if (ret != 0)
+               if (ret != 0) {
+                       ret = -EFAULT;
                        goto fail_clip_free;
+               }
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -885,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
        int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
-       int ret = 0;
+       int ret;
 
        if (IS_I965G(dev))
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -895,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
 #ifdef CONFIG_PNP
        if (mchbar_addr &&
-           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-               ret = 0;
-               goto out;
-       }
+           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+               return 0;
 #endif
 
        /* Get some space for it */
-       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+       dev_priv->mch_res.name = "i915 MCHBAR";
+       dev_priv->mch_res.flags = IORESOURCE_MEM;
+       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+                                    &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
-                                    0,   pcibios_align_resource,
+                                    0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
-               goto out;
+               return ret;
        }
 
        if (IS_I965G(dev))
@@ -919,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 
        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
-out:
-       return ret;
+       return 0;
 }
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2082,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
+       /* overlay on gen2 is broken and can't address above 1G */
+       if (IS_GEN2(dev))
+               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
        dev_priv->regs = ioremap(base, size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
index 00befce8fbb7f503b5ed5311c8acac81f1c00fcd..216deb579785eb93e27e2ba57a0556471a13daf2 100644 (file)
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
        .driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-       .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+       .gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-       .is_i8xx = 1,
+       .gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-       .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+       .gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-       .is_i8xx = 1,
+       .gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-       .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-       .is_i9xx = 1,  .is_mobile = 1,
+       .gen = 3, .is_i9xx = 1,  .is_mobile = 1,
        .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-       .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+       .gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-       .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+       .gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-       .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+       .gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+       .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-       .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-       .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_hotplug = 1,
+       .gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+       .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-       .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1,
+       .gen = 3, .is_g33 = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-       .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-       .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+       .gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-       .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-       .need_gfx_hws = 1,
-       .has_hotplug = 1,
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-       .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_pipe_cxsr = 1,
-       .has_hotplug = 1,
+       .gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-       .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-       .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-       .has_hotplug = 1,
+       .gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-       .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1, .is_gen6 = 1,
+       .gen = 6, .is_i965g = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-       .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-       .has_hotplug = 1, .is_gen6 = 1,
+       .gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+       .need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
@@ -180,8 +175,12 @@ static const struct pci_device_id pciidlist[] = {          /* aka */
        INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
        INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
        INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+       INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
        INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
        INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+       INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
        {0, 0, 0}
 };
 
index 047cd7ce7e1b31ec981f9069fdfbfb412e9dd8ea..af4a263cf25782a6906f80a1d0783841000ac9b1 100644 (file)
@@ -191,6 +191,7 @@ struct drm_i915_display_funcs {
 };
 
 struct intel_device_info {
+       u8 gen;
        u8 is_mobile : 1;
        u8 is_i8xx : 1;
        u8 is_i85x : 1;
@@ -206,7 +207,6 @@ struct intel_device_info {
        u8 is_broadwater : 1;
        u8 is_crestline : 1;
        u8 is_ironlake : 1;
-       u8 is_gen6 : 1;
        u8 has_fbc : 1;
        u8 has_rc6 : 1;
        u8 has_pipe_cxsr : 1;
@@ -1162,7 +1162,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_845G(dev)           ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev)           (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev)         ((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)          ((dev)->pci_device == 0x2772)
@@ -1181,27 +1180,13 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE(dev)       (INTEL_INFO(dev)->is_ironlake)
 #define IS_I9XX(dev)           (INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev)           (INTEL_INFO(dev)->is_gen6)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 
-#define IS_GEN3(dev)   (IS_I915G(dev) ||                       \
-                        IS_I915GM(dev) ||                      \
-                        IS_I945G(dev) ||                       \
-                        IS_I945GM(dev) ||                      \
-                        IS_G33(dev) || \
-                        IS_PINEVIEW(dev))
-#define IS_GEN4(dev)   ((dev)->pci_device == 0x2972 ||         \
-                        (dev)->pci_device == 0x2982 ||         \
-                        (dev)->pci_device == 0x2992 ||         \
-                        (dev)->pci_device == 0x29A2 ||         \
-                        (dev)->pci_device == 0x2A02 ||         \
-                        (dev)->pci_device == 0x2A12 ||         \
-                        (dev)->pci_device == 0x2E02 ||         \
-                        (dev)->pci_device == 0x2E12 ||         \
-                        (dev)->pci_device == 0x2E22 ||         \
-                        (dev)->pci_device == 0x2E32 ||         \
-                        (dev)->pci_device == 0x2A42 ||         \
-                        (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev)   (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)   (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)   (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)   (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 
 #define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
index df5a7135c2614c8de55e309c5dfad82efcf618ac..16fca1d1799a4211474a91e7fc52b605eceafbfc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
+#include <linux/intel-gtt.h>
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
@@ -135,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file_priv, obj, &handle);
-       drm_gem_object_unreference_unlocked(obj);
-       if (ret)
+       if (ret) {
+               drm_gem_object_unreference_unlocked(obj);
                return ret;
+       }
 
-       args->handle = handle;
+       /* Sink the floating reference from kref_init(handlecount) */
+       drm_gem_object_handle_unreference_unlocked(obj);
 
+       args->handle = handle;
        return 0;
 }
 
@@ -3585,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                if (ret != 0) {
                        DRM_ERROR("copy %d cliprects failed: %d\n",
                                  args->num_cliprects, ret);
+                       ret = -EFAULT;
                        goto pre_mutex_err;
                }
        }
index 16861b800feeb46577f35e616c3d891b6dff9f4e..744225ebb4b25d5988fab454441de95d7db94115 100644 (file)
@@ -887,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
        queue_work(dev_priv->wq, &dev_priv->error_work);
 }
 
+static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_gem_object *obj_priv;
+       struct intel_unpin_work *work;
+       unsigned long flags;
+       bool stall_detected;
+
+       /* Ignore early vblank irqs */
+       if (intel_crtc == NULL)
+               return;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       work = intel_crtc->unpin_work;
+
+       if (work == NULL || work->pending || !work->enable_stall_check) {
+               /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               return;
+       }
+
+       /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
+       obj_priv = to_intel_bo(work->pending_flip_obj);
+       if(IS_I965G(dev)) {
+               int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+               stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+       } else {
+               int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+               stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+                                                       crtc->y * crtc->fb->pitch +
+                                                       crtc->x * crtc->fb->bits_per_pixel/8);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       if (stall_detected) {
+               DRM_DEBUG_DRIVER("Pageflip stall detected\n");
+               intel_prepare_page_flip(dev, intel_crtc->plane);
+       }
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
        struct drm_device *dev = (struct drm_device *) arg;
@@ -1004,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                if (pipea_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 0);
-                       if (!dev_priv->flip_pending_is_done)
+                       if (!dev_priv->flip_pending_is_done) {
+                               i915_pageflip_stall_check(dev, 0);
                                intel_finish_page_flip(dev, 0);
+                       }
                }
 
                if (pipeb_stats & vblank_status) {
                        vblank++;
                        drm_handle_vblank(dev, 1);
-                       if (!dev_priv->flip_pending_is_done)
+                       if (!dev_priv->flip_pending_is_done) {
+                               i915_pageflip_stall_check(dev, 1);
                                intel_finish_page_flip(dev, 1);
+                       }
                }
 
                if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1303,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
                i915_seqno_passed(i915_get_gem_seqno(dev,
                                &dev_priv->render_ring),
                        i915_get_tail_request(dev)->seqno)) {
+               bool missed_wakeup = false;
+
                dev_priv->hangcheck_count = 0;
 
                /* Issue a wake-up to catch stuck h/w. */
-               if (dev_priv->render_ring.waiting_gem_seqno |
-                   dev_priv->bsd_ring.waiting_gem_seqno) {
-                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-                       if (dev_priv->render_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-                       if (dev_priv->bsd_ring.waiting_gem_seqno)
-                               DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+               if (dev_priv->render_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->render_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+                       missed_wakeup = true;
                }
+
+               if (dev_priv->bsd_ring.waiting_gem_seqno &&
+                   waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
+                       DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+                       missed_wakeup = true;
+               }
+
+               if (missed_wakeup)
+                       DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
                return;
        }
 
index 67e3ec1a6af9dc482870e842b3739a86581176ae..4f5e15577e89e3e6f7005cd92f9f87ab0636b4eb 100644 (file)
 
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
+# define MI_FLUSH_ENABLE                               (1 << 11)
 
 #define SCPD0          0x0209c /* 915+ only */
 #define IER            0x020a0
 #define  WM1_LP_SR_EN          (1<<31)
 #define  WM1_LP_LATENCY_SHIFT  24
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
+#define  WM1_LP_FBC_LP1_MASK   (0xf<<20)
+#define  WM1_LP_FBC_LP1_SHIFT  20
 #define  WM1_LP_SR_MASK                (0x1ff<<8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0x3f)
+#define WM2_LP_ILK             0x4510c
+#define  WM2_LP_EN             (1<<31)
+#define WM3_LP_ILK             0x45110
+#define  WM3_LP_EN             (1<<31)
+#define WM1S_LP_ILK            0x45120
+#define  WM1S_LP_EN            (1<<31)
 
 /* Memory latency timer register */
 #define MLTR_ILK               0x11222
index 11a3394f5fe17bb772cb381de1708ad77ae14499..19daead5b525d5d3d653e156ce2bfe532479bd9e 100644 (file)
@@ -990,6 +990,22 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
 
+       /* Clear existing vblank status. Note this will clear any other
+        * sticky status fields as well.
+        *
+        * This races with i915_driver_irq_handler() with the result
+        * that either function could miss a vblank event.  Here it is not
+        * fatal, as we will either wait upon the next vblank interrupt or
+        * timeout.  Generally speaking intel_wait_for_vblank() is only
+        * called during modeset at which time the GPU should be idle and
+        * should *not* be performing page flips and thus not waiting on
+        * vblanks...
+        * Currently, the result of us stealing a vblank from the irq
+        * handler is that a single frame will be skipped during swapbuffers.
+        */
+       I915_WRITE(pipestat_reg,
+                  I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
        /* Wait for vblank interrupt bit to set */
        if (wait_for((I915_READ(pipestat_reg) &
                      PIPE_VBLANK_INTERRUPT_STATUS),
@@ -1486,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                        dspcntr &= ~DISPPLANE_TILED;
        }
 
-       if (IS_IRONLAKE(dev))
+       if (HAS_PCH_SPLIT(dev))
                /* must disable */
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
@@ -1495,20 +1511,19 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        Start = obj_priv->gtt_offset;
        Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
-       DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     Start, Offset, x, y, fb->pitch);
        I915_WRITE(dspstride, fb->pitch);
        if (IS_I965G(dev)) {
-               I915_WRITE(dspbase, Offset);
-               I915_READ(dspbase);
                I915_WRITE(dspsurf, Start);
-               I915_READ(dspsurf);
                I915_WRITE(dsptileoff, (y << 16) | x);
+               I915_WRITE(dspbase, Offset);
        } else {
                I915_WRITE(dspbase, Start + Offset);
-               I915_READ(dspbase);
        }
+       POSTING_READ(dspbase);
 
-       if ((IS_I965G(dev) || plane == 0))
+       if (IS_I965G(dev) || plane == 0)
                intel_update_fbc(crtc, &crtc->mode);
 
        intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1522,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
@@ -1530,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_gem_object *obj;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       unsigned long Start, Offset;
-       int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
-       int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
-       int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
-       int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
-       int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-       u32 dspcntr;
        int ret;
 
        /* no fb bound */
@@ -1572,71 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       dspcntr = I915_READ(dspcntr_reg);
-       /* Mask out pixel format bits in case we change it */
-       dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-       switch (crtc->fb->bits_per_pixel) {
-       case 8:
-               dspcntr |= DISPPLANE_8BPP;
-               break;
-       case 16:
-               if (crtc->fb->depth == 15)
-                       dspcntr |= DISPPLANE_15_16BPP;
-               else
-                       dspcntr |= DISPPLANE_16BPP;
-               break;
-       case 24:
-       case 32:
-               if (crtc->fb->depth == 30)
-                       dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
-               else
-                       dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-               break;
-       default:
-               DRM_ERROR("Unknown color depth\n");
+       ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+       if (ret) {
                i915_gem_object_unpin(obj);
                mutex_unlock(&dev->struct_mutex);
-               return -EINVAL;
-       }
-       if (IS_I965G(dev)) {
-               if (obj_priv->tiling_mode != I915_TILING_NONE)
-                       dspcntr |= DISPPLANE_TILED;
-               else
-                       dspcntr &= ~DISPPLANE_TILED;
-       }
-
-       if (HAS_PCH_SPLIT(dev))
-               /* must disable */
-               dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
-       I915_WRITE(dspcntr_reg, dspcntr);
-
-       Start = obj_priv->gtt_offset;
-       Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
-       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-                     Start, Offset, x, y, crtc->fb->pitch);
-       I915_WRITE(dspstride, crtc->fb->pitch);
-       if (IS_I965G(dev)) {
-               I915_WRITE(dspsurf, Start);
-               I915_WRITE(dsptileoff, (y << 16) | x);
-               I915_WRITE(dspbase, Offset);
-       } else {
-               I915_WRITE(dspbase, Start + Offset);
+               return ret;
        }
-       POSTING_READ(dspbase);
-
-       if ((IS_I965G(dev) || plane == 0))
-               intel_update_fbc(crtc, &crtc->mode);
-
-       intel_wait_for_vblank(dev, pipe);
 
        if (old_fb) {
                intel_fb = to_intel_framebuffer(old_fb);
                obj_priv = to_intel_bo(intel_fb->obj);
                i915_gem_object_unpin(intel_fb->obj);
        }
-       intel_increase_pllclock(crtc, true);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -1911,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
        int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
        int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
        int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
-       int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
-       int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
-       int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
        int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
        int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
        int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1982,15 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                }
 
                /* Enable panel fitting for LVDS */
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-                   || HAS_eDP || intel_pch_has_edp(crtc)) {
-                       if (dev_priv->pch_pf_size) {
-                               temp = I915_READ(pf_ctl_reg);
-                               I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-                               I915_WRITE(pf_win_pos, dev_priv->pch_pf_pos);
-                               I915_WRITE(pf_win_size, dev_priv->pch_pf_size);
-                       } else
-                               I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
+               if (dev_priv->pch_pf_size &&
+                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+                   || HAS_eDP || intel_pch_has_edp(crtc))) {
+                       /* Force use of hard-coded filter coefficients
+                        * as some pre-programmed values are broken,
+                        * e.g. x201.
+                        */
+                       I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+                                  PF_ENABLE | PF_FILTER_MED_3x3);
+                       I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+                                  dev_priv->pch_pf_pos);
+                       I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+                                  dev_priv->pch_pf_size);
                }
 
                /* Enable CPU pipe */
@@ -2115,7 +2070,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                        I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
                        I915_READ(transconf_reg);
 
-                       if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 10, 0))
+                       if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
                                DRM_ERROR("failed to enable transcoder\n");
                }
 
@@ -2155,14 +2110,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
                udelay(100);
 
                /* Disable PF */
-               temp = I915_READ(pf_ctl_reg);
-               if ((temp & PF_ENABLE) != 0) {
-                       I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
-                       I915_READ(pf_ctl_reg);
-               }
-               I915_WRITE(pf_win_size, 0);
-               POSTING_READ(pf_win_size);
-
+               I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+               I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
 
                /* disable CPU FDI tx and PCH FDI rx */
                temp = I915_READ(fdi_tx_reg);
@@ -2421,6 +2370,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
        int pipe = intel_crtc->pipe;
        bool enabled;
 
+       if (intel_crtc->dpms_mode == mode)
+               return;
+
        intel_crtc->dpms_mode = mode;
        intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
 
@@ -2815,14 +2767,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
-       if (wm_size <= 0) {
+       if (wm_size <= 0)
                wm_size = wm->default_wm;
-               DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
-                         " entries required = %ld, available = %lu.\n",
-                         entries_required + wm->guard_size,
-                         wm->fifo_size);
-       }
-
        return wm_size;
 }
 
@@ -3436,8 +3382,7 @@ static void ironlake_update_wm(struct drm_device *dev,  int planea_clock,
                reg_value = I915_READ(WM1_LP_ILK);
                reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
                               WM1_LP_CURSOR_MASK);
-               reg_value |= WM1_LP_SR_EN |
-                            (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
+               reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
                             (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
 
                I915_WRITE(WM1_LP_ILK, reg_value);
@@ -3554,10 +3499,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
        bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-       bool is_edp = false;
+       struct intel_encoder *has_edp_encoder = NULL;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_encoder *encoder;
-       struct intel_encoder *intel_encoder = NULL;
        const intel_limit_t *limit;
        int ret;
        struct fdi_m_n m_n = {0};
@@ -3578,12 +3522,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        drm_vblank_pre_modeset(dev, pipe);
 
        list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+               struct intel_encoder *intel_encoder;
 
-               if (!encoder || encoder->crtc != crtc)
+               if (encoder->crtc != crtc)
                        continue;
 
                intel_encoder = enc_to_intel_encoder(encoder);
-
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
@@ -3607,7 +3551,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                        is_dp = true;
                        break;
                case INTEL_OUTPUT_EDP:
-                       is_edp = true;
+                       has_edp_encoder = intel_encoder;
                        break;
                }
 
@@ -3685,10 +3629,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                int lane = 0, link_bw, bpp;
                /* eDP doesn't require FDI link, so just set DP M/N
                   according to current link config */
-               if (is_edp) {
+               if (has_edp_encoder) {
                        target_clock = mode->clock;
-                       intel_edp_link_config(intel_encoder,
-                                       &lane, &link_bw);
+                       intel_edp_link_config(has_edp_encoder,
+                                             &lane, &link_bw);
                } else {
                        /* DP over FDI requires target mode clock
                           instead of link clock */
@@ -3709,7 +3653,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                temp |= PIPE_8BPC;
                        else
                                temp |= PIPE_6BPC;
-               } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
+               } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
                        switch (dev_priv->edp_bpp/3) {
                        case 8:
                                temp |= PIPE_8BPC;
@@ -3782,7 +3726,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
                udelay(200);
 
-               if (is_edp) {
+               if (has_edp_encoder) {
                        if (dev_priv->lvds_use_ssc) {
                                temp |= DREF_SSC1_ENABLE;
                                I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3931,7 +3875,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                dpll_reg = pch_dpll_reg;
        }
 
-       if (!is_edp) {
+       if (!has_edp_encoder) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
                I915_READ(dpll_reg);
@@ -4026,7 +3970,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       if (!is_edp) {
+       if (!has_edp_encoder) {
                I915_WRITE(fp_reg, fp);
                I915_WRITE(dpll_reg, dpll);
                I915_READ(dpll_reg);
@@ -4105,7 +4049,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                I915_WRITE(link_m1_reg, m_n.link_m);
                I915_WRITE(link_n1_reg, m_n.link_n);
 
-               if (is_edp) {
+               if (has_edp_encoder) {
                        ironlake_set_pll_edp(crtc, adjusted_mode->clock);
                } else {
                        /* enable FDI RX PLL too */
@@ -4911,15 +4855,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
        kfree(intel_crtc);
 }
 
-struct intel_unpin_work {
-       struct work_struct work;
-       struct drm_device *dev;
-       struct drm_gem_object *old_fb_obj;
-       struct drm_gem_object *pending_flip_obj;
-       struct drm_pending_vblank_event *event;
-       int pending;
-};
-
 static void intel_unpin_work_fn(struct work_struct *__work)
 {
        struct intel_unpin_work *work =
@@ -5007,7 +4942,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
-               intel_crtc->unpin_work->pending = 1;
+               if ((++intel_crtc->unpin_work->pending) > 1)
+                       DRM_ERROR("Prepared flip multiple times\n");
        } else {
                DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
        }
@@ -5026,9 +4962,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags, offset;
-       int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
-       int ret, pipesrc;
-       u32 flip_mask;
+       int pipe = intel_crtc->pipe;
+       u32 pf, pipesrc;
+       int ret;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL)
@@ -5077,42 +5013,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        atomic_inc(&obj_priv->pending_flip);
        work->pending_flip_obj = obj;
 
-       if (intel_crtc->plane)
-               flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-       else
-               flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
        if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               u32 flip_mask;
+
+               if (intel_crtc->plane)
+                       flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+               else
+                       flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
                BEGIN_LP_RING(2);
                OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
 
+       work->enable_stall_check = true;
+
        /* Offset into the new buffer for cases of shared fbs between CRTCs */
-       offset = obj_priv->gtt_offset;
-       offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
+       offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
        BEGIN_LP_RING(4);
-       if (IS_I965G(dev)) {
+       switch(INTEL_INFO(dev)->gen) {
+       case 2:
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset | obj_priv->tiling_mode);
-               pipesrc = I915_READ(pipesrc_reg); 
-               OUT_RING(pipesrc & 0x0fff0fff);
-       } else if (IS_GEN3(dev)) {
+               OUT_RING(obj_priv->gtt_offset + offset);
+               OUT_RING(MI_NOOP);
+               break;
+
+       case 3:
                OUT_RING(MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset);
+               OUT_RING(obj_priv->gtt_offset + offset);
                OUT_RING(MI_NOOP);
-       } else {
+               break;
+
+       case 4:
+       case 5:
+               /* i965+ uses the linear or tiled offsets from the
+                * Display Registers (which do not change across a page-flip)
+                * so we need only reprogram the base address.
+                */
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(offset);
-               OUT_RING(MI_NOOP);
+               OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+
+               /* XXX Enabling the panel-fitter across page-flip is so far
+                * untested on non-native modes, so ignore it for now.
+                * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+                */
+               pf = 0;
+               pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+               OUT_RING(pf | pipesrc);
+               break;
+
+       case 6:
+               OUT_RING(MI_DISPLAY_FLIP |
+                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+               OUT_RING(fb->pitch | obj_priv->tiling_mode);
+               OUT_RING(obj_priv->gtt_offset);
+
+               pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+               pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+               OUT_RING(pf | pipesrc);
+               break;
        }
        ADVANCE_LP_RING();
 
@@ -5193,7 +5160,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
        intel_crtc->cursor_addr = 0;
-       intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+       intel_crtc->dpms_mode = -1;
        drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
        intel_crtc->busy = false;
@@ -5701,6 +5668,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                        I915_WRITE(DISP_ARB_CTL,
                                        (I915_READ(DISP_ARB_CTL) |
                                                DISP_FBC_WM_DIS));
+               I915_WRITE(WM3_LP_ILK, 0);
+               I915_WRITE(WM2_LP_ILK, 0);
+               I915_WRITE(WM1_LP_ILK, 0);
                }
                /*
                 * Based on the document from hardware guys the following bits
@@ -5722,8 +5692,7 @@ void intel_init_clock_gating(struct drm_device *dev)
                                   ILK_DPFC_DIS2 |
                                   ILK_CLK_FBC);
                }
-               if (IS_GEN6(dev))
-                       return;
+               return;
        } else if (IS_G4X(dev)) {
                uint32_t dspclk_gate;
                I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5784,11 +5753,9 @@ void intel_init_clock_gating(struct drm_device *dev)
                                OUT_RING(MI_FLUSH);
                                ADVANCE_LP_RING();
                        }
-               } else {
+               } else
                        DRM_DEBUG_KMS("Failed to allocate render context."
-                                     "Disable RC6\n");
-                       return;
-               }
+                                      "Disable RC6\n");
        }
 
        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
index 9caccd03dccb6841fea4be4c5fa43f5e8822ee33..51d142939a26e9abe76fdfe8a90aa94ff0f3b612 100644 (file)
@@ -239,7 +239,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        uint32_t ch_data = ch_ctl + 4;
        int i;
        int recv_bytes;
-       uint32_t ctl;
        uint32_t status;
        uint32_t aux_clock_divider;
        int try, precharge;
@@ -263,41 +262,43 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        else
                precharge = 5;
 
+       if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
+               DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
+                         I915_READ(ch_ctl));
+               return -EBUSY;
+       }
+
        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
                /* Load the send data into the aux channel data registers */
-               for (i = 0; i < send_bytes; i += 4) {
-                       uint32_t    d = pack_aux(send + i, send_bytes - i);
-       
-                       I915_WRITE(ch_data + i, d);
-               }
-       
-               ctl = (DP_AUX_CH_CTL_SEND_BUSY |
-                      DP_AUX_CH_CTL_TIME_OUT_400us |
-                      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                      (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                      (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
-                      DP_AUX_CH_CTL_DONE |
-                      DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                      DP_AUX_CH_CTL_RECEIVE_ERROR);
+               for (i = 0; i < send_bytes; i += 4)
+                       I915_WRITE(ch_data + i,
+                                  pack_aux(send + i, send_bytes - i));
        
                /* Send the command and wait for it to complete */
-               I915_WRITE(ch_ctl, ctl);
-               (void) I915_READ(ch_ctl);
+               I915_WRITE(ch_ctl,
+                          DP_AUX_CH_CTL_SEND_BUSY |
+                          DP_AUX_CH_CTL_TIME_OUT_400us |
+                          (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                          (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                          (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+                          DP_AUX_CH_CTL_DONE |
+                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                          DP_AUX_CH_CTL_RECEIVE_ERROR);
                for (;;) {
-                       udelay(100);
                        status = I915_READ(ch_ctl);
                        if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                                break;
+                       udelay(100);
                }
        
                /* Clear done status and any errors */
-               I915_WRITE(ch_ctl, (status |
-                               DP_AUX_CH_CTL_DONE |
-                               DP_AUX_CH_CTL_TIME_OUT_ERROR |
-                               DP_AUX_CH_CTL_RECEIVE_ERROR));
-               (void) I915_READ(ch_ctl);
-               if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+               I915_WRITE(ch_ctl,
+                          status |
+                          DP_AUX_CH_CTL_DONE |
+                          DP_AUX_CH_CTL_TIME_OUT_ERROR |
+                          DP_AUX_CH_CTL_RECEIVE_ERROR);
+               if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }
 
@@ -324,15 +325,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;
        
-       for (i = 0; i < recv_bytes; i += 4) {
-               uint32_t    d = I915_READ(ch_data + i);
-
-               unpack_aux(d, recv + i, recv_bytes - i);
-       }
+       for (i = 0; i < recv_bytes; i += 4)
+               unpack_aux(I915_READ(ch_data + i),
+                          recv + i, recv_bytes - i);
 
        return recv_bytes;
 }
index 0e92aa07b38252a4ac3457e75dc2b5113d8d9ded..ad312ca6b3e570125732168b3c2f670467264beb 100644 (file)
@@ -176,6 +176,16 @@ struct intel_crtc {
 #define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
+struct intel_unpin_work {
+       struct work_struct work;
+       struct drm_device *dev;
+       struct drm_gem_object *old_fb_obj;
+       struct drm_gem_object *pending_flip_obj;
+       struct drm_pending_vblank_event *event;
+       int pending;
+       bool enable_stall_check;
+};
+
 struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
                                     const char *name);
 void intel_i2c_destroy(struct i2c_adapter *adapter);
index b819c10811470775b19826e407ba75fffd6a9b68..4fbb0165b26f06a24cdbd8eef71f3c3d8e9845a2 100644 (file)
@@ -875,8 +875,6 @@ void intel_lvds_init(struct drm_device *dev)
 
        intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
        intel_encoder->crtc_mask = (1 << 1);
-       if (IS_I965G(dev))
-               intel_encoder->crtc_mask |= (1 << 0);
        drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
        drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
        connector->display_info.subpixel_order = SubPixelHorizontalRGB;
index 4f00390d7c616b1c87ffe145082e1913a04935d0..1d306a458be6463c8e1f6636d69ebf515d845889 100644 (file)
@@ -25,6 +25,8 @@
  *
  * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
  */
+
+#include <linux/seq_file.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
index 51e9c9e718c4c16453659ef873f7d30b88f18f9e..cb3508f78bc350735e16962f8283e598c53deca6 100644 (file)
@@ -220,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
+       int mode;
+
        if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-               I915_WRITE(MI_MODE,
-                               (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+               mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+               if (IS_GEN6(dev))
+                       mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+               I915_WRITE(MI_MODE, mode);
        }
        return ret;
 }
index 093e914e8a41a3d58b645dcf8475cb24312b3e1e..e3b7a7ee39cb97b390048c1a5ce9187b94b6e51d 100644 (file)
@@ -1061,8 +1061,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
                        return false;
 
-               if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
-                       return false;
+               (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
        } else if (intel_sdvo->is_lvds) {
                drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
 
@@ -1070,8 +1071,9 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
                                                            intel_sdvo->sdvo_lvds_fixed_mode))
                        return false;
 
-               if (!intel_sdvo_set_input_timings_for_mode(intel_sdvo, mode, adjusted_mode))
-                       return false;
+               (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
+                                                            mode,
+                                                            adjusted_mode);
        }
 
        /* Make the CRTC code factor in the SDVO pixel multiplier.  The
@@ -1108,10 +1110,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
        in_out.in0 = intel_sdvo->attached_output;
        in_out.in1 = 0;
 
-       if (!intel_sdvo_set_value(intel_sdvo,
-                                 SDVO_CMD_SET_IN_OUT_MAP,
-                                 &in_out, sizeof(in_out)))
-               return;
+       intel_sdvo_set_value(intel_sdvo,
+                            SDVO_CMD_SET_IN_OUT_MAP,
+                            &in_out, sizeof(in_out));
 
        if (intel_sdvo->is_hdmi) {
                if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
@@ -1122,11 +1123,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
        /* We have tried to get input timing in mode_fixup, and filled into
           adjusted_mode */
-       if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
-               intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+       intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+       if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
                input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
-       } else
-               intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
 
        /* If it's a TV, we already set the output timing in mode_fixup.
         * Otherwise, the output timing is equal to the input timing.
@@ -1137,8 +1136,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                                                  intel_sdvo->attached_output))
                        return;
 
-               if (!intel_sdvo_set_output_timing(intel_sdvo, &input_dtd))
-                       return;
+               (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
        }
 
        /* Set the input timing to the screen. Assume always input 0. */
@@ -1165,8 +1163,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                intel_sdvo_set_input_timing(encoder, &input_dtd);
        }
 #else
-       if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
-               return;
+       (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
 #endif
 
        sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
@@ -1932,6 +1929,41 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
        .destroy = intel_sdvo_enc_destroy,
 };
 
+static void
+intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
+{
+       uint16_t mask = 0;
+       unsigned int num_bits;
+
+       /* Make a mask of outputs less than or equal to our own priority in the
+        * list.
+        */
+       switch (sdvo->controlled_output) {
+       case SDVO_OUTPUT_LVDS1:
+               mask |= SDVO_OUTPUT_LVDS1;
+       case SDVO_OUTPUT_LVDS0:
+               mask |= SDVO_OUTPUT_LVDS0;
+       case SDVO_OUTPUT_TMDS1:
+               mask |= SDVO_OUTPUT_TMDS1;
+       case SDVO_OUTPUT_TMDS0:
+               mask |= SDVO_OUTPUT_TMDS0;
+       case SDVO_OUTPUT_RGB1:
+               mask |= SDVO_OUTPUT_RGB1;
+       case SDVO_OUTPUT_RGB0:
+               mask |= SDVO_OUTPUT_RGB0;
+               break;
+       }
+
+       /* Count bits to find what number we are in the priority list. */
+       mask &= sdvo->caps.output_flags;
+       num_bits = hweight16(mask);
+       /* If more than 3 outputs, default to DDC bus 3 for now. */
+       if (num_bits > 3)
+               num_bits = 3;
+
+       /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+       sdvo->ddc_bus = 1 << num_bits;
+}
 
 /**
  * Choose the appropriate DDC bus for control bus switch command for this
@@ -1951,7 +1983,10 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
        else
                mapping = &(dev_priv->sdvo_mappings[1]);
 
-       sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       if (mapping->initialized)
+               sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+       else
+               intel_sdvo_guess_ddc_bus(sdvo);
 }
 
 static bool
index d2029efee982c3baff92538556ddae0b7a44ea9b..c671f60ce80bac917a61c1c60cdb02e692afcb85 100644 (file)
@@ -1231,7 +1231,6 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
        struct drm_encoder *encoder = &intel_tv->base.enc;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        unsigned long irqflags;
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
@@ -1268,11 +1267,15 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
                   DAC_C_0_7_V);
        I915_WRITE(TV_CTL, tv_ctl);
        I915_WRITE(TV_DAC, tv_dac);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       POSTING_READ(TV_DAC);
+       msleep(20);
+
        tv_dac = I915_READ(TV_DAC);
        I915_WRITE(TV_DAC, save_tv_dac);
        I915_WRITE(TV_CTL, save_tv_ctl);
-       intel_wait_for_vblank(dev, intel_crtc->pipe);
+       POSTING_READ(TV_CTL);
+       msleep(20);
+
        /*
         *  A B C
         *  0 1 1 Composite
index 6b208ffafa8de1df0bdd5dd6ff6aede30f929341..87ac21ec23d290db82e90c6fe6257e4cffdb5955 100644 (file)
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
        struct nouveau_fence *fence;
        uint32_t sequence;
 
+       spin_lock(&chan->fence.lock);
+
        if (USE_REFCNT)
                sequence = nvchan_rd32(chan, 0x48);
        else
                sequence = atomic_read(&chan->fence.last_sequence_irq);
 
        if (chan->fence.sequence_ack == sequence)
-               return;
+               goto out;
        chan->fence.sequence_ack = sequence;
 
-       spin_lock(&chan->fence.lock);
        list_for_each_safe(entry, tmp, &chan->fence.pending) {
                fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
                if (sequence == chan->fence.sequence_ack)
                        break;
        }
+out:
        spin_unlock(&chan->fence.lock);
 }
 
index 581c67cd7b24c24446097797f54ffc5d689f92b0..ead7b8fc53fcbcd473dbdc7a97d893a3e2e9c454 100644 (file)
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
-               drm_gem_object_unreference(nvbo->gem);
+               drm_gem_object_unreference_unlocked(nvbo->gem);
        }
 }
 
@@ -300,7 +300,7 @@ retry:
                        validate_fini(op, NULL);
                        if (ret == -EAGAIN)
                                ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-                       drm_gem_object_unreference(gem);
+                       drm_gem_object_unreference_unlocked(gem);
                        if (ret) {
                                NV_ERROR(dev, "fail reserve\n");
                                return ret;
@@ -616,8 +616,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                return PTR_ERR(bo);
        }
 
-       mutex_lock(&dev->struct_mutex);
-
        /* Mark push buffers as being used on PFIFO, the validation code
         * will then make sure that if the pushbuf bo moves, that they
         * happen on the kernel channel, which will in turn cause a sync
@@ -731,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
        validate_fini(&op, fence);
        nouveau_fence_unref((void**)&fence);
-       mutex_unlock(&dev->struct_mutex);
        kfree(bo);
        kfree(push);
 
index c95bf9b681ddc2352b8a251805e05456100f70b2..91ef93cf1f352f89553b7dab4ab9fb4f219326ad 100644 (file)
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
        chan->file_priv = (struct drm_file *)-2;
        dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
 
+       INIT_LIST_HEAD(&chan->ramht_refs);
+
        /* Channel's PRAMIN object + heap */
        ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
                                                        NULL, &chan->ramin);
index 577239a24fd5e37a29c3bbc72a509e7361248a68..464a81a1990f6f274d46bd0535283bb315680305 100644 (file)
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
        args.usV_SyncWidth =
                cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
 
+       args.ucOverscanRight = radeon_crtc->h_border;
+       args.ucOverscanLeft = radeon_crtc->h_border;
+       args.ucOverscanBottom = radeon_crtc->v_border;
+       args.ucOverscanTop = radeon_crtc->v_border;
+
        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                misc |= ATOM_VSYNC_POLARITY;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -534,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        pll->algo = PLL_ALGO_LEGACY;
                                        pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
                                }
+                               /* There is some evidence (often anecdotal) that RV515 LVDS
+                                * (on some boards at least) prefers the legacy algo.  I'm not
+                                * sure whether this should be handled generically or on a
+                                * case-by-case quirk basis.  Both algos should work fine in the
+                                * majority of cases.
+                                */
+                               if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+                                   (rdev->family == CHIP_RV515)) {
+                                       /* allow the user to override just in case */
+                                       if (radeon_new_pll == 1)
+                                               pll->algo = PLL_ALGO_NEW;
+                                       else
+                                               pll->algo = PLL_ALGO_LEGACY;
+                               }
                        } else {
                                if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
                                        pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -1056,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
        if (rdev->family >= CHIP_RV770) {
                if (radeon_crtc->crtc_id) {
-                       WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-                       WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+                       WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+                       WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
                } else {
-                       WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-                       WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+                       WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+                       WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
                }
        }
        WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1197,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct radeon_device *rdev = dev->dev_private;
+       struct drm_encoder *encoder;
+       bool is_tvcv = false;
 
-       /* TODO color tiling */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               /* find tv std */
+               if (encoder->crtc == crtc) {
+                       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+                       if (radeon_encoder->active_device &
+                           (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+                               is_tvcv = true;
+               }
+       }
 
        atombios_disable_ss(crtc);
        /* always set DCPLL */
@@ -1207,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
        atombios_crtc_set_pll(crtc, adjusted_mode);
        atombios_enable_ss(crtc);
 
-       if (ASIC_IS_AVIVO(rdev))
+       if (ASIC_IS_DCE4(rdev))
                atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
-       else {
+       else if (ASIC_IS_AVIVO(rdev)) {
+               if (is_tvcv)
+                       atombios_crtc_set_timing(crtc, adjusted_mode);
+               else
+                       atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+       } else {
                atombios_crtc_set_timing(crtc, adjusted_mode);
                if (radeon_crtc->crtc_id == 0)
                        atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
index 957d5067ad9cc1495984f909d724fbf64c8d8856..b8b7f010b25f8df49e20329932c1735482c03ecf 100644 (file)
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
        return 0;
 }
 
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+       int r;
+       uint32_t cp_me;
+
+       r = radeon_ring_lock(rdev, 7);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+       radeon_ring_write(rdev, 0x1);
+       radeon_ring_write(rdev, 0x0);
+       radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+       radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+       radeon_ring_write(rdev, 0);
+       radeon_ring_write(rdev, 0);
+       radeon_ring_unlock_commit(rdev);
+
+       cp_me = 0xff;
+       WREG32(CP_ME_CNTL, cp_me);
+
+       r = radeon_ring_lock(rdev, 4);
+       if (r) {
+               DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+               return r;
+       }
+       /* init some VGT regs */
+       radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+       radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+       radeon_ring_write(rdev, 0xe);
+       radeon_ring_write(rdev, 0x10);
+       radeon_ring_unlock_commit(rdev);
+
+       return 0;
+}
+
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
        u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
        rdev->cp.rptr = RREG32(CP_RB_RPTR);
        rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
-       r600_cp_start(rdev);
+       evergreen_cp_start(rdev);
        rdev->cp.ready = true;
        r = radeon_ring_test(rdev);
        if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = evergreen_startup(rdev);
        if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
        evergreen_pcie_gart_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
index d0ebae9dde25ba400778b210dfc2e6a86179d682..afc18d87fdca7409e4c7462fe3a1b03eeaa6d3ca 100644 (file)
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
        }
        radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
        radeon_ring_write(rdev, 0x1);
-       if (rdev->family >= CHIP_CEDAR) {
-               radeon_ring_write(rdev, 0x0);
-               radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-       } else if (rdev->family >= CHIP_RV770) {
+       if (rdev->family >= CHIP_RV770) {
                radeon_ring_write(rdev, 0x0);
                radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
        } else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = r600_startup(rdev);
        if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
        radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
         */
        if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
-               void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+               void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
                u32 tmp;
 
                WREG32(HDP_DEBUG1, 0);
index 3dfcfa3ca4250bf85e005dc5e6fd05f4c24fbea1..a168d644bf9e96724b5e717f2a8777bb8354f5e5 100644 (file)
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp);
 
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+       struct radeon_bo                *robj;
+       volatile uint32_t               *ptr;
+};
 
 /*
  * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
        const struct firmware *pfp_fw;  /* r6/700 PFP firmware */
        const struct firmware *rlc_fw;  /* r6/700 RLC firmware */
        struct r600_blit r600_blit;
+       struct r700_vram_scratch vram_scratch;
        int msi_enabled; /* msi enabled */
        struct r600_ih ih; /* r6/700 interrupt ring */
        struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
 extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
index a21bf88e8c2d530637972b3ddd60863334ba1c1c..25e1dd1977917ad4151cdaf599add440ef14d625 100644 (file)
@@ -858,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
        return 0;
 }
 
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-       int r;
-
-       r = radeon_static_clocks_init(rdev->ddev);
-       if (r) {
-               return r;
-       }
-       DRM_INFO("Clocks initialized !\n");
-       return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
index 61141981880d7d03908a9a582f0d5a0a9951d2b3..ebae14c4b768b4413990e84c1055782a72590009 100644 (file)
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
 
+                       /* some evergreen boards have bad data for this entry */
+                       if (ASIC_IS_DCE4(rdev)) {
+                               if ((i == 7) &&
+                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (gpio->sucI2cId.ucAccess == 0)) {
+                                       gpio->sucI2cId.ucAccess = 0x97;
+                                       gpio->ucDataMaskShift = 8;
+                                       gpio->ucDataEnShift = 8;
+                                       gpio->ucDataY_Shift = 8;
+                                       gpio->ucDataA_Shift = 8;
+                               }
+                       }
+
                        if (gpio->sucI2cId.ucAccess == id) {
                                i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
                                i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
                for (i = 0; i < num_indices; i++) {
                        gpio = &i2c_info->asGPIO_Info[i];
                        i2c.valid = false;
+
+                       /* some evergreen boards have bad data for this entry */
+                       if (ASIC_IS_DCE4(rdev)) {
+                               if ((i == 7) &&
+                                   (gpio->usClkMaskRegisterIndex == 0x1936) &&
+                                   (gpio->sucI2cId.ucAccess == 0)) {
+                                       gpio->sucI2cId.ucAccess = 0x97;
+                                       gpio->ucDataMaskShift = 8;
+                                       gpio->ucDataEnShift = 8;
+                                       gpio->ucDataY_Shift = 8;
+                                       gpio->ucDataA_Shift = 8;
+                               }
+                       }
+
                        i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
                        i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
                        i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
index 14448a740ba632cf5f2fcbaf2d2d5804f3446b93..5249af8931e60549e01362102f9c8ca941ba0d24 100644 (file)
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
        mpll->max_feedback_div = 0xff;
        mpll->best_vco = 0;
 
+       if (!rdev->clock.default_sclk)
+               rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+       if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+               rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+       rdev->pm.current_sclk = rdev->clock.default_sclk;
+       rdev->pm.current_mclk = rdev->clock.default_mclk;
+
 }
 
 /* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
        }
 }
 
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
-       uint32_t tmp;
-
-       /* XXX make sure engine is idle */
-
-       if (rdev->family < CHIP_RS600) {
-               tmp = RREG32_PLL(RADEON_SCLK_CNTL);
-               if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
-                       tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
-               if ((rdev->family == CHIP_RV250)
-                   || (rdev->family == CHIP_RV280))
-                       tmp |=
-                           RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
-               if ((rdev->family == CHIP_RV350)
-                   || (rdev->family == CHIP_RV380))
-                       tmp |= R300_SCLK_FORCE_VAP;
-               if (rdev->family == CHIP_R420)
-                       tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
-               WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-       } else if (rdev->family < CHIP_R600) {
-               tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
-               tmp |= AVIVO_CP_FORCEON;
-               WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
-               tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
-               tmp |= AVIVO_E2_FORCEON;
-               WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
-               tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
-               tmp |= AVIVO_IDCT_FORCEON;
-               WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
-       }
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
-       struct radeon_device *rdev = dev->dev_private;
-
-       /* XXX make sure engine is idle */
-
-       if (radeon_dynclks != -1) {
-               if (radeon_dynclks) {
-                       if (rdev->asic->set_clock_gating)
-                               radeon_set_clock_gating(rdev, 1);
-               }
-       }
-       radeon_apply_clock_quirks(rdev);
-       return 0;
-}
index 1a5ee392e9c796ad7db3d8056b3fbf870dab4074..a9dd7847d96ed673e4548efc343e9112390e7c75 100644 (file)
@@ -1051,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
        uint32_t subpixel_order = SubPixelNone;
        bool shared_ddc = false;
 
-       /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                return;
 
+       /* if the user selected tv=0 don't try and add the connector */
+       if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+            (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+            (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+           (radeon_tv == 0))
+               return;
+
        /* see if we already added it */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                radeon_connector = to_radeon_connector(connector);
@@ -1209,19 +1215,17 @@ radeon_add_atom_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_SVIDEO:
        case DRM_MODE_CONNECTOR_Composite:
        case DRM_MODE_CONNECTOR_9PinDIN:
-               if (radeon_tv == 1) {
-                       drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.load_detect_property,
-                                                     1);
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.tv_std_property,
-                                                     radeon_atombios_get_tv_info(rdev));
-                       /* no HPD on analog connectors */
-                       radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               }
+               drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+               radeon_connector->dac_load_detect = true;
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.load_detect_property,
+                                             1);
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.tv_std_property,
+                                             radeon_atombios_get_tv_info(rdev));
+               /* no HPD on analog connectors */
+               radeon_connector->hpd.hpd = RADEON_HPD_NONE;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1272,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
        struct radeon_connector *radeon_connector;
        uint32_t subpixel_order = SubPixelNone;
 
-       /* fixme - tv/cv/din */
        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                return;
 
+       /* if the user selected tv=0 don't try and add the connector */
+       if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
+            (connector_type == DRM_MODE_CONNECTOR_Composite) ||
+            (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
+           (radeon_tv == 0))
+               return;
+
        /* see if we already added it */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                radeon_connector = to_radeon_connector(connector);
@@ -1347,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
        case DRM_MODE_CONNECTOR_SVIDEO:
        case DRM_MODE_CONNECTOR_Composite:
        case DRM_MODE_CONNECTOR_9PinDIN:
-               if (radeon_tv == 1) {
-                       drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
-                       drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
-                       radeon_connector->dac_load_detect = true;
-                       /* RS400,RC410,RS480 chipset seems to report a lot
-                        * of false positive on load detect, we haven't yet
-                        * found a way to make load detect reliable on those
-                        * chipset, thus just disable it for TV.
-                        */
-                       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
-                               radeon_connector->dac_load_detect = false;
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.load_detect_property,
-                                                     radeon_connector->dac_load_detect);
-                       drm_connector_attach_property(&radeon_connector->base,
-                                                     rdev->mode_info.tv_std_property,
-                                                     radeon_combios_get_tv_info(rdev));
-                       /* no HPD on analog connectors */
-                       radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               }
+               drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
+               drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
+               radeon_connector->dac_load_detect = true;
+               /* RS400,RC410,RS480 chipsets seem to report a lot
+                * of false positives on load detect; we haven't yet
+                * found a way to make load detect reliable on those
+                * chipsets, thus just disable it for TV.
+                */
+               if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+                       radeon_connector->dac_load_detect = false;
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.load_detect_property,
+                                             radeon_connector->dac_load_detect);
+               drm_connector_attach_property(&radeon_connector->base,
+                                             rdev->mode_info.tv_std_property,
+                                             radeon_combios_get_tv_info(rdev));
+               /* no HPD on analog connectors */
+               radeon_connector->hpd.hpd = RADEON_HPD_NONE;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
index 69b3c2291e926730944107c76e1b1d52bb0270b9..256d204a6d24226a93f68b5d4d097fc4b52739c7 100644 (file)
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
 void radeon_update_bandwidth_info(struct radeon_device *rdev)
 {
        fixed20_12 a;
-       u32 sclk, mclk;
+       u32 sclk = rdev->pm.current_sclk;
+       u32 mclk = rdev->pm.current_mclk;
 
-       if (rdev->flags & RADEON_IS_IGP) {
-               sclk = radeon_get_engine_clock(rdev);
-               mclk = rdev->clock.default_mclk;
-
-               a.full = dfixed_const(100);
-               rdev->pm.sclk.full = dfixed_const(sclk);
-               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = dfixed_const(mclk);
-               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
+       /* sclk/mclk in Mhz */
+       a.full = dfixed_const(100);
+       rdev->pm.sclk.full = dfixed_const(sclk);
+       rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
+       rdev->pm.mclk.full = dfixed_const(mclk);
+       rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 
+       if (rdev->flags & RADEON_IS_IGP) {
                a.full = dfixed_const(16);
                /* core_bandwidth = sclk(Mhz) * 16 */
                rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
-       } else {
-               sclk = radeon_get_engine_clock(rdev);
-               mclk = radeon_get_memory_clock(rdev);
-
-               a.full = dfixed_const(100);
-               rdev->pm.sclk.full = dfixed_const(sclk);
-               rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
-               rdev->pm.mclk.full = dfixed_const(mclk);
-               rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
        }
 }
 
index 0416804d8f3010f7d0451e17e5c2428eecba3d51..6a13ee38a5b9fc71201a0a9a38dd93b3ab5957eb 100644 (file)
@@ -213,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
 
 static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
 {
-       u32 sclk = radeon_get_engine_clock(rdev);
+       u32 sclk = rdev->pm.current_sclk;
        u32 prescale = 0;
        u32 nm;
        u8 n, m, loop;
index 8f93e2b4b0c821153ca9969255e22e1f95c95513..efbe975312dc42342c2add89c417b753b0e92791 100644 (file)
@@ -600,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
 void radeon_enc_destroy(struct drm_encoder *encoder);
 void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
 void radeon_combios_asic_init(struct drm_device *dev);
-extern int radeon_static_clocks_init(struct drm_device *dev);
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                        struct drm_display_mode *mode,
                                        struct drm_display_mode *adjusted_mode);
index f1c796810117fdb9dfb0b66aa5a89eee5e1da1e9..bfa59db374d23d3c4a06877a6e9a37aec59904e0 100644 (file)
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 
 }
 
+static int rv770_vram_scratch_init(struct radeon_device *rdev)
+{
+       int r;
+       u64 gpu_addr;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+                                       true, RADEON_GEM_DOMAIN_VRAM,
+                                       &rdev->vram_scratch.robj);
+               if (r) {
+                       return r;
+               }
+       }
+
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (unlikely(r != 0))
+               return r;
+       r = radeon_bo_pin(rdev->vram_scratch.robj,
+                         RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+       if (r) {
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+               return r;
+       }
+       r = radeon_bo_kmap(rdev->vram_scratch.robj,
+                               (void **)&rdev->vram_scratch.ptr);
+       if (r)
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+       radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+       return r;
+}
+
+static void rv770_vram_scratch_fini(struct radeon_device *rdev)
+{
+       int r;
+
+       if (rdev->vram_scratch.robj == NULL) {
+               return;
+       }
+       r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+       if (likely(r == 0)) {
+               radeon_bo_kunmap(rdev->vram_scratch.robj);
+               radeon_bo_unpin(rdev->vram_scratch.robj);
+               radeon_bo_unreserve(rdev->vram_scratch.robj);
+       }
+       radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
        u32 tmp;
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
                if (r)
                        return r;
        }
+       r = rv770_vram_scratch_init(rdev);
+       if (r)
+               return r;
        rv770_gpu_init(rdev);
        r = r600_blit_init(rdev);
        if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
         */
        /* post card */
        atom_asic_init(rdev->mode_info.atom_context);
-       /* Initialize clocks */
-       r = radeon_clocks_init(rdev);
-       if (r) {
-               return r;
-       }
 
        r = rv770_startup(rdev);
        if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
        radeon_surface_init(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
-       r = radeon_clocks_init(rdev);
-       if (r)
-               return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
        rv770_pcie_gart_fini(rdev);
+       rv770_vram_scratch_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
-       radeon_clocks_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
index 7580f55e67e3cf1560437b428d9fb1e5b8e411d0..36e95753223059ab0e1b5ed8490fe7364b39673e 100644 (file)
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
+       AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
+       AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index 8f0caf7d4482079ef45aa9ea3b8a66d6a37500d4..78fbe9ffe7f024f3f4e1ca486bcbeb5976087e27 100644 (file)
@@ -53,7 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 262144
+#define T3_MAX_CQ_DEPTH 65536
 #define T3_MAX_NUM_STAG (1<<15)
 #define T3_MAX_MR_SIZE 0x100000000ULL
 #define T3_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
index 443cea55daac5973469cf43fabe2a388abeadb55..61e0efd4ccfb5d9d4d6f6bdc50f765d365c07d0e 100644 (file)
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
        struct iw_cm_id *cm_id = cm_node->cm_id;
-       switch (cm_node->state) {
+       enum nes_cm_node_state state = cm_node->state;
+       cm_node->state = NES_CM_STATE_CLOSED;
+       switch (state) {
        case NES_CM_STATE_SYN_RCVD:
        case NES_CM_STATE_CLOSING:
                rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
        case NES_CM_STATE_FIN_WAIT1:
                if (cm_node->cm_id)
                        cm_id->rem_ref(cm_id);
-               cm_node->state = NES_CM_STATE_CLOSED;
                send_reset(cm_node, NULL);
                break;
        default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                break;
        case NES_CM_STATE_MPAREQ_RCVD:
                passive_state = atomic_add_return(1, &cm_node->passive_state);
-               if (passive_state ==  NES_SEND_RESET_EVENT)
-                       create_event(cm_node, NES_CM_EVENT_RESET);
-               cm_node->state = NES_CM_STATE_CLOSED;
                dev_kfree_skb_any(skb);
                break;
        case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
        case NES_CM_STATE_CLOSED:
                drop_packet(skb);
                break;
+       case NES_CM_STATE_FIN_WAIT2:
        case NES_CM_STATE_FIN_WAIT1:
        case NES_CM_STATE_LAST_ACK:
                cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                return -EINVAL;
        }
 
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == NES_SEND_RESET_EVENT) {
+               rem_ref_cm_node(cm_node->cm_core, cm_node);
+               return -ECONNRESET;
+       }
+
        /* associate the node with the QP */
        nesqp->cm_node = (void *)cm_node;
        cm_node->nesqp = nesqp;
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
                        "ret=%d\n", __func__, __LINE__, ret);
 
-       passive_state = atomic_add_return(1, &cm_node->passive_state);
-       if (passive_state == NES_SEND_RESET_EVENT)
-               create_event(cm_node, NES_CM_EVENT_RESET);
        return 0;
 }
 
index f8233c851c694862d76861e515b93183b8d387a5..1980a461c49904e93102e02e655e5b033c5f92be 100644 (file)
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                return; /* Ignore it, wait for close complete */
 
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+                               if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
+                                       (nesqp->ibqp_state == IB_QPS_RTS) &&
+                                       ((nesadapter->eeprom_version >> 16) != NES_A0)) {
+                                       spin_lock_irqsave(&nesqp->lock, flags);
+                                       nesqp->hw_iwarp_state = iwarp_state;
+                                       nesqp->hw_tcp_state = tcp_state;
+                                       nesqp->last_aeq = async_event_id;
+                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+                                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+                                       spin_unlock_irqrestore(&nesqp->lock, flags);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                                       nes_cm_disconn(nesqp);
+                               }
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
                                                NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
-
                        break;
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
                        if (nesqp->term_flags) {
index aa9183db32b104aaaa7bfad081c3c969699cec72..1204c3432b6322f23518c42d550746f19d9e4ce6 100644 (file)
@@ -45,6 +45,7 @@
 #define NES_PHY_TYPE_KR               9
 
 #define NES_MULTICAST_PF_MAX 8
+#define NES_A0 3
 
 enum pci_regs {
        NES_INT_STAT = 0x0000,
index 6dfdd49cdbcf36ef5cd68aee3caf46dbdaeb77f0..10560c796fd6c0ffc591601c610579d3e1b6e8ca 100644 (file)
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 0;
        } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
                u32temp = nes_read_indexed(nesdev,
                                NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
                u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
                nes_write_indexed(nesdev,
-                               NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp);
+                               NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
                nesdev->disable_tx_flow_control = 1;
        }
        if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
index a9b025f4147a0692845d2407b6efbd9220f9837e..ab6982056518e3c086c57738f360574b211108aa 100644 (file)
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
  *
- * This function allocates all necessary memory for MT slot handling
- * in the input device, and adds ABS_MT_SLOT to the device capabilities.
+ * This function allocates all necessary memory for MT slot handling in the
+ * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
+ * are initially marked as unused by setting ABS_MT_TRACKING_ID to -1.
  */
 int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
 {
+       int i;
+
        if (!num_slots)
                return 0;
 
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
        dev->mtsize = num_slots;
        input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
 
+       /* Mark slots as 'unused' */
+       for (i = 0; i < num_slots; i++)
+               dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
+
        return 0;
 }
 EXPORT_SYMBOL(input_mt_create_slots);
index ea67c49146a3a03280ee8719c362c41d8033c743..b952317639116f2f18a7bc1f41ff5887c17f2a49 100644 (file)
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
                               const struct bcm5974_config *cfg,
                               const struct tp_finger *f)
 {
-       input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
-       input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
-       input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
-       input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+       input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+                        raw2int(f->force_major) << 1);
+       input_report_abs(input, ABS_MT_TOUCH_MINOR,
+                        raw2int(f->force_minor) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MAJOR,
+                        raw2int(f->size_major) << 1);
+       input_report_abs(input, ABS_MT_WIDTH_MINOR,
+                        raw2int(f->size_minor) << 1);
        input_report_abs(input, ABS_MT_ORIENTATION,
                         MAX_FINGER_ORIENTATION - raw2int(f->orientation));
        input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
index 46e4ba0b92463184d5e398345e50ca68809628b2..f585131604806f531f91c48ebb867919339fd80f 100644 (file)
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
 
 static void __exit i8042_exit(void)
 {
-       platform_driver_unregister(&i8042_driver);
        platform_device_unregister(i8042_platform_device);
+       platform_driver_unregister(&i8042_driver);
        i8042_platform_exit();
 
        panic_blink = NULL;
index 40d77ba8fdc138ff98b0320b877358a5302d7a28..6e29badb969e44192be85080caf801851c3d06fa 100644 (file)
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
                        if (features->type == WACOM_G4 ||
                                        features->type == WACOM_MO) {
                                input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
-                               rw = (signed)(data[7] & 0x04) - (data[7] & 0x03);
+                               rw = (data[7] & 0x04) - (data[7] & 0x03);
                        } else {
                                input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
-                               rw = -(signed)data[6];
+                               rw = -(signed char)data[6];
                        }
                        input_report_rel(input, REL_WHEEL, rw);
                }
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644 (file)
index a7afec6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-mktables
-raid6altivec*.c
-raid6int*.c
-raid6tables.c
index 1ba1e122e948931bf16a504ebe662e16e1404a8f..ed4900ade93a4d80b84784e76aafc406689020b2 100644 (file)
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
                   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
        bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
-       if (bitmap->mddev->persistent)
-               set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+       set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
        sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
        s = 0;
        while (s < sector && s < bitmap->mddev->resync_max_sectors) {
index c148b630215484f9689bf9257d6acf286685c37a..43cf9cc9c1df3650c228ce01920645fb474f105a 100644 (file)
@@ -2167,9 +2167,9 @@ repeat:
                                rdev->recovery_offset = mddev->curr_resync_completed;
 
        }       
-       if (mddev->external || !mddev->persistent) {
-               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+       if (!mddev->persistent) {
                clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               clear_bit(MD_CHANGE_DEVS, &mddev->flags);
                wake_up(&mddev->sb_wait);
                return;
        }
@@ -2178,7 +2178,6 @@ repeat:
 
        mddev->utime = get_seconds();
 
-       set_bit(MD_CHANGE_PENDING, &mddev->flags);
        if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
                force_change = 1;
        if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -3371,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
                case 0:
                        if (mddev->in_sync)
                                st = clean;
-                       else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+                       else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                                st = write_pending;
                        else if (mddev->safemode)
                                st = active_idle;
@@ -3452,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                                        mddev->in_sync = 1;
                                        if (mddev->safemode == 1)
                                                mddev->safemode = 0;
-                                       if (mddev->persistent)
-                                               set_bit(MD_CHANGE_CLEAN,
-                                                       &mddev->flags);
+                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                                }
                                err = 0;
                        } else
@@ -3466,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
        case active:
                if (mddev->pers) {
                        restart_array(mddev);
-                       if (mddev->external)
-                               clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                } else {
@@ -6572,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_CHANGE_PENDING, &mddev->flags);
                        md_wakeup_thread(mddev->thread);
                        did_change = 1;
                }
@@ -6580,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        wait_event(mddev->sb_wait,
-                  !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
                   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 }
 
@@ -6616,6 +6612,7 @@ int md_allow_write(mddev_t *mddev)
        if (mddev->in_sync) {
                mddev->in_sync = 0;
                set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               set_bit(MD_CHANGE_PENDING, &mddev->flags);
                if (mddev->safemode_delay &&
                    mddev->safemode == 0)
                        mddev->safemode = 1;
@@ -6625,7 +6622,7 @@ int md_allow_write(mddev_t *mddev)
        } else
                spin_unlock_irq(&mddev->write_lock);
 
-       if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+       if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                return -EAGAIN;
        else
                return 0;
@@ -6823,8 +6820,7 @@ void md_do_sync(mddev_t *mddev)
                                   atomic_read(&mddev->recovery_active) == 0);
                        mddev->curr_resync_completed =
                                mddev->curr_resync;
-                       if (mddev->persistent)
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
                }
 
@@ -7103,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev)
                            mddev->recovery_cp == MaxSector) {
                                mddev->in_sync = 1;
                                did_change = 1;
-                               if (mddev->persistent)
-                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                        }
                        if (mddev->safemode == 1)
                                mddev->safemode = 0;
index a953fe2808ae7ef17b1046b6cff6bc6276c1b8c1..3931299788dcefe14385980c584321289cedc8db 100644 (file)
@@ -140,7 +140,7 @@ struct mddev_s
        unsigned long                   flags;
 #define MD_CHANGE_DEVS 0       /* Some device status has changed */
 #define MD_CHANGE_CLEAN 1      /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2    /* superblock update in progress */
+#define MD_CHANGE_PENDING 2    /* switch from 'clean' to 'active' in progress */
 
        int                             suspended;
        atomic_t                        active_io;
index bd2755e8d9a3d327f7ae54bedd0e7820f85ea5fe..f332c52968b75d7528ee8c5f21eaf561a76d373d 100644 (file)
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
                goto err;
        }
 
-       err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid);
-
-       if (!err) {
+       if (ocr & R4_MEMORY_PRESENT
+           && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
                card->type = MMC_TYPE_SD_COMBO;
 
                if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
index 5f3a599ead07bbdfae11f7abc0198285e743db81..87226cd202a5086f7d90699f0a43d6d4e99725a1 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/clk.h>
 #include <linux/atmel_pdc.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <linux/mmc/host.h>
 
index 9a68ff4353a2e83878fce5429afe9351140daf57..5a950b16d9e629dc3d08041bda547b165cadee76 100644 (file)
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
 
                while (delay--) {
                        reg = readw(host->base + MMC_REG_STATUS);
-                       if (reg & STATUS_CARD_BUS_CLK_RUN)
+                       if (reg & STATUS_CARD_BUS_CLK_RUN) {
                                /* Check twice before cut */
                                reg = readw(host->base + MMC_REG_STATUS);
                                if (reg & STATUS_CARD_BUS_CLK_RUN)
                                        return 0;
+                       }
 
                        if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
                                return 0;
index 4a8776f8afdd690048c69de91e755d35d2c884a5..4526d2791f2990229acbe9ef0f5c88286819807f 100644 (file)
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
        int ret = 0;
        struct platform_device *pdev = to_platform_device(dev);
        struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
-       pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
 
        if (host && host->suspended)
                return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
                        }
                }
                cancel_work_sync(&host->mmc_carddetect_work);
-               mmc_host_enable(host->mmc);
                ret = mmc_suspend_host(host->mmc);
+               mmc_host_enable(host->mmc);
                if (ret == 0) {
                        omap_hsmmc_disable_irq(host);
                        OMAP_HSMMC_WRITE(host->base, HCTL,
index 2e16e0a90a5e1a5d8d3d7487727baa39d701b8fb..976330de379ecc78cbe91f19c4bd9495d4722dae 100644 (file)
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
        host->pio_active        = XFER_NONE;
 
 #ifdef CONFIG_MMC_S3C_PIODMA
-       host->dodma             = host->pdata->dma;
+       host->dodma             = host->pdata->use_dma;
 #endif
 
        host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index ee7d0a5a51c496cb92b04e7b9bc8172f8a233875..69d98e3bf6abaa3c784d1d70f171387b3142eb64 100644 (file)
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_data *data = host->data;
+       void *sg_virt;
        unsigned short *buf;
        unsigned int count;
        unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
                return;
        }
 
-       buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
-             host->sg_off);
+       sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+       buf = (unsigned short *)(sg_virt + host->sg_off);
 
        count = host->sg_ptr->length - host->sg_off;
        if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 
        host->sg_off += count;
 
-       tmio_mmc_kunmap_atomic(host, &flags);
+       tmio_mmc_kunmap_atomic(sg_virt, &flags);
 
        if (host->sg_off == host->sg_ptr->length)
                tmio_mmc_next_sg(host);
index 64f7d5dfc106ac7b39e37842c5eca9a898dbbb06..0fedc78e3ea5c4613767d7d31e534143b4bf1780 100644 (file)
 
 #define ack_mmc_irqs(host, i) \
        do { \
-               u32 mask;\
-               mask  = sd_ctrl_read32((host), CTL_STATUS); \
-               mask &= ~((i) & TMIO_MASK_IRQ); \
-               sd_ctrl_write32((host), CTL_STATUS, mask); \
+               sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
        } while (0)
 
 
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
        return --host->sg_len;
 }
 
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
        unsigned long *flags)
 {
-       struct scatterlist *sg = host->sg_ptr;
-
        local_irq_save(*flags);
        return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
 }
 
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
        unsigned long *flags)
 {
-       kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+       kunmap_atomic(virt, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
 }
 
index 2246f154e2f7a6f57317cf2232070f39ada5d4dc..61f6e5e404582cb26cae8cebd563f0a61869d9f2 100644 (file)
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
        depends on SYSFS
        depends on MTD_UBI
        select DEBUG_FS
-       select KALLSYMS_ALL
+       select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
        help
          This option enables UBI debugging.
 
index 4dfa6b90c21c30566d35939d7d3837327495f208..3d2d1a69e9a084b01c43ed2b23c75679cdebcaef 100644 (file)
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
                        goto out_free;
                }
 
-               re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
-               if (!re) {
+               re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
+               if (!re1) {
                        err = -ENOMEM;
                        ubi_close_volume(desc);
                        goto out_free;
                }
 
-               re->remove = 1;
-               re->desc = desc;
-               list_add(&re->list, &rename_list);
+               re1->remove = 1;
+               re1->desc = desc;
+               list_add(&re1->list, &rename_list);
                dbg_msg("will remove volume %d, name \"%s\"",
-                       re->desc->vol->vol_id, re->desc->vol->name);
+                       re1->desc->vol->vol_id, re1->desc->vol->name);
        }
 
        mutex_lock(&ubi->device_mutex);
index 372a15ac9995a64c7fc8f8084c75cb59ba4251d1..69b52e9c9489f961a8c11245b7fe90258078837e 100644 (file)
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
                case UBI_COMPAT_DELETE:
                        ubi_msg("\"delete\" compatible internal volume %d:%d"
                                " found, will remove it", vol_id, lnum);
-                       err = add_to_list(si, pnum, ec, &si->corr);
+                       err = add_to_list(si, pnum, ec, &si->erase);
                        if (err)
                                return err;
                        return 0;
index ee7b1d8fbb92c9d03fbe8026100378e100b4ffc1..97a435672eafac554bd76eeb0a8efe4422602a5e 100644 (file)
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
 retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
-       if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
+       if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+                                  in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
index c685a55fc2f415a7cf376344753366098603c78b..85671adae455dc59bd29405aa2b196e5b0ebd9c2 100644 (file)
@@ -647,7 +647,7 @@ struct vortex_private {
        u16 io_size;                                            /* Size of PCI region (for release_region) */
 
        /* Serialises access to hardware other than MII and variables below.
-        * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */
+        * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
        spinlock_t lock;
 
        spinlock_t mii_lock;            /* Serialises access to MII */
@@ -1994,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
                }
        }
 
-       if (status & RxEarly) {                         /* Rx early is unused. */
-               vortex_rx(dev);
+       if (status & RxEarly)                           /* Rx early is unused. */
                iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
-       }
+
        if (status & StatsFull) {                       /* Empty statistics. */
                static int DoneDidThat;
                if (vortex_debug > 4)
@@ -2298,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
                if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
                        if (status == 0xffff)
                                break;
+                       if (status & RxEarly)
+                               vortex_rx(dev);
+                       spin_unlock(&vp->window_lock);
                        vortex_error(dev, status);
+                       spin_lock(&vp->window_lock);
+                       window_set(vp, 7);
                }
 
                if (--work_done < 0) {
@@ -2984,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        int err;
        struct vortex_private *vp = netdev_priv(dev);
-       unsigned long flags;
        pci_power_t state = 0;
 
        if(VORTEX_PCI(vp))
@@ -2994,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
        if(state != 0)
                pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
-       spin_lock_irqsave(&vp->lock, flags);
        err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
-       spin_unlock_irqrestore(&vp->lock, flags);
        if(state != 0)
                pci_set_power_state(VORTEX_PCI(vp), state);
 
index 37617abc164769aa5f8be38a797ea4337a3585e0..1e620e287ae0cc9fbe9894a105fedbfdc9fbb6be 100644 (file)
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
+       if (bp->istat & ISTAT_RFO) {    /* fast recovery, in ~20msec */
+               bp->istat &= ~ISTAT_RFO;
+               b44_disable_ints(bp);
+               ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+               b44_init_rings(bp);
+               b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+               netif_wake_queue(bp->dev);
+       }
+
        spin_unlock_irqrestore(&bp->lock, flags);
 
        work_done = 0;
index 99197bd54da558ef26cf8a62a54af19aaa02e0f8..53306bf3f401bee193fc89f5c6c4d1b35759ecb0 100644 (file)
@@ -181,6 +181,7 @@ struct be_drvr_stats {
        u64 be_rx_bytes_prev;
        u64 be_rx_pkts;
        u32 be_rx_rate;
+       u32 be_rx_mcast_pkt;
        /* number of non ether type II frames dropped where
         * frame len > length field of Mac Hdr */
        u32 be_802_3_dropped_frames;
index 3d305494a6066fb987510abebe34a1332f80c114..34abcc9403d6b76428416412904b4c06ff8d593b 100644 (file)
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
-                       BUG_ON(!is_link_state_evt(compl->flags));
-
-                       /* Interpret compl as a async link evt */
-                       be_async_link_state_process(adapter,
+                       if (is_link_state_evt(compl->flags))
+                               be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                                *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
-                       be_dump_ue(adapter);
+                       be_detect_dump_ue(adapter);
                        return -1;
                }
 
index bdc10a28cfda9feb11ffdecee141a6e333a9e92d..ad1e6fac60c58869e074609cee3e363672bfecd9 100644 (file)
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 extern int be_cmd_get_phy_info(struct be_adapter *adapter,
                struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_dump_ue(struct be_adapter *adapter);
+extern void be_detect_dump_ue(struct be_adapter *adapter);
 
index cd16243c7c364a858d849ac3f70ac10bbbfcb966..13f0abbc520550b0b22ef48d0ed2a4da56d69187 100644 (file)
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(be_rx_events)},
        {DRVSTAT_INFO(be_tx_compl)},
        {DRVSTAT_INFO(be_rx_compl)},
+       {DRVSTAT_INFO(be_rx_mcast_pkt)},
        {DRVSTAT_INFO(be_ethrx_post_fail)},
        {DRVSTAT_INFO(be_802_3_dropped_frames)},
        {DRVSTAT_INFO(be_802_3_malformed_frames)},
index 5d38046402b235d255b529bb96c3cd07806fe3a9..a2ec5df0d73340bf82e45ab3d50f25402fcaa9b6 100644 (file)
 #define FLASH_FCoE_BIOS_START_g3           (13631488)
 #define FLASH_REDBOOT_START_g3             (262144)
 
-
-
+/************* Rx Packet Type Encoding **************/
+#define BE_UNICAST_PACKET              0
+#define BE_MULTICAST_PACKET            1
+#define BE_BROADCAST_PACKET            2
+#define BE_RSVD_PACKET                 3
 
 /*
  * BE descriptors: host memory data structures whose formats
index 74e146f470c60e9df5ff01806623a0aaaaa0ec82..6eda7a02225623943a35293cada545b8d20d752b 100644 (file)
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
        dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
        dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
        dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
+       dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
 
        /* bad pkts received */
        dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
        /* no space available in linux */
        dev_stats->tx_dropped = 0;
 
-       dev_stats->multicast = port_stats->rx_multicast_frames;
        dev_stats->collisions = 0;
 
        /* detailed tx_errors */
@@ -848,7 +848,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
 }
 
 static void be_rx_stats_update(struct be_adapter *adapter,
-               u32 pktsize, u16 numfrags)
+               u32 pktsize, u16 numfrags, u8 pkt_type)
 {
        struct be_drvr_stats *stats = drvr_stats(adapter);
 
@@ -856,6 +856,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
        stats->be_rx_frags += numfrags;
        stats->be_rx_bytes += pktsize;
        stats->be_rx_pkts++;
+
+       if (pkt_type == BE_MULTICAST_PACKET)
+               stats->be_rx_mcast_pkt++;
 }
 
 static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -925,9 +928,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        u16 rxq_idx, i, j;
        u32 pktsize, hdr_len, curr_frag_len, size;
        u8 *start;
+       u8 pkt_type;
 
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        page_info = get_rx_page_info(adapter, rxq_idx);
 
@@ -993,7 +998,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
        BUG_ON(j > MAX_SKB_FRAGS);
 
 done:
-       be_rx_stats_update(adapter, pktsize, num_rcvd);
+       be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
 }
 
 /* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1060,6 +1065,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
        u16 i, rxq_idx = 0, vid, j;
        u8 vtm;
+       u8 pkt_type;
 
        num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
        /* Is it a flush compl that has no data */
@@ -1070,6 +1076,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+       pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
 
        /* vlanf could be wrongly set in some cards.
         * ignore if vtm is not set */
@@ -1125,7 +1132,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
                vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
        }
 
-       be_rx_stats_update(adapter, pkt_size, num_rcvd);
+       be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
 }
 
 static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1743,26 +1750,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
        return 1;
 }
 
-static inline bool be_detect_ue(struct be_adapter *adapter)
-{
-       u32 online0 = 0, online1 = 0;
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
-
-       pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
-
-       if (!online0 || !online1) {
-               adapter->ue_detected = true;
-               dev_err(&adapter->pdev->dev,
-                       "UE Detected!! online0=%d online1=%d\n",
-                       online0, online1);
-               return true;
-       }
-
-       return false;
-}
-
-void be_dump_ue(struct be_adapter *adapter)
+void be_detect_dump_ue(struct be_adapter *adapter)
 {
        u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
        u32 i;
@@ -1779,6 +1767,11 @@ void be_dump_ue(struct be_adapter *adapter)
        ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
        ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
 
+       if (ue_status_lo || ue_status_hi) {
+               adapter->ue_detected = true;
+               dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+       }
+
        if (ue_status_lo) {
                for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
                        if (ue_status_lo & 1)
@@ -1814,10 +1807,8 @@ static void be_worker(struct work_struct *work)
                adapter->rx_post_starved = false;
                be_post_rx_frags(adapter);
        }
-       if (!adapter->ue_detected) {
-               if (be_detect_ue(adapter))
-                       be_dump_ue(adapter);
-       }
+       if (!adapter->ue_detected)
+               be_detect_dump_ue(adapter);
 
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
index 2cc4cfc31892cd85458dec20b6c46401fdb90d9b..3b16f62d5606c741e97fb0a7c7b0b211c903157d 100644 (file)
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         *       so it can wait
         */
        bond_for_each_slave(bond, slave, i) {
+               unsigned long trans_start = dev_trans_start(slave->dev);
+
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) &&
-                           time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + delta_in_ticks) &&
+                           time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_UP;
                                slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
                         * when the source ip is 0, so don't take the link down
                         * if we don't know our ip yet
                         */
-                       if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) ||
-                           (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) {
+                       if (!time_in_range(jiffies,
+                               trans_start - delta_in_ticks,
+                               trans_start + 2 * delta_in_ticks) ||
+                           !time_in_range(jiffies,
+                               slave->dev->last_rx - delta_in_ticks,
+                               slave->dev->last_rx + 2 * delta_in_ticks)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i, commit = 0;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                if (slave->link != BOND_LINK_UP) {
-                       if (time_before_eq(jiffies, slave_last_rx(bond, slave) +
-                                          delta_in_ticks)) {
+                       if (time_in_range(jiffies,
+                               slave_last_rx(bond, slave) - delta_in_ticks,
+                               slave_last_rx(bond, slave) + delta_in_ticks)) {
+
                                slave->new_link = BOND_LINK_UP;
                                commit++;
                        }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * active.  This avoids bouncing, as the last receive
                 * times need a full ARP monitor cycle to be updated.
                 */
-               if (!time_after_eq(jiffies, slave->jiffies +
-                                  2 * delta_in_ticks))
+               if (time_in_range(jiffies,
+                                 slave->jiffies - delta_in_ticks,
+                                 slave->jiffies + 2 * delta_in_ticks))
                        continue;
 
                /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 */
                if (slave->state == BOND_STATE_BACKUP &&
                    !bond->current_arp_slave &&
-                   time_after(jiffies, slave_last_rx(bond, slave) +
-                              3 * delta_in_ticks)) {
+                   !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
                 * - (more than 2*delta since receive AND
                 *    the bond has an IP address)
                 */
+               trans_start = dev_trans_start(slave->dev);
                if ((slave->state == BOND_STATE_ACTIVE) &&
-                   (time_after_eq(jiffies, dev_trans_start(slave->dev) +
-                                   2 * delta_in_ticks) ||
-                     (time_after_eq(jiffies, slave_last_rx(bond, slave)
-                                    + 2 * delta_in_ticks)))) {
+                   (!time_in_range(jiffies,
+                       trans_start - delta_in_ticks,
+                       trans_start + 2 * delta_in_ticks) ||
+                    !time_in_range(jiffies,
+                       slave_last_rx(bond, slave) - delta_in_ticks,
+                       slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
+
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
                }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
 {
        struct slave *slave;
        int i;
+       unsigned long trans_start;
 
        bond_for_each_slave(bond, slave, i) {
                switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
                        continue;
 
                case BOND_LINK_UP:
+                       trans_start = dev_trans_start(slave->dev);
                        if ((!bond->curr_active_slave &&
-                            time_before_eq(jiffies,
-                                           dev_trans_start(slave->dev) +
-                                           delta_in_ticks)) ||
+                            time_in_range(jiffies,
+                                          trans_start - delta_in_ticks,
+                                          trans_start + delta_in_ticks)) ||
                            bond->curr_active_slave != slave) {
                                slave->link = BOND_LINK_UP;
                                bond->current_arp_slave = NULL;
index b4fb07a6f13ffd489c957816eebb4f831157ccd1..51919fcd50c26e2c0c6b8c23ba393887eca50254 100644 (file)
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                ks8851_wrreg16(ks, KS_RXQCR,
                               ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
 
-               if (rxlen > 0) {
-                       skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8);
-                       if (!skb) {
-                               /* todo - dump frame and move on */
-                       }
+               if (rxlen > 4) {
+                       unsigned int rxalign;
+
+                       rxlen -= 4;
+                       rxalign = ALIGN(rxlen, 4);
+                       skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
+                       if (skb) {
 
-                       /* two bytes to ensure ip is aligned, and four bytes
-                        * for the status header and 4 bytes of garbage */
-                       skb_reserve(skb, 2 + 4 + 4);
+                               /* 4 bytes of status header + 4 bytes of
+                                * garbage: we put them before ethernet
+                                * header, so that they are copied,
+                                * but ignored.
+                                */
 
-                       rxpkt = skb_put(skb, rxlen - 4) - 8;
+                               rxpkt = skb_put(skb, rxlen) - 8;
 
-                       /* align the packet length to 4 bytes, and add 4 bytes
-                        * as we're getting the rx status header as well */
-                       ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
+                               ks8851_rdfifo(ks, rxpkt, rxalign + 8);
 
-                       if (netif_msg_pktdata(ks))
-                               ks8851_dbg_dumpkkt(ks, rxpkt);
+                               if (netif_msg_pktdata(ks))
+                                       ks8851_dbg_dumpkkt(ks, rxpkt);
 
-                       skb->protocol = eth_type_trans(skb, ks->netdev);
-                       netif_rx(skb);
+                               skb->protocol = eth_type_trans(skb, ks->netdev);
+                               netif_rx(skb);
 
-                       ks->netdev->stats.rx_packets++;
-                       ks->netdev->stats.rx_bytes += rxlen - 4;
+                               ks->netdev->stats.rx_packets++;
+                               ks->netdev->stats.rx_bytes += rxlen;
+                       }
                }
 
                ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
index bc695d53cdccbcca3eb1c2ccb30018acb3c8bde6..fe6983af6918fe37cdb5589907346557ddeeb8df 100644 (file)
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
        struct niu_parent *parent = np->parent;
        struct niu_tcam_entry *tp;
        int i, idx, cnt;
-       u16 n_entries;
        unsigned long flags;
-
+       int ret = 0;
 
        /* put the tcam size here */
        nfc->data = tcam_get_size(np);
 
        niu_lock_parent(np, flags);
-       n_entries = nfc->rule_cnt;
        for (cnt = 0, i = 0; i < nfc->data; i++) {
                idx = tcam_get_index(np, i);
                tp = &parent->tcam[idx];
                if (!tp->valid)
                        continue;
+               if (cnt == nfc->rule_cnt) {
+                       ret = -EMSGSIZE;
+                       break;
+               }
                rule_locs[cnt] = i;
                cnt++;
        }
        niu_unlock_parent(np, flags);
 
-       if (n_entries != cnt) {
-               /* print warning, this should not happen */
-               netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
-                           np->parent->index, __func__, n_entries, cnt);
-       }
-
-       return 0;
+       return ret;
 }
 
 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
index c3edfe4c26516fc14780ee19183fc6db95fbf273..49279b0ee526a54a534c6f8c04e7c48aa4aba0d0 100644 (file)
@@ -1637,6 +1637,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
        PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
        PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+       PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
        PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
        PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
        PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
index 410ea0a613718af8c42ac02fdbd3def788757ee4..85eddda276bdf350f17660fd2361b86e059b9001 100644 (file)
@@ -1606,6 +1606,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
 
        iounmap(pep->base);
        pep->base = NULL;
+       mdiobus_unregister(pep->smi_bus);
+       mdiobus_free(pep->smi_bus);
        unregister_netdev(dev);
        flush_scheduled_work();
        free_netdev(dev);
index bbb7951b9c4c34bb3992deed9c7a8e84ff52f68e..ea0461eb2dbe4314c223ab2ad47f17ea9b90e740 100644 (file)
@@ -1865,15 +1865,15 @@ static int stmmac_resume(struct platform_device *pdev)
        if (!netif_running(dev))
                return 0;
 
-       spin_lock(&priv->lock);
-
        if (priv->shutdown) {
                /* Re-open the interface and re-init the MAC/DMA
-                  and the rings. */
+                  and the rings (i.e. on hibernation stage) */
                stmmac_open(dev);
-               goto out_resume;
+               return 0;
        }
 
+       spin_lock(&priv->lock);
+
        /* Power Down bit, into the PM register, is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -1901,7 +1901,6 @@ static int stmmac_resume(struct platform_device *pdev)
 
        netif_start_queue(dev);
 
-out_resume:
        spin_unlock(&priv->lock);
        return 0;
 }
index 8ed30fa35d0a5d789121eb3042c4b87a5e53a7f2..b2bcf99e6f087ab1dfca2aef092233eea9aa5fe6 100644 (file)
@@ -429,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
        .ndo_get_stats = &ipheth_stats,
 };
 
-static struct device_type ipheth_type = {
-       .name   = "wwan",
-};
-
 static int ipheth_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
 {
@@ -450,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
        netdev->netdev_ops = &ipheth_netdev_ops;
        netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-       strcpy(netdev->name, "wwan%d");
+       strcpy(netdev->name, "eth%d");
 
        dev = netdev_priv(netdev);
        dev->udev = udev;
@@ -500,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
 
        SET_NETDEV_DEV(netdev, &intf->dev);
        SET_ETHTOOL_OPS(netdev, &ops);
-       SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
 
        retval = register_netdev(netdev);
        if (retval) {
index fd69095ef6e33d61698556abac79a4e84429c8fe..f53412368ce1e1745a7796a8cc8181b16027e797 100644 (file)
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+               NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
        ret = register_netdev(dev);
        if (ret < 0)
index 373dcfec689c40c2bf1be9af2b9bddb7a15dcacb..d77ce9906b6ccb1bf1031b1260dfd2510fd9e116 100644 (file)
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
                        PCI_DMA_TODEVICE);
 
        rate = ieee80211_get_tx_rate(sc->hw, info);
+       if (!rate) {
+               ret = -EINVAL;
+               goto err_unmap;
+       }
 
        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                flags |= AR5K_TXDESC_NOACK;
index b883b174385b822e98cb46e0f83b8024159cf18c..057fb69ddf7fbc56e76b010a1cc21833fdc2b078 100644 (file)
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
                length = block[it+1];
                length &= 0xff;
 
-               if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+               if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
                        ath_print(common, ATH_DBG_EEPROM,
                                  "Restore at %d: spot=%d "
                                  "offset=%d length=%d\n",
index 7f48df1e2903008f0a68bf991817bee64edf5b81..0b09db0f8e7d99b1c22c94803884a67043e2187e 100644 (file)
@@ -62,7 +62,7 @@
 
 #define SD_NO_CTL               0xE0
 #define NO_CTL                  0xff
-#define CTL_MODE_M              7
+#define CTL_MODE_M              0xf
 #define CTL_11A                 0
 #define CTL_11B                 1
 #define CTL_11G                 2
index a1c39526161a9d663964638ebdc4206e95928b8a..345dd9721b415972d82d8309ce04aeec6c6bbc37 100644 (file)
@@ -31,7 +31,6 @@ enum ctl_group {
 #define NO_CTL 0xff
 #define SD_NO_CTL               0xE0
 #define NO_CTL                  0xff
-#define CTL_MODE_M              7
 #define CTL_11A                 0
 #define CTL_11B                 1
 #define CTL_11G                 2
index ba854c70ab9455b8e18a63fa254e18671f447232..87b634978b357797ed6ceb22f7dec9643f6c7054 100644 (file)
@@ -128,7 +128,7 @@ struct if_sdio_card {
        bool                    helper_allocated;
        bool                    firmware_allocated;
 
-       u8                      buffer[65536];
+       u8                      buffer[65536] __attribute__((aligned(4)));
 
        spinlock_t              lock;
        struct if_sdio_packet   *packets;
index 173aec3d6e7eba64896da75ceaf42436489f3fe8..0e937dc0c9c41df6cff985c6b538f6cb17cf1091 100644 (file)
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
        }
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
-            (!payload->status))
+            !(payload->status & P54_TX_FAILED))
                info->flags |= IEEE80211_TX_STAT_ACK;
        if (payload->status & P54_TX_PSM_CANCELLED)
                info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
index a9352b2c7ac430d4e4aafac3d65a1b46005ea505..b7e755f4178ad885332ccaaeeb5eda492e6dfcfd 100644 (file)
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
 };
 
-
-static void end_sync(void)
-{
-       end_cpu_work();
-       /* make sure we don't leak task structs */
-       process_task_mortuary();
-       process_task_mortuary();
-}
-
-
 int sync_start(void)
 {
        int err;
@@ -158,7 +148,7 @@ int sync_start(void)
        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;
 
-       start_cpu_work();
+       mutex_lock(&buffer_mutex);
 
        err = task_handoff_register(&task_free_nb);
        if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
        if (err)
                goto out4;
 
+       start_cpu_work();
+
 out:
+       mutex_unlock(&buffer_mutex);
        return err;
 out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
 out2:
        task_handoff_unregister(&task_free_nb);
 out1:
-       end_sync();
        free_cpumask_var(marked_cpus);
        goto out;
 }
@@ -190,11 +182,20 @@ out1:
 
 void sync_stop(void)
 {
+       /* flush buffers */
+       mutex_lock(&buffer_mutex);
+       end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
-       end_sync();
+       mutex_unlock(&buffer_mutex);
+       flush_scheduled_work();
+
+       /* make sure we don't leak task structs */
+       process_task_mortuary();
+       process_task_mortuary();
+
        free_cpumask_var(marked_cpus);
 }
 
index 219f79e2210a3fcd561b94456c4960a0a1fbacd9..f179ac2ea80149423034d66a90b1e81251c5069b 100644 (file)
@@ -120,8 +120,6 @@ void end_cpu_work(void)
 
                cancel_delayed_work(&b->work);
        }
-
-       flush_scheduled_work();
 }
 
 /*
index 45fcc1e96df9ac08718696e7ea8a8f487f33072f..3bc72d18b121d3cff5ebcb7c8c05a5a06cdff78a 100644 (file)
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
        acpi_handle chandle, handle;
        struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-       flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-                 OSC_SHPC_NATIVE_HP_CONTROL |
-                 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+       flags &= OSC_SHPC_NATIVE_HP_CONTROL;
        if (!flags) {
                err("Invalid flags %u specified!\n", flags);
                return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
                acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
                dbg("Trying to get hotplug control for %s\n",
                                (char *)string.pointer);
-               status = acpi_pci_osc_control_set(handle, flags);
+               status = acpi_pci_osc_control_set(handle, &flags, flags);
                if (ACPI_SUCCESS(status))
                        goto got_one;
                if (status == AE_SUPPORT)
index 4ed76b47b6dcbd6358e083b11d5f8e823daa9cdb..73d5139892639bf798767ee927c9f6f4c26da013 100644 (file)
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
 {
        pciehp_acpi_slot_detection_init();
 }
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
-       int retval;
-       u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-                    OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
-       if (retval)
-               return retval;
-       return pciehp_acpi_slot_detection_check(dev);
-}
 #else
 #define pciehp_firmware_init()                         do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev)    0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+       return 0;
+}
 #endif                                 /* CONFIG_ACPI */
 #endif                         /* _PCIEHP_H */
index 1f4000a5a108fa8d9f57383ffc03f9313c9fc2c0..2574700db461559717a95e0623c1c58c5821c687 100644 (file)
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
        acpi_handle handle;
        struct dummy_slot *slot, *tmp;
        struct pci_dev *pdev = dev->port;
-       /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
-       if (pciehp_get_hp_hw_control_from_firmware(pdev))
-               return -ENODEV;
+
        pos = pci_pcie_cap(pdev);
        if (!pos)
                return -ENODEV;
index 3588ea61b0dd244df58ac3fdc92adc61cdc512be..aa5f3ff629ff4b798d3b2e473e3e6dfb025be823 100644 (file)
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
 MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
 MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
 
 #define PCIE_MODULE_NAME "pciehp"
 
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
                dev_info(&dev->device,
                         "Bypassing BIOS check for pciehp use on %s\n",
                         pci_name(dev->port));
-       else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+       else if (pciehp_acpi_slot_detection_check(dev->port))
                goto err_out_none;
 
        ctrl = pcie_init(dev);
index 679c39de6a89124e96a36252f32e8ff45991b7aa..7754a678ab15cc77445d396aa59409658bf8006f 100644 (file)
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 
 #ifdef CONFIG_PCIEAER
 void pci_no_aer(void);
+bool pci_aer_available(void);
 #else
 static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
 #endif
 
 static inline int pci_no_d1d2(struct pci_dev *dev)
index ea654545e7c468c876e7d7b2a7eb637d53c168b9..00c62df5a9fc4773405c74aca96d894f7515c108 100644 (file)
@@ -6,10 +6,11 @@
 obj-$(CONFIG_PCIEASPM)         += aspm.o
 
 pcieportdrv-y                  := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI)     += portdrv_acpi.o
 
 obj-$(CONFIG_PCIEPORTBUS)      += pcieportdrv.o
 
 # Build PCI Express AER if needed
 obj-$(CONFIG_PCIEAER)          += aer/
 
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
index 484cc55194b8841809218cb15d5f2ccccdffd582..f409948e1a9bb614cfe5fedc1dc652285f8e0307 100644 (file)
@@ -72,6 +72,11 @@ void pci_no_aer(void)
        pcie_aer_disable = 1;   /* has priority over 'forceload' */
 }
 
+bool pci_aer_available(void)
+{
+       return !pcie_aer_disable && pci_msi_enabled();
+}
+
 static int set_device_error_reporting(struct pci_dev *dev, void *data)
 {
        bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
  */
 static int __init aer_service_init(void)
 {
-       if (pcie_aer_disable)
-               return -ENXIO;
-       if (!pci_msi_enabled())
+       if (!pci_aer_available())
                return -ENXIO;
        return pcie_port_service_register(&aerdriver);
 }
index f278d7b0d95d32b636260cddbe763aa109fbfacf..2bb9b8972211073564b9d99b3cc85a148d79e583 100644 (file)
 #include <acpi/apei.h>
 #include "aerdrv.h"
 
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
-       acpi_status status = AE_NOT_FOUND;
-       struct pci_dev *pdev = pciedev->port;
-       acpi_handle handle = NULL;
-
-       if (acpi_pci_disabled)
-               return -1;
-
-       handle = acpi_find_root_bridge_handle(pdev);
-       if (handle) {
-               status = acpi_pci_osc_control_set(handle,
-                                       OSC_PCI_EXPRESS_AER_CONTROL |
-                                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       }
-
-       if (ACPI_FAILURE(status)) {
-               dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
-                          "init device: %s\n",
-                          (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-                          "no _OSC support" : "_OSC failed");
-               return -1;
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_ACPI_APEI
 static inline int hest_match_pci(struct acpi_hest_aer_common *p,
                                 struct pci_dev *pci)
index fc0b5a93e1deaf3895ef671ae1f30892980c9352..29e268fadf14cb44d18fac7570e118ce7ba489c9 100644 (file)
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
  */
 int aer_init(struct pcie_device *dev)
 {
-       if (pcie_aer_get_firmware_first(dev->port)) {
-               dev_printk(KERN_DEBUG, &dev->device,
-                          "PCIe errors handled by platform firmware.\n");
-               goto out;
-       }
-
-       if (aer_osc_setup(dev))
-               goto out;
-
-       return 0;
-out:
        if (forceload) {
                dev_printk(KERN_DEBUG, &dev->device,
                           "aerdrv forceload requested.\n");
                pcie_aer_force_firmware_first(dev->port, 0);
-               return 0;
        }
-       return -ENXIO;
+       return 0;
 }
similarity index 83%
rename from drivers/pci/pcie/pme/pcie_pme.c
rename to drivers/pci/pcie/pme.c
index bbdea18693d94048fc2444dcb388c0a18817fc6e..2f3c904072273fcdaf5c5a660405281720dff7aa 100644 (file)
 #include <linux/pci-acpi.h>
 #include <linux/pm_runtime.h>
 
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
 
 #define PCI_EXP_RTSTA_PME      0x10000 /* PME status */
 #define PCI_EXP_RTSTA_PENDING  0x20000 /* PME pending */
 
-/*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered.  Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default.  PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling."  However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling.  The
- * switch below can be used to indicate the desired behaviour.  When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway.  When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
 /*
  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
  * causes the PCIe port driver to use INTx interrupts only, but it turns out
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
 
 static int __init pcie_pme_setup(char *str)
 {
-       if (!strncmp(str, "auto", 4))
-               pcie_pme_disabled = false;
-       else if (!strncmp(str, "force", 5))
-               pcie_pme_force_enable = true;
-
-       str = strchr(str, ',');
-       if (str) {
-               str++;
-               str += strspn(str, " \t");
-               if (*str && !strcmp(str, "nomsi"))
-                       pcie_pme_msi_disabled = true;
-       }
+       if (!strncmp(str, "nomsi", 5))
+               pcie_pme_msi_disabled = true;
 
        return 1;
 }
 __setup("pcie_pme=", pcie_pme_setup);
 
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
-       if (!pcie_pme_platform_notify(srv))
-               return true;
-       return pcie_pme_force_enable;
-}
-
 struct pcie_pme_service_data {
        spinlock_t lock;
        struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
  * @dev: PCIe root port or event collector.
  * @enable: Enable or disable the interrupt.
  */
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
 {
        int rtctl_pos;
        u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
        struct pcie_pme_service_data *data;
        int ret;
 
-       if (!pcie_pme_platform_setup(srv))
-               return -EACCES;
-
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
  */
 static int __init pcie_pme_service_init(void)
 {
-       return pcie_pme_disabled ?
-               -ENODEV : pcie_port_service_register(&pcie_pme_driver);
+       return pcie_port_service_register(&pcie_pme_driver);
 }
 
 module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644 (file)
index 8b92380..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs := pcie_pme.o
-pmedriver-$(CONFIG_ACPI) += pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644 (file)
index b30d2b7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-       return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-       return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644 (file)
index 83ab228..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver.  To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
-       acpi_status status = AE_NOT_FOUND;
-       struct pci_dev *port = srv->port;
-       acpi_handle handle;
-       int error = 0;
-
-       if (acpi_pci_disabled)
-               return -ENOSYS;
-
-       dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
-       handle = acpi_find_root_bridge_handle(port);
-       if (!handle)
-               return -EINVAL;
-
-       status = acpi_pci_osc_control_set(handle,
-                       OSC_PCI_EXPRESS_PME_CONTROL |
-                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-       if (ACPI_FAILURE(status)) {
-               dev_info(&port->dev,
-                       "Failed to receive control of PCIe PME service: %s\n",
-                       (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-                       "no _OSC support" : "ACPI _OSC failed");
-               error = -ENODEV;
-       }
-
-       return error;
-}
index 813a5c3427b69261d0d54d5c531ffc9f9973ce64..7b5aba0a3291107a231a8b6ee6a90cacf0c4557c 100644 (file)
@@ -20,6 +20,9 @@
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
 extern struct bus_type pcie_port_bus_type;
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
 extern int __must_check pcie_port_bus_register(void);
 extern void pcie_port_bus_unregister(void);
 
+struct pci_dev;
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
 {
        return pcie_pme_msi_disabled;
 }
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
 #else /* !CONFIG_PCIE_PME */
 static inline void pcie_pme_disable_msi(void) {}
 static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
 #endif /* !CONFIG_PCIE_PME */
 
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+       return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+       return 0;
+}
+#endif /* !CONFIG_ACPI */
+
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644 (file)
index 0000000..b7c4cb1
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device.  To avoid conflicts
+ * with the BIOS PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel.  The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+       acpi_status status;
+       acpi_handle handle;
+       u32 flags;
+
+       if (acpi_pci_disabled)
+               return 0;
+
+       handle = acpi_find_root_bridge_handle(port);
+       if (!handle)
+               return -EINVAL;
+
+       flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+               | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+               | OSC_PCI_EXPRESS_PME_CONTROL;
+
+       if (pci_aer_available()) {
+               if (pcie_aer_get_firmware_first(port))
+                       dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+               else
+                       flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+       }
+
+       status = acpi_pci_osc_control_set(handle, &flags,
+                                       OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+       if (ACPI_FAILURE(status)) {
+               dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+                       status);
+               return -ENODEV;
+       }
+
+       dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+       *srv_mask = PCIE_PORT_SERVICE_VC;
+       if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_HP;
+       if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_PME;
+       if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+               *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+       return 0;
+}
index e73effbe402c55e82be4d8a821b3d39e64f96bac..a9c222d79ebc5dc32cd6c4fad37f616ac291ff6a 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
 
 #include "../pci.h"
 #include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
        int services = 0, pos;
        u16 reg16;
        u32 reg32;
+       int cap_mask;
+       int err;
+
+       err = pcie_port_platform_notify(dev, &cap_mask);
+       if (pcie_ports_auto) {
+               if (err) {
+                       pcie_no_aspm();
+                       return 0;
+               }
+       } else {
+               cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+                               | PCIE_PORT_SERVICE_VC;
+               if (pci_aer_available())
+                       cap_mask |= PCIE_PORT_SERVICE_AER;
+       }
 
        pos = pci_pcie_cap(dev);
        pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
        /* Hot-Plug Capable */
-       if (reg16 & PCI_EXP_FLAGS_SLOT) {
+       if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
                pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
-               if (reg32 & PCI_EXP_SLTCAP_HPC)
+               if (reg32 & PCI_EXP_SLTCAP_HPC) {
                        services |= PCIE_PORT_SERVICE_HP;
+                       /*
+                        * Disable hot-plug interrupts in case they have been
+                        * enabled by the BIOS and the hot-plug service driver
+                        * is not loaded.
+                        */
+                       pos += PCI_EXP_SLTCTL;
+                       pci_read_config_word(dev, pos, &reg16);
+                       reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+                       pci_write_config_word(dev, pos, reg16);
+               }
        }
        /* AER capable */
-       if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+       if ((cap_mask & PCIE_PORT_SERVICE_AER)
+           && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
                services |= PCIE_PORT_SERVICE_AER;
+               /*
+                * Disable AER on this port in case it's been enabled by the
+                * BIOS (the AER service driver will enable it when necessary).
+                */
+               pci_disable_pcie_error_reporting(dev);
+       }
        /* VC support */
        if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
                services |= PCIE_PORT_SERVICE_VC;
        /* Root ports are capable of generating PME too */
-       if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+       if ((cap_mask & PCIE_PORT_SERVICE_PME)
+           && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
                services |= PCIE_PORT_SERVICE_PME;
+               /*
+                * Disable PME interrupt on this port in case it's been enabled
+                * by the BIOS (the PME service driver will enable it when
+                * necessary).
+                */
+               pcie_pme_interrupt_enable(dev, false);
+       }
 
        return services;
 }
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
  */
 int pcie_port_service_register(struct pcie_port_service_driver *new)
 {
+       if (pcie_ports_disabled)
+               return -ENODEV;
+
        new->driver.name = (char *)new->name;
        new->driver.bus = &pcie_port_bus_type;
        new->driver.probe = pcie_port_probe_service;
index 3debed25e46bb63f4e70e4a58b0d9abff3fd33be..f9033e190fb62060e701ebf44b0eb1d3dfbefa90 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/pcieport_if.h>
 #include <linux/aer.h>
 #include <linux/dmi.h>
+#include <linux/pci-aspm.h>
 
 #include "portdrv.h"
 #include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+       if (!strncmp(str, "compat", 6)) {
+               pcie_ports_disabled = true;
+       } else if (!strncmp(str, "native", 6)) {
+               pcie_ports_disabled = false;
+               pcie_ports_auto = false;
+       } else if (!strncmp(str, "auto", 4)) {
+               pcie_ports_disabled = false;
+               pcie_ports_auto = true;
+       }
+
+       return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
 /* global data */
 
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
 {
        int retval;
 
+       if (pcie_ports_disabled) {
+               pcie_no_aspm();
+               return -EACCES;
+       }
+
        dmi_check_system(pcie_portdrv_dmi_table);
 
        retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
        return retval;
 }
 
-static void __exit pcie_portdrv_exit(void)
-{
-       pci_unregister_driver(&pcie_portdriver);
-       pcie_port_bus_unregister();
-}
-
 module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
index 659eaa0fc48facf81dad9c0b5c430498e01398e3..968cfea04f749ea76ffcb33bb62f114ecfd9a6ef 100644 (file)
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
 }
 
 /* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
        "33 MHz PCI",           /* 0x00 */
        "66 MHz PCI",           /* 0x01 */
        "66 MHz PCI-X",         /* 0x02 */
index 72b2bcc2c22413b1a63e465e355ea65084ec7b8e..d4fb82d85e9b36ab61e1626236cb98bf76364ea2 100644 (file)
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
                enable_irq_wake(IRQ_RTC);
                bfin_rtc_sync_pending(&pdev->dev);
        } else
-               bfin_rtc_int_clear(-1);
+               bfin_rtc_int_clear(0);
 
        return 0;
 }
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
 {
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(IRQ_RTC);
-       else
-               bfin_write_RTC_ISTAT(-1);
+
+       /*
+        * Since only some of the RTC bits are maintained externally in the
+        * Vbat domain, we need to wait for the RTC MMRs to be synced into
+        * the core after waking up.  This happens every RTC 1HZ.  Once that
+        * has happened, we can go ahead and re-enable the important write
+        * complete interrupt event.
+        */
+       while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
+               continue;
+       bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
 
        return 0;
 }
index 66377f3e28b851eaa908c6057a9646a639e9c229..d60557cae8ef4fdadadb10b343125f174cb7508d 100644 (file)
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        t->time.tm_isdst = -1;
        t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
        t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
-       return rtc_valid_tm(t);
+       return 0;
 }
 
 static struct rtc_class_ops m41t80_rtc_ops = {
index 6c418fe7f288ae2deaa9f44080a749a9eaafad88..b7a6690e5b35e8744295bf212a8e0d75e0d8dd6f 100644 (file)
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
        }
 
        if (request_irq(adev->irq[0], pl031_interrupt,
-                       IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) {
+                       IRQF_DISABLED, "rtc-pl031", ldata)) {
                ret = -EIO;
                goto out_no_irq;
        }
index b7de02525ec901ebbee390b2798043eb1938f4bd..85cf607fc78f62d243ae41b20f2df018710be2e0 100644 (file)
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
        if (!blkdat->request_queue)
                return -ENOMEM;
 
-       elevator_exit(blkdat->request_queue->elevator);
-       rc = elevator_init(blkdat->request_queue, "noop");
+       rc = elevator_change(blkdat->request_queue, "noop");
        if (rc)
                goto cleanup_queue;
 
index e57fb3d228e2dd0881ab10f3410a457b46e0e405..5318dd3774ae156dc17f482a73e80f8c8a3ec77b 100644 (file)
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
        unsigned int sclk = get_sclk();
 
        /* Set TCR1 and TCR2, TFSR is not enabled for uart */
-       SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+       SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
        SPORT_PUT_TCR2(up, size + 1);
        pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
 
index c6aa52f8dcee3c5f97ea3df1a3f9374cf9a04f04..48d9fb1227df0d9d9d79c2848cc8615a6c52b837 100644 (file)
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
                p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
                p_dev->resource[0]->flags |=
                        pcmcia_io_cfg_data_width(io->flags);
-               p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
                p_dev->resource[0]->start = io->win[0].base;
                p_dev->resource[0]->end = io->win[0].len;
                if (io->nwin > 1) {
index 56e11575c97763f1747a35ae67db81b49cdd6325..64a01147ecaeffc17863bd0002df415da56da8f2 100644 (file)
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_set_multicast_list =       netvsc_set_multicast_list,
+       .ndo_change_mtu =               eth_change_mtu,
+       .ndo_validate_addr =            eth_validate_addr,
+       .ndo_set_mac_address =          eth_mac_addr,
 };
 
 static int netvsc_probe(struct device *device)
index 17bc7626f70a71d991aa2bdc9c50a960657e4f90..d78c569ac94a2aa7ae4ba6b2325dbf6d2b93c45c 100644 (file)
@@ -193,8 +193,7 @@ Description:
 static inline u64
 GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
 {
-       return ((u64)RingInfo->RingBuffer->WriteIndex << 32)
-       || RingInfo->RingBuffer->ReadIndex;
+       return (u64)RingInfo->RingBuffer->WriteIndex << 32;
 }
 
 
index 0063bde9a4b23ac22bbaad19d1912518a855c008..8505a1c5f9ee6bab72259539fa0d1c845fbba1c9 100644 (file)
 #include "vmbus_api.h"
 
 /* Defines */
-#define STORVSC_RING_BUFFER_SIZE                       (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE                       (20*PAGE_SIZE)
 #define BLKVSC_RING_BUFFER_SIZE                                (20*PAGE_SIZE)
 
-#define STORVSC_MAX_IO_REQUESTS                                64
+#define STORVSC_MAX_IO_REQUESTS                                128
 
 /*
  * In Hyper-V, each port/path/target maps to 1 scsi host adapter.  In
index 075b61bd492f19918c1ee3027994362b2b6aa900..62882a437aa45abb072fede360ffa3672dc37b9d 100644 (file)
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 
                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
 
-               if (j == 0)
+               if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                destlen = orig_sgl[i].length;
                /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
 
-               if (j == 0)
+               if (bounce_addr == 0)
                        bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
 
                while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
+       unsigned int sg_count = 0;
 
        DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
                   "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
        request->DataBuffer.Length = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
+               sg_count = scsi_sg_count(scmnd);
 
                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
                                              scsi_sg_count(scmnd));
 
                        sgl = cmd_request->bounce_sgl;
+                       sg_count = cmd_request->bounce_sgl_count;
                }
 
                request->DataBuffer.Offset = sgl[0].offset;
 
-               for (i = 0; i < scsi_sg_count(scmnd); i++) {
+               for (i = 0; i < sg_count; i++) {
                        DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
                                   i, sgl[i].length, sgl[i].offset);
                        request->DataBuffer.PfnArray[i] =
-                                       page_to_pfn(sg_page((&sgl[i])));
+                               page_to_pfn(sg_page((&sgl[i])));
                }
        } else if (scsi_sglist(scmnd)) {
                /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
index 638ad6b3589107d6f6f146e64eb6f4011d425597..9493128e5fd2d7a743521781eddcd507622310a2 100644 (file)
@@ -1,6 +1,6 @@
 config OCTEON_ETHERNET
        tristate "Cavium Networks Octeon Ethernet support"
-       depends on CPU_CAVIUM_OCTEON
+       depends on CPU_CAVIUM_OCTEON && NETDEVICES
        select PHYLIB
        select MDIO_OCTEON
        help
index a0fe31de0a6d1299c802062cff4007b971d6a594..ebf9074a90838367fc7dde6aac3cba0fe6a3e131 100644 (file)
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x07B8, 0x2870)},   /* AboCom */
        {USB_DEVICE(0x07B8, 0x2770)},   /* AboCom */
        {USB_DEVICE(0x0DF6, 0x0039)},   /* Sitecom 2770 */
+       {USB_DEVICE(0x0DF6, 0x003F)},   /* Sitecom 2770 */
        {USB_DEVICE(0x083A, 0x7512)},   /* Arcadyan 2770 */
        {USB_DEVICE(0x0789, 0x0162)},   /* Logitec 2870 */
        {USB_DEVICE(0x0789, 0x0163)},   /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x050d, 0x815c)},
        {USB_DEVICE(0x1482, 0x3C09)},   /* Abocom */
        {USB_DEVICE(0x14B2, 0x3C09)},   /* Alpha */
-       {USB_DEVICE(0x04E8, 0x2018)},   /* samsung */
+       {USB_DEVICE(0x04E8, 0x2018)},   /* samsung linkstick2 */
+       {USB_DEVICE(0x1690, 0x0740)},   /* Askey */
        {USB_DEVICE(0x5A57, 0x0280)},   /* Zinwell */
        {USB_DEVICE(0x5A57, 0x0282)},   /* Zinwell */
        {USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x1737, 0x0071)},   /* Linksys WUSB600N */
        {USB_DEVICE(0x0411, 0x00e8)},   /* Buffalo WLI-UC-G300N */
        {USB_DEVICE(0x050d, 0x815c)},   /* Belkin F5D8053 */
+       {USB_DEVICE(0x100D, 0x9031)},   /* Motorola 2770 */
 #endif /* RT2870 // */
 #ifdef RT3070
        {USB_DEVICE(0x148F, 0x3070)},   /* Ralink 3070 */
        {USB_DEVICE(0x148F, 0x3071)},   /* Ralink 3071 */
        {USB_DEVICE(0x148F, 0x3072)},   /* Ralink 3072 */
        {USB_DEVICE(0x0DB0, 0x3820)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x871C)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x822C)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x871B)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x822B)},   /* Ralink 3070 */
        {USB_DEVICE(0x0DF6, 0x003E)},   /* Sitecom 3070 */
        {USB_DEVICE(0x0DF6, 0x0042)},   /* Sitecom 3072 */
+       {USB_DEVICE(0x0DF6, 0x0048)},   /* Sitecom 3070 */
+       {USB_DEVICE(0x0DF6, 0x0047)},   /* Sitecom 3071 */
        {USB_DEVICE(0x14B2, 0x3C12)},   /* AL 3070 */
        {USB_DEVICE(0x18C5, 0x0012)},   /* Corega 3070 */
        {USB_DEVICE(0x083A, 0x7511)},   /* Arcadyan 3070 */
+       {USB_DEVICE(0x083A, 0xA701)},   /* SMC 3070 */
+       {USB_DEVICE(0x083A, 0xA702)},   /* SMC 3072 */
        {USB_DEVICE(0x1740, 0x9703)},   /* EnGenius 3070 */
        {USB_DEVICE(0x1740, 0x9705)},   /* EnGenius 3071 */
        {USB_DEVICE(0x1740, 0x9706)},   /* EnGenius 3072 */
+       {USB_DEVICE(0x1740, 0x9707)},   /* EnGenius 3070 */
+       {USB_DEVICE(0x1740, 0x9708)},   /* EnGenius 3071 */
+       {USB_DEVICE(0x1740, 0x9709)},   /* EnGenius 3072 */
        {USB_DEVICE(0x13D3, 0x3273)},   /* AzureWave 3070 */
+       {USB_DEVICE(0x13D3, 0x3305)},   /* AzureWave 3070*/
        {USB_DEVICE(0x1044, 0x800D)},   /* Gigabyte GN-WB32L 3070 */
        {USB_DEVICE(0x2019, 0xAB25)},   /* Planex Communications, Inc. RT3070 */
        {USB_DEVICE(0x07B8, 0x3070)},   /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
        {USB_DEVICE(0x07D1, 0x3C0D)},   /* D-Link 3070 */
        {USB_DEVICE(0x07D1, 0x3C0E)},   /* D-Link 3070 */
        {USB_DEVICE(0x07D1, 0x3C0F)},   /* D-Link 3070 */
+       {USB_DEVICE(0x07D1, 0x3C16)},   /* D-Link 3070 */
+       {USB_DEVICE(0x07D1, 0x3C17)},   /* D-Link 8070 */
        {USB_DEVICE(0x1D4D, 0x000C)},   /* Pegatron Corporation 3070 */
        {USB_DEVICE(0x1D4D, 0x000E)},   /* Pegatron Corporation 3070 */
        {USB_DEVICE(0x5A57, 0x5257)},   /* Zinwell 3070 */
        {USB_DEVICE(0x5A57, 0x0283)},   /* Zinwell 3072 */
        {USB_DEVICE(0x04BB, 0x0945)},   /* I-O DATA 3072 */
+       {USB_DEVICE(0x04BB, 0x0947)},   /* I-O DATA 3070 */
+       {USB_DEVICE(0x04BB, 0x0948)},   /* I-O DATA 3072 */
        {USB_DEVICE(0x203D, 0x1480)},   /* Encore 3070 */
+       {USB_DEVICE(0x20B8, 0x8888)},   /* PARA INDUSTRIAL 3070 */
+       {USB_DEVICE(0x0B05, 0x1784)},   /* Asus 3072 */
+       {USB_DEVICE(0x203D, 0x14A9)},   /* Encore 3070*/
+       {USB_DEVICE(0x0DB0, 0x899A)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x3870)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x870A)},   /* MSI 3070*/
+       {USB_DEVICE(0x0DB0, 0x6899)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3822)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3871)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x871A)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x822A)},   /* MSI 3070 */
+       {USB_DEVICE(0x0DB0, 0x3821)},   /* Ralink 3070 */
+       {USB_DEVICE(0x0DB0, 0x821A)},   /* Ralink 3070 */
+       {USB_DEVICE(0x083A, 0xA703)},   /* IO-MAGIC */
+       {USB_DEVICE(0x13D3, 0x3307)},   /* Azurewave */
+       {USB_DEVICE(0x13D3, 0x3321)},   /* Azurewave */
+       {USB_DEVICE(0x07FA, 0x7712)},   /* Edimax */
+       {USB_DEVICE(0x0789, 0x0166)},   /* Edimax */
+       {USB_DEVICE(0x148F, 0x2070)},   /* Edimax */
 #endif /* RT3070 // */
-       {USB_DEVICE(0x0DF6, 0x003F)},   /* Sitecom WL-608 */
        {USB_DEVICE(0x1737, 0x0077)},   /* Linksys WUSB54GC-EU v3 */
        {USB_DEVICE(0x2001, 0x3C09)},   /* D-Link */
        {USB_DEVICE(0x2001, 0x3C0A)},   /* D-Link 3072 */
index 5e2ffefb60af9c86c384eff40baaaacca3a279f1..d231ae27299d42b796d7c9f1dec2a83ee2e1694c 100644 (file)
@@ -2,6 +2,7 @@
 menuconfig SPECTRA
        tristate "Denali Spectra Flash Translation Layer"
        depends on BLOCK
+       depends on X86_MRST
        default n
        ---help---
          Enable the FTL pseudo-filesystem used with the NAND Flash
index 44a7fbe7eccd45282cd94ab35169331513cbde91..fa21a0fd8e84643549a5de0088874a05cc3f940b 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/log2.h>
 #include <linux/init.h>
 #include <linux/smp_lock.h>
+#include <linux/slab.h>
 
 /**** Helper functions used for Div, Remainder operation on u64 ****/
 
index 368c30a9d5ffa6da6e7a2065a279fc9825357239..4af83d5318f2705d551cd65fc898645e217e339d 100644 (file)
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
                return -ENOENT;
        params.key_len = len;
        params.key = wlandev->wep_keys[key_index];
+       params.seq_len = 0;
 
        callback(cookie, &params);
 
@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
        priv->band.n_channels = ARRAY_SIZE(prism2_channels);
        priv->band.bitrates = priv->rates;
        priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+       priv->band.band = IEEE80211_BAND_2GHZ;
+       priv->band.ht_cap.ht_supported = false;
        wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
        set_wiphy_dev(wiphy, dev);
index 77d4d715a78936b01795667215875adf17781fc2..722c840ac6387456b5825feaf8375195c8bce5ac 100644 (file)
@@ -769,6 +769,7 @@ static int __init zram_init(void)
 free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
+       kfree(devices);
 unregister:
        unregister_blkdev(zram_major, "zram");
 out:
index 593fc5e2d2e6074da4b9238b4136cc791d9dee0a..5af23cc5ea9fe6cf8c9ded25ccea29f172dc5a8e 100644 (file)
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 {
        struct cxacru_data *instance;
        struct usb_device *usb_dev = interface_to_usbdev(intf);
+       struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
        int ret;
 
        /* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
                goto fail;
        }
 
-       usb_fill_int_urb(instance->rcv_urb,
+       if (!cmd_ep) {
+               dbg("cxacru_bind: no command endpoint");
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+                       == USB_ENDPOINT_XFER_INT) {
+               usb_fill_int_urb(instance->rcv_urb,
                        usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
                        instance->rcv_buf, PAGE_SIZE,
                        cxacru_blocking_completion, &instance->rcv_done, 1);
 
-       usb_fill_int_urb(instance->snd_urb,
+               usb_fill_int_urb(instance->snd_urb,
                        usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
                        instance->snd_buf, PAGE_SIZE,
                        cxacru_blocking_completion, &instance->snd_done, 4);
+       } else {
+               usb_fill_bulk_urb(instance->rcv_urb,
+                       usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+                       instance->rcv_buf, PAGE_SIZE,
+                       cxacru_blocking_completion, &instance->rcv_done);
+
+               usb_fill_bulk_urb(instance->snd_urb,
+                       usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+                       instance->snd_buf, PAGE_SIZE,
+                       cxacru_blocking_completion, &instance->snd_done);
+       }
 
        mutex_init(&instance->cm_serialize);
 
index 1833b3a71515bb65f9be08bafb2bf329996dc71d..bc62fae0680fe4bd1ae67e5ef6af8e10374a6b49 100644 (file)
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
        }
 
        if (!buflen) {
-               if (intf->cur_altsetting->endpoint->extralen &&
+               if (intf->cur_altsetting->endpoint &&
+                               intf->cur_altsetting->endpoint->extralen &&
                                intf->cur_altsetting->endpoint->extra) {
                        dev_dbg(&intf->dev,
                                "Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
                USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
                USB_CDC_ACM_PROTO_VENDOR)
 
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+               USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+               USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+               USB_CDC_ACM_PROTO_VENDOR)
+
 /*
  * USB driver structure.
  */
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
        { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
        { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
        { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+       { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+       { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+       { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+       { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+       { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+       { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+       { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+       { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+       { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
        /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
 
@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
        .driver_info = NOT_A_MODEM,
                },
 
+       /* control interfaces without any protocol set */
+       { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+               USB_CDC_PROTO_NONE) },
+
        /* control interfaces with various AT-command sets */
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
                USB_CDC_ACM_PROTO_AT_V25TER) },
index fd4c36ea5e4688971afc9c2f32e3668429d0ae63..844683e503830485147910ff16ca035a90194d27 100644 (file)
@@ -1724,6 +1724,15 @@ free_interfaces:
        if (ret)
                goto free_interfaces;
 
+       /* if it's already configured, clear out old state first.
+        * getting rid of old interfaces means unbinding their drivers.
+        */
+       if (dev->state != USB_STATE_ADDRESS)
+               usb_disable_device(dev, 1);     /* Skip ep0 */
+
+       /* Get rid of pending async Set-Config requests for this device */
+       cancel_async_set_config(dev);
+
        /* Make sure we have bandwidth (and available HCD resources) for this
         * configuration.  Remove endpoints from the schedule if we're dropping
         * this configuration to set configuration 0.  After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
        mutex_lock(&hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
-               usb_autosuspend_device(dev);
                mutex_unlock(&hcd->bandwidth_mutex);
+               usb_autosuspend_device(dev);
                goto free_interfaces;
        }
 
-       /* if it's already configured, clear out old state first.
-        * getting rid of old interfaces means unbinding their drivers.
-        */
-       if (dev->state != USB_STATE_ADDRESS)
-               usb_disable_device(dev, 1);     /* Skip ep0 */
-
-       /* Get rid of pending async Set-Config requests for this device */
-       cancel_async_set_config(dev);
-
        ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                              USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
                              NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
        if (!cp) {
                usb_set_device_state(dev, USB_STATE_ADDRESS);
                usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
-               usb_autosuspend_device(dev);
                mutex_unlock(&hcd->bandwidth_mutex);
+               usb_autosuspend_device(dev);
                goto free_interfaces;
        }
        mutex_unlock(&hcd->bandwidth_mutex);
index 020fa5a25fda8fe7bb6c1ca23f28f90d120bd537..972d5ddd1e18043f23281b3282681c6c9eb0ea62 100644 (file)
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
        /* mandatory */
        case OID_GEN_VENDOR_DESCRIPTION:
                pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
-               length = strlen (rndis_per_dev_params [configNr].vendorDescr);
-               memcpy (outbuf,
-                       rndis_per_dev_params [configNr].vendorDescr, length);
+               if ( rndis_per_dev_params [configNr].vendorDescr ) {
+                       length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+                       memcpy (outbuf,
+                               rndis_per_dev_params [configNr].vendorDescr, length);
+               } else {
+                       outbuf[0] = 0;
+               }
                retval = 0;
                break;
 
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
 
-int __init rndis_init (void)
+int rndis_init(void)
 {
        u8 i;
 
index c236aaa9dcd174db90b7991f719bdda860658b9d..907c33008118b1e880b87fc52166237a2e1152e6 100644 (file)
@@ -262,7 +262,7 @@ int  rndis_signal_disconnect (int configNr);
 int  rndis_state (int configNr);
 extern void rndis_set_host_mac (int configNr, const u8 *addr);
 
-int __devinit rndis_init (void);
+int rndis_init(void);
 void rndis_exit (void);
 
 #endif  /* _LINUX_RNDIS_H */
index 521ebed0118d0fbdc8228ec6d9506abc17fe99e1..a229744a8c7dfa4d5f284165800a455d8f687b26 100644 (file)
@@ -12,8 +12,6 @@
  * published by the Free Software Foundation.
 */
 
-#define DEBUG
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
index 335ee699fd85bde5637fc6991809ddfdab8f506d..ba52be473027a82dd20bcb20aa724f4c4b472f78 100644 (file)
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
        }
 
        rv = usb_add_hcd(hcd, irq, 0);
-       if (rv == 0)
-               return 0;
+       if (rv)
+               goto err_ehci;
+
+       return 0;
 
+err_ehci:
+       if (ehci->has_amcc_usb23)
+               iounmap(ehci->ohci_hcctrl_reg);
        iounmap(hcd->regs);
 err_ioremap:
        irq_dispose_mapping(irq);
 err_irq:
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-
-       if (ehci->has_amcc_usb23)
-               iounmap(ehci->ohci_hcctrl_reg);
 err_rmr:
        usb_put_hcd(hcd);
 
index 80bf8333bb037a67fc8d141fd0e33449eff18886..4f1744c5871fa74443bdae1800e8376b083e561d 100644 (file)
@@ -56,6 +56,7 @@ static int debug;
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
        { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+       { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
        { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
        { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
        { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+       { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
        { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+       { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
        { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
-       { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
-       { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
-       { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
-       { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
        { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
        { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
        { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+       { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+       { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+       { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+       { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
 
index c792c96f590e80918221c78caf7227e60ececaa1..97cc87d654ce3258a931a8bfbd0bcd36a2d5622b 100644 (file)
@@ -753,6 +753,14 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
        { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
index 2e95857c9633c3f6e2e52148854ea0620a6bf43e..15a4583775ad236a359c3dec0c02c0aa7f5f7f4e 100644 (file)
 #define FTDI_NDI_FUTURE_3_PID          0xDA73  /* NDI future device #3 */
 #define FTDI_NDI_AURORA_SCU_PID                0xDA74  /* NDI Aurora SCU */
 
+/*
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+ */
+#define FTDI_CHAMSYS_24_MASTER_WING_PID        0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID       0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID       0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID     0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID     0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID    0xDAFE
+#define FTDI_CHAMSYS_WING_PID  0xDAFF
+
 /*
  * Westrex International devices submitted by Cory Lee
  */
index 585b7e6637405981e5019790bbdb0700109f87d0..1c9b6e9b2386e5032da1e3b8a095525222dc8cb5 100644 (file)
  * by making a change here, in moschip_port_id_table, and in
  * moschip_id_table_combined
  */
-#define USB_VENDOR_ID_BANDB             0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2      0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4      0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2       0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4       0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2       0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4       0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2      0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4      0xAC44
-#define BANDB_DEVICE_ID_USOPTL2_4      0xAC24
+#define USB_VENDOR_ID_BANDB              0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2        0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P       0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4        0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P       0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2         0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4         0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2         0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4         0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2        0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P       0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4        0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P       0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4        0xAC24
 
 /* This driver also supports
  * ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
        {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+       {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
        {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
        {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
index adcbdb994de3eab65e0d963538f08df6f6cf1a49..c46911af282f01c82eb8cc34e312db994173d38c 100644 (file)
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
 #define YISO_VENDOR_ID                         0x0EAB
 #define YISO_PRODUCT_U893                      0xC893
 
+/*
+ * NOVATEL WIRELESS PRODUCTS
+ *
+ * Note from Novatel Wireless:
+ * If your Novatel modem does not work on linux, don't
+ * change the option module, but check our website. If
+ * that does not help, contact ddeschepper@nvtl.com
+*/
 /* MERLIN EVDO PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_V640           0x1100
 #define NOVATELWIRELESS_PRODUCT_V620           0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_EU730          0x2400
 #define NOVATELWIRELESS_PRODUCT_EU740          0x2410
 #define NOVATELWIRELESS_PRODUCT_EU870D         0x2420
-
 /* OVATION PRODUCTS */
 #define NOVATELWIRELESS_PRODUCT_MC727          0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D         0x4400
-#define NOVATELWIRELESS_PRODUCT_U727           0x5010
-#define NOVATELWIRELESS_PRODUCT_MC727_NEW      0x5100
-#define NOVATELWIRELESS_PRODUCT_MC760          0x6000
+/*
+ * Note from Novatel Wireless:
+ * All PID in the 5xxx range are currently reserved for
+ * auto-install CDROMs, and should not be added to this
+ * module.
+ *
+ * #define NOVATELWIRELESS_PRODUCT_U727                0x5010
+ * #define NOVATELWIRELESS_PRODUCT_MC727_NEW   0x5100
+*/
 #define NOVATELWIRELESS_PRODUCT_OVMC760                0x6002
-
-/* FUTURE NOVATEL PRODUCTS */
-#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
-#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
-#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED        0X8000
-#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED        0X8001
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0X9000
-#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0X9001
-#define NOVATELWIRELESS_PRODUCT_GLOBAL         0XA001
+#define NOVATELWIRELESS_PRODUCT_MC780          0x6010
+#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
+#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
+#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3        0x7003
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4        0x7004
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5        0x7005
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6        0x7006
+#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7        0x7007
+#define NOVATELWIRELESS_PRODUCT_MC996D         0x7030
+#define NOVATELWIRELESS_PRODUCT_MF3470         0x7041
+#define NOVATELWIRELESS_PRODUCT_MC547          0x7042
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED        0x8000
+#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED        0x8001
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED        0x9000
+#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED        0x9001
+#define NOVATELWIRELESS_PRODUCT_G1             0xA001
+#define NOVATELWIRELESS_PRODUCT_G1_M           0xA002
+#define NOVATELWIRELESS_PRODUCT_G2             0xA010
 
 /* AMOI PRODUCTS */
 #define AMOI_VENDOR_ID                         0x1614
@@ -490,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
        { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */
-       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+       { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
 
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
        { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
index 68c18fdfc6da320978f9212325fd3f1ecf2ec64c..e986002b3844c07d24b292344258286434820d8b 100644 (file)
@@ -46,7 +46,7 @@
 #define FULLPWRBIT          0x00000080
 #define NEXT_BOARD_POWER_BIT        0x00000004
 
-static int debug = 1;
+static int debug;
 
 /* Version Information */
 #define DRIVER_VERSION "v0.1"
index e05557d529992ec4deb1827a32574043bcd00925..c579dcc9200ccabe0f1bcf79afed59577e4a6f38 100644 (file)
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
        return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+       INIT_LIST_HEAD(&work->node);
+       work->fn = fn;
+       init_waitqueue_head(&work->done);
+       work->flushing = 0;
+       work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev)
 {
-       struct vhost_work *work = &poll->work;
-
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
 
-       INIT_LIST_HEAD(&work->node);
-       work->fn = fn;
-       init_waitqueue_head(&work->done);
-       work->flushing = 0;
-       work->queue_seq = work->done_seq = 0;
+       vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
        remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-       struct vhost_work *work = &poll->work;
        unsigned seq;
        int left;
        int flushing;
 
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        seq = work->queue_seq;
        work->flushing++;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        wait_event(work->done, ({
-                  spin_lock_irq(&poll->dev->work_lock);
+                  spin_lock_irq(&dev->work_lock);
                   left = seq - work->done_seq <= 0;
-                  spin_unlock_irq(&poll->dev->work_lock);
+                  spin_unlock_irq(&dev->work_lock);
                   left;
        }));
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        flushing = --work->flushing;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+       vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+                                   struct vhost_work *work)
 {
-       struct vhost_dev *dev = poll->dev;
-       struct vhost_work *work = &poll->work;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
        spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+       vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
        return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+        struct vhost_work work;
+        struct task_struct *owner;
+        int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+        struct vhost_attach_cgroups_struct *s;
+        s = container_of(work, struct vhost_attach_cgroups_struct, work);
+        s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+        struct vhost_attach_cgroups_struct attach;
+        attach.owner = current;
+        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+        vhost_work_queue(dev, &attach.work);
+        vhost_work_flush(dev, &attach.work);
+        return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
        }
 
        dev->worker = worker;
-       err = cgroup_attach_task_current_cg(worker);
+       wake_up_process(worker);        /* avoid contributing to loadavg */
+
+       err = vhost_attach_cgroups(dev);
        if (err)
                goto err_cgroup;
-       wake_up_process(worker);        /* avoid contributing to loadavg */
 
        return 0;
 err_cgroup:
        kthread_stop(worker);
+       dev->worker = NULL;
 err_worker:
        if (dev->mm)
                mmput(dev->mm);
@@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
        dev->mm = NULL;
 
        WARN_ON(!list_empty(&dev->work_list));
-       kthread_stop(dev->worker);
+       if (dev->worker) {
+               kthread_stop(dev->worker);
+               dev->worker = NULL;
+       }
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
index c91a7f70f7b086f56882ba07f97ba38e314adfd6..5d786bd3e304f1a174e682461d7d94a265ae2119 100644 (file)
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-static int __init pxa168fb_init_mode(struct fb_info *info,
+static int __devinit pxa168fb_init_mode(struct fb_info *info,
                              struct pxa168fb_mach_info *mi)
 {
        struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
        return ret;
 }
 
-static int __init pxa168fb_probe(struct platform_device *pdev)
+static int __devinit pxa168fb_probe(struct platform_device *pdev)
 {
        struct pxa168fb_mach_info *mi;
        struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
        .probe          = pxa168fb_probe,
 };
 
-static int __devinit pxa168fb_init(void)
+static int __init pxa168fb_init(void)
 {
        return platform_driver_register(&pxa168fb_driver);
 }
index 358563689064df61c9b81713ad8bb8911c6518bd..6406f896bf95fe56d404f40cdae7e083d88e3e9c 100644 (file)
@@ -242,7 +242,8 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
        }
        kfree(wnames);
 fid_out:
-       v9fs_fid_add(dentry, fid);
+       if (!IS_ERR(fid))
+               v9fs_fid_add(dentry, fid);
 err_out:
        up_read(&v9ses->rename_sem);
        return fid;
index a7528b91393676bb1f1affa72f6b78f38206d4c5..fd0cc0bf9a40396a150ad77b69bf5284531d6125 100644 (file)
@@ -724,7 +724,7 @@ static int __init init_misc_binfmt(void)
 {
        int err = register_filesystem(&bm_fs_type);
        if (!err) {
-               err = register_binfmt(&misc_format);
+               err = insert_binfmt(&misc_format);
                if (err)
                        unregister_filesystem(&bm_fs_type);
        }
index 612a5c38d3c1a5fc49e0d0990e19550c27217650..4d0ff5ee27b86bef6d377b9211694941939999a9 100644 (file)
@@ -413,10 +413,10 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Allocate kernel buffer for protection data */
        len = sectors * blk_integrity_tuple_size(bi);
-       buf = kmalloc(len, GFP_NOIO | __GFP_NOFAIL | q->bounce_gfp);
+       buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
        if (unlikely(buf == NULL)) {
                printk(KERN_ERR "could not allocate integrity buffer\n");
-               return -EIO;
+               return -ENOMEM;
        }
 
        end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
index 51f270b479b6938a4a730ea56f9011c30f99563f..48d74c7391d13f4f07393c45d19825e937ecbcd1 100644 (file)
@@ -634,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio)
        int ret = 0;
 
        if (dio->bio) {
-               loff_t cur_offset = dio->block_in_file << dio->blkbits;
+               loff_t cur_offset = dio->cur_page_fs_offset;
                loff_t bio_next_offset = dio->logical_offset_in_bio +
                        dio->bio->bi_size;
 
@@ -659,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio)
                 * Submit now if the underlying fs is about to perform a
                 * metadata read
                 */
-               if (dio->boundary)
+               else if (dio->boundary)
                        dio_bio_submit(dio);
        }
 
index 2d9455282744bce582e48e0ecec4f4a6d332a28c..828dd2461d6beb7c37ed7df74ef11177c2bcba6d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -376,6 +376,9 @@ static int count(const char __user * const __user * argv, int max)
                        argv++;
                        if (i++ >= max)
                                return -E2BIG;
+
+                       if (fatal_signal_pending(current))
+                               return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
@@ -419,6 +422,12 @@ static int copy_strings(int argc, const char __user *const __user *argv,
                while (len > 0) {
                        int offset, bytes_to_copy;
 
+                       if (fatal_signal_pending(current)) {
+                               ret = -ERESTARTNOHAND;
+                               goto out;
+                       }
+                       cond_resched();
+
                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;
@@ -594,6 +603,11 @@ int setup_arg_pages(struct linux_binprm *bprm,
 #else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);
+
+       if (unlikely(stack_top < mmap_min_addr) ||
+           unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
+               return -ENOMEM;
+
        stack_shift = vma->vm_end - stack_top;
 
        bprm->p -= stack_shift;
index 6769fd0f35b88373fdb8d0b265668a976a7ab251..f8cc34f542c3a1cc8b4f53309ffae317ecdddf15 100644 (file)
@@ -769,11 +769,15 @@ EXPORT_SYMBOL(kill_fasync);
 
 static int __init fcntl_init(void)
 {
-       /* please add new bits here to ensure allocation uniqueness */
-       BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+       /*
+        * Please add new bits here to ensure allocation uniqueness.
+        * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+        * is defined as O_NONBLOCK on some platforms and not on others.
+        */
+       BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
-               O_TRUNC         | O_APPEND      | O_NONBLOCK    |
+               O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
index 7d9d06ba184b409e2ae90050ce82ca365cf3ce82..81e086d8aa5733c7a86a966827bf7ea18d29baa4 100644 (file)
@@ -808,7 +808,7 @@ int bdi_writeback_thread(void *data)
                        wb->last_active = jiffies;
 
                set_current_state(TASK_INTERRUPTIBLE);
-               if (!list_empty(&bdi->work_list)) {
+               if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
index 69ad053ffd78cb0f2669516b5327571f37d65254..d367af1514efe696b50a73374fffe77784c65b6a 100644 (file)
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
@@ -306,8 +306,8 @@ __releases(&fc->lock)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
 
 /* Wait until a request is available on the pending list */
 static void request_wait(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        DECLARE_WAITQUEUE(wait, current);
 
@@ -934,7 +934,7 @@ __acquires(&fc->lock)
  */
 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
-__releases(&fc->lock)
+__releases(fc->lock)
 {
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(head)) {
                struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(&fc->lock)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
@@ -1769,6 +1769,16 @@ __acquires(&fc->lock)
        }
 }
 
+static void end_queued_requests(struct fuse_conn *fc)
+__releases(fc->lock)
+__acquires(fc->lock)
+{
+       fc->max_background = UINT_MAX;
+       flush_bg_queue(fc);
+       end_requests(fc, &fc->pending);
+       end_requests(fc, &fc->processing);
+}
+
 /*
  * Abort all requests.
  *
@@ -1795,8 +1805,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               end_queued_requests(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1811,8 +1820,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
-               end_requests(fc, &fc->pending);
-               end_requests(fc, &fc->processing);
+               fc->blocked = 0;
+               end_queued_requests(fc);
+               wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }
index 147c1f71bdb9f0213307fd3e63f6e3b30fc3f403..c8224587123f6e2ff84c8933f8d56a50ffd06c80 100644 (file)
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 /* Called under fc->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(&fc->lock)
  * Called with fc->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(&fc->lock)
-__acquires(&fc->lock)
+__releases(fc->lock)
+__acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
index e20ee85955d1c77c3a410da2c82893cd38acce8e..f3f3578393a417085812ba1ad7e0b7c1e4e5c981 100644 (file)
@@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode)
 
        inode_inc_link_count(dir);
 
-       inode = minix_new_inode(dir, mode, &err);
+       inode = minix_new_inode(dir, S_IFDIR | mode, &err);
        if (!inode)
                goto out_dir;
 
index de402eb6eafbad3df3957ca7b74ee92256c1e8c4..a72eaabfe8f2a58868e96397b320ba819e159884 100644 (file)
@@ -1483,6 +1483,23 @@ out_unlock:
        return err;
 }
 
+/*
+ * Sanity check the flags to change_mnt_propagation.
+ */
+
+static int flags_to_propagation_type(int flags)
+{
+       int type = flags & ~MS_REC;
+
+       /* Fail if any non-propagation flags are set */
+       if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+               return 0;
+       /* Only one propagation flag should be set */
+       if (!is_power_of_2(type))
+               return 0;
+       return type;
+}
+
 /*
  * recursively change the type of the mountpoint.
  */
@@ -1490,7 +1507,7 @@ static int do_change_type(struct path *path, int flag)
 {
        struct vfsmount *m, *mnt = path->mnt;
        int recurse = flag & MS_REC;
-       int type = flag & ~MS_REC;
+       int type;
        int err = 0;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1499,6 +1516,10 @@ static int do_change_type(struct path *path, int flag)
        if (path->dentry != path->mnt->mnt_root)
                return -EINVAL;
 
+       type = flags_to_propagation_type(flag);
+       if (!type)
+               return -EINVAL;
+
        down_write(&namespace_sem);
        if (type == MS_SHARED) {
                err = invent_group_ids(mnt, recurse);
index 3dfef062396845d2b45cc42a22064ec4402ee05f..cf0d2ffb3c84a149bc904323cd53599620c8c917 100644 (file)
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
 
 static int nfs4_access_to_omode(u32 access)
 {
-       switch (access) {
+       switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
index 4317f177ea7cb27c46cb315f5055205f3a0addc7..ba7c10c917fcd1545a668c53f6abb2819b04f3bc 100644 (file)
@@ -446,6 +446,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
        nilfs_mdt_destroy(nilfs->ns_cpfile);
        nilfs_mdt_destroy(nilfs->ns_sufile);
        nilfs_mdt_destroy(nilfs->ns_dat);
+       nilfs_mdt_destroy(nilfs->ns_gc_dat);
 
  failed:
        nilfs_clear_recovery_info(&ri);
index 215e12ce1d85e2079359838cf287f1c3c670ff31..592fae5007d1245baade87453ce731121aa6efe5 100644 (file)
@@ -6672,7 +6672,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
        last_page_bytes = PAGE_ALIGN(end);
        index = start >> PAGE_CACHE_SHIFT;
        do {
-               pages[numpages] = grab_cache_page(mapping, index);
+               pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
                if (!pages[numpages]) {
                        ret = -ENOMEM;
                        mlog_errno(ret);
index ec6d123395932b69b6a67ba0b5256be02b811c6f..c7ee03c22226253d970cce94beb11f6353b3e1d0 100644 (file)
@@ -439,7 +439,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
 
        ocfs2_blockcheck_inc_failure(stats);
        mlog(ML_ERROR,
-            "CRC32 failed: stored: %u, computed %u.  Applying ECC.\n",
+            "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        /* Ok, try ECC fixups */
@@ -453,7 +453,7 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
                goto out;
        }
 
-       mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
+       mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
             (unsigned int)check.bc_crc32e, (unsigned int)crc);
 
        rc = -EIO;
index 81296b4e364632dd5936f59d8adeab9832f2d2fd..9a03c151b5ceabc169215d99777abb92e2d2ad36 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/writeback.h>
 #include <linux/falloc.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -190,8 +191,16 @@ static int ocfs2_sync_file(struct file *file, int datasync)
        if (err)
                goto bail;
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) {
+               /*
+                * We still have to flush drive's caches to get data to the
+                * platter
+                */
+               if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
+                                          NULL, BLKDEV_IFL_WAIT);
                goto bail;
+       }
 
        journal = osb->journal->j_journal;
        err = jbd2_journal_force_commit(journal);
@@ -774,7 +783,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));
 
-       page = grab_cache_page(mapping, index);
+       page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -2329,7 +2338,7 @@ out_dio:
        BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
        if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
-           ((file->f_flags & O_DIRECT) && has_refcount)) {
+           ((file->f_flags & O_DIRECT) && !direct_io)) {
                ret = filemap_fdatawrite_range(file->f_mapping, pos,
                                               pos + count - 1);
                if (ret < 0)
index 0492464916b19324e73425e29c473956b0b4bd33..eece3e05d9d0124d04b81c940c1289f876e700bb 100644 (file)
@@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                                     OCFS2_BH_IGNORE_CACHE);
        } else {
                status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
-               if (!status)
+               /*
+                * If buffer is in jbd, then its checksum may not have been
+                * computed as yet.
+                */
+               if (!status && !buffer_jbd(bh))
                        status = ocfs2_validate_inode_block(osb->sb, bh);
        }
        if (status < 0) {
index af2b8fe1f13999e26f6e2543a047847bcf517c4e..4c18f4ad93b43cae6e5ddcd5a2781fbc79292484 100644 (file)
@@ -74,9 +74,11 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
        /*
         * Another node might have truncated while we were waiting on
         * cluster locks.
+        * We don't check size == 0 before the shift. This is borrowed
+        * from do_generic_file_read.
         */
-       last_index = size >> PAGE_CACHE_SHIFT;
-       if (page->index > last_index) {
+       last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(!size || page->index > last_index)) {
                ret = -EINVAL;
                goto out;
        }
@@ -107,7 +109,7 @@ static int __ocfs2_page_mkwrite(struct inode *inode, struct buffer_head *di_bh,
         * because the "write" would invalidate their data.
         */
        if (page->index == last_index)
-               len = size & ~PAGE_CACHE_MASK;
+               len = ((size - 1) & ~PAGE_CACHE_MASK) + 1;
 
        ret = ocfs2_write_begin_nolock(mapping, pos, len, 0, &locked_page,
                                       &fsdata, di_bh, page);
index f171b51a74f78d6e268b5d743a24df4e702f9643..a00dda2e4f16698e5c7d8e0946ba1f09506651c6 100644 (file)
@@ -472,32 +472,23 @@ leave:
        return status;
 }
 
-static int ocfs2_mknod_locked(struct ocfs2_super *osb,
-                             struct inode *dir,
-                             struct inode *inode,
-                             dev_t dev,
-                             struct buffer_head **new_fe_bh,
-                             struct buffer_head *parent_fe_bh,
-                             handle_t *handle,
-                             struct ocfs2_alloc_context *inode_ac)
+static int __ocfs2_mknod_locked(struct inode *dir,
+                               struct inode *inode,
+                               dev_t dev,
+                               struct buffer_head **new_fe_bh,
+                               struct buffer_head *parent_fe_bh,
+                               handle_t *handle,
+                               struct ocfs2_alloc_context *inode_ac,
+                               u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
 {
        int status = 0;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_extent_list *fel;
-       u64 suballoc_loc, fe_blkno = 0;
-       u16 suballoc_bit;
        u16 feat;
 
        *new_fe_bh = NULL;
 
-       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
-                                      inode_ac, &suballoc_loc,
-                                      &suballoc_bit, &fe_blkno);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        /* populate as many fields early on as possible - many of
         * these are used by the support functions here and in
         * callers. */
@@ -591,6 +582,34 @@ leave:
        return status;
 }
 
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+                             struct inode *dir,
+                             struct inode *inode,
+                             dev_t dev,
+                             struct buffer_head **new_fe_bh,
+                             struct buffer_head *parent_fe_bh,
+                             handle_t *handle,
+                             struct ocfs2_alloc_context *inode_ac)
+{
+       int status = 0;
+       u64 suballoc_loc, fe_blkno = 0;
+       u16 suballoc_bit;
+
+       *new_fe_bh = NULL;
+
+       status = ocfs2_claim_new_inode(handle, dir, parent_fe_bh,
+                                      inode_ac, &suballoc_loc,
+                                      &suballoc_bit, &fe_blkno);
+       if (status < 0) {
+               mlog_errno(status);
+               return status;
+       }
+
+       return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
+                                   parent_fe_bh, handle, inode_ac,
+                                   fe_blkno, suballoc_loc, suballoc_bit);
+}
+
 static int ocfs2_mkdir(struct inode *dir,
                       struct dentry *dentry,
                       int mode)
@@ -1852,61 +1871,117 @@ bail:
        return status;
 }
 
-static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct inode **ret_orphan_dir,
-                                   u64 blkno,
-                                   char *name,
-                                   struct ocfs2_dir_lookup_result *lookup)
+static int ocfs2_lookup_lock_orphan_dir(struct ocfs2_super *osb,
+                                       struct inode **ret_orphan_dir,
+                                       struct buffer_head **ret_orphan_dir_bh)
 {
        struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
-       int status = 0;
-
-       status = ocfs2_blkno_stringify(blkno, name);
-       if (status < 0) {
-               mlog_errno(status);
-               return status;
-       }
+       int ret = 0;
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
                                                       ORPHAN_DIR_SYSTEM_INODE,
                                                       osb->slot_num);
        if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               return status;
+               ret = -ENOENT;
+               mlog_errno(ret);
+               return ret;
        }
 
        mutex_lock(&orphan_dir_inode->i_mutex);
 
-       status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
+       ret = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1);
+       if (ret < 0) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
+               iput(orphan_dir_inode);
+
+               mlog_errno(ret);
+               return ret;
        }
 
-       status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
-                                             orphan_dir_bh, name,
-                                             OCFS2_ORPHAN_NAMELEN, lookup);
-       if (status < 0) {
-               ocfs2_inode_unlock(orphan_dir_inode, 1);
+       *ret_orphan_dir = orphan_dir_inode;
+       *ret_orphan_dir_bh = orphan_dir_bh;
 
-               mlog_errno(status);
-               goto leave;
+       return 0;
+}
+
+static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode,
+                                     struct buffer_head *orphan_dir_bh,
+                                     u64 blkno,
+                                     char *name,
+                                     struct ocfs2_dir_lookup_result *lookup)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb);
+
+       ret = ocfs2_blkno_stringify(blkno, name);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+                                          orphan_dir_bh, name,
+                                          OCFS2_ORPHAN_NAMELEN, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/**
+ * ocfs2_prepare_orphan_dir() - Prepare an orphan directory for
+ * insertion of an orphan.
+ * @osb: ocfs2 file system
+ * @ret_orphan_dir: Orphan dir inode - returned locked!
+ * @blkno: Actual block number of the inode to be inserted into orphan dir.
+ * @lookup: dir lookup result, to be passed back into functions like
+ *          ocfs2_orphan_add
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure.
+ */
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+                                   struct inode **ret_orphan_dir,
+                                   u64 blkno,
+                                   char *name,
+                                   struct ocfs2_dir_lookup_result *lookup)
+{
+       struct inode *orphan_dir_inode = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       int ret = 0;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir_inode,
+                                          &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh,
+                                        blkno, name, lookup);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
        }
 
        *ret_orphan_dir = orphan_dir_inode;
 
-leave:
-       if (status) {
+out:
+       brelse(orphan_dir_bh);
+
+       if (ret) {
+               ocfs2_inode_unlock(orphan_dir_inode, 1);
                mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
        }
 
-       brelse(orphan_dir_bh);
-
-       mlog_exit(status);
-       return status;
+       mlog_exit(ret);
+       return ret;
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
@@ -2053,6 +2128,99 @@ leave:
        return status;
 }
 
+/**
+ * ocfs2_prep_new_orphaned_file() - Prepare the orphan dir to receive a newly
+ * allocated file. This is different from the typical 'add to orphan dir'
+ * operation in that the inode does not yet exist. This is a problem because
+ * the orphan dir stringifies the inode block number to come up with its
+ * dirent. Obviously if the inode does not yet exist we have a chicken and egg
+ * problem. This function works around it by calling deeper into the orphan
+ * and suballoc code than other callers. Use this only by necessity.
+ * @dir: The directory which this inode will ultimately wind up under - not the
+ * orphan dir!
+ * @dir_bh: buffer_head the @dir inode block
+ * @orphan_name: string of length (OCFS2_ORPHAN_NAMELEN + 1). Will be filled
+ * with the string to be used for orphan dirent. Pass back to the orphan dir
+ * code.
+ * @ret_orphan_dir: orphan dir inode returned to be passed back into orphan
+ * dir code.
+ * @ret_di_blkno: block number where the new inode will be allocated.
+ * @orphan_insert: Dir insert context to be passed back into orphan dir code.
+ * @ret_inode_ac: Inode alloc context to be passed back to the allocator.
+ *
+ * Returns zero on success and the ret_orphan_dir, name and lookup
+ * fields will be populated.
+ *
+ * Returns non-zero on failure. 
+ */
+static int ocfs2_prep_new_orphaned_file(struct inode *dir,
+                                       struct buffer_head *dir_bh,
+                                       char *orphan_name,
+                                       struct inode **ret_orphan_dir,
+                                       u64 *ret_di_blkno,
+                                       struct ocfs2_dir_lookup_result *orphan_insert,
+                                       struct ocfs2_alloc_context **ret_inode_ac)
+{
+       int ret;
+       u64 di_blkno;
+       struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+       struct inode *orphan_dir = NULL;
+       struct buffer_head *orphan_dir_bh = NULL;
+       struct ocfs2_alloc_context *inode_ac = NULL;
+
+       ret = ocfs2_lookup_lock_orphan_dir(osb, &orphan_dir, &orphan_dir_bh);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /* reserve an inode spot */
+       ret = ocfs2_reserve_new_inode(osb, &inode_ac);
+       if (ret < 0) {
+               if (ret != -ENOSPC)
+                       mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_new_inode_loc(dir, dir_bh, inode_ac,
+                                      &di_blkno);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh,
+                                        di_blkno, orphan_name, orphan_insert);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+out:
+       if (ret == 0) {
+               *ret_orphan_dir = orphan_dir;
+               *ret_di_blkno = di_blkno;
+               *ret_inode_ac = inode_ac;
+               /*
+                * orphan_name and orphan_insert are already up to
+                * date via prepare_orphan_dir
+                */
+       } else {
+               /* Unroll reserve_new_inode* */
+               if (inode_ac)
+                       ocfs2_free_alloc_context(inode_ac);
+
+               /* Unroll orphan dir locking */
+               mutex_unlock(&orphan_dir->i_mutex);
+               ocfs2_inode_unlock(orphan_dir, 1);
+               iput(orphan_dir);
+       }
+
+       brelse(orphan_dir_bh);
+
+       return 0;
+}
+
 int ocfs2_create_inode_in_orphan(struct inode *dir,
                                 int mode,
                                 struct inode **new_inode)
@@ -2068,6 +2236,8 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
        struct buffer_head *new_di_bh = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
+       u64 uninitialized_var(di_blkno), suballoc_loc;
+       u16 suballoc_bit;
 
        status = ocfs2_inode_lock(dir, &parent_di_bh, 1);
        if (status < 0) {
@@ -2076,20 +2246,9 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                return status;
        }
 
-       /*
-        * We give the orphan dir the root blkno to fake an orphan name,
-        * and allocate enough space for our insertion.
-        */
-       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
-                                         osb->root_blkno,
-                                         orphan_name, &orphan_insert);
-       if (status < 0) {
-               mlog_errno(status);
-               goto leave;
-       }
-
-       /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, &inode_ac);
+       status = ocfs2_prep_new_orphaned_file(dir, parent_di_bh,
+                                             orphan_name, &orphan_dir,
+                                             &di_blkno, &orphan_insert, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -2116,17 +2275,20 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
                goto leave;
        did_quota_inode = 1;
 
-       inode->i_nlink = 0;
-       /* do the real work now. */
-       status = ocfs2_mknod_locked(osb, dir, inode,
-                                   0, &new_di_bh, parent_di_bh, handle,
-                                   inode_ac);
+       status = ocfs2_claim_new_inode_at_loc(handle, dir, inode_ac,
+                                             &suballoc_loc,
+                                             &suballoc_bit, di_blkno);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
 
-       status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name);
+       inode->i_nlink = 0;
+       /* do the real work now. */
+       status = __ocfs2_mknod_locked(dir, inode,
+                                     0, &new_di_bh, parent_di_bh, handle,
+                                     inode_ac, di_blkno, suballoc_loc,
+                                     suballoc_bit);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
index 73a11ccfd4c280681abe672c5e9cd81e3b229a93..0afeda83120fa0e54bd2c9cc7e1f4f08c6e836bf 100644 (file)
@@ -2960,7 +2960,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_CACHE_SIZE - 1))
                        to = map_end & (PAGE_CACHE_SIZE - 1);
 
-               page = grab_cache_page(mapping, page_index);
+               page = find_or_create_page(mapping, page_index, GFP_NOFS);
 
                /*
                 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
@@ -3179,7 +3179,8 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = grab_cache_page(context->inode->i_mapping, page_index);
+               page = find_or_create_page(context->inode->i_mapping,
+                                          page_index, GFP_NOFS);
                BUG_ON(!page);
 
                wait_on_page_writeback(page);
index a8e6a95a353f03dcb8a34cf928ded84ff7e6d127..8a286f54dca1f30a1d65b0d6cbb5baa0fa8c063c 100644 (file)
@@ -57,11 +57,28 @@ struct ocfs2_suballoc_result {
        u64             sr_bg_blkno;    /* The bg we allocated from.  Set
                                           to 0 when a block group is
                                           contiguous. */
+       u64             sr_bg_stable_blkno; /*
+                                            * Doesn't change, always
+                                            * set to target block
+                                            * group descriptor
+                                            * block.
+                                            */
        u64             sr_blkno;       /* The first allocated block */
        unsigned int    sr_bit_offset;  /* The bit in the bg */
        unsigned int    sr_bits;        /* How many bits we claimed */
 };
 
+static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
+{
+       if (res->sr_blkno == 0)
+               return 0;
+
+       if (res->sr_bg_blkno)
+               return res->sr_bg_blkno;
+
+       return ocfs2_which_suballoc_group(res->sr_blkno, res->sr_bit_offset);
+}
+
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
@@ -138,6 +155,10 @@ void ocfs2_free_ac_resource(struct ocfs2_alloc_context *ac)
        brelse(ac->ac_bh);
        ac->ac_bh = NULL;
        ac->ac_resv = NULL;
+       if (ac->ac_find_loc_priv) {
+               kfree(ac->ac_find_loc_priv);
+               ac->ac_find_loc_priv = NULL;
+       }
 }
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
@@ -1678,6 +1699,15 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (!ret)
                ocfs2_bg_discontig_fix_result(ac, gd, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
+
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
        ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
                                               res->sr_bits,
                                               le16_to_cpu(gd->bg_chain));
@@ -1691,6 +1721,7 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        if (ret < 0)
                mlog_errno(ret);
 
+out_loc_only:
        *bits_left = le16_to_cpu(gd->bg_free_bits_count);
 
 out:
@@ -1708,7 +1739,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 {
        int status;
        u16 chain;
-       u32 tmp_used;
        u64 next_group;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
@@ -1770,6 +1800,11 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        if (!status)
                ocfs2_bg_discontig_fix_result(ac, bg, res);
 
+       /*
+        * sr_bg_blkno might have been changed by
+        * ocfs2_bg_discontig_fix_result
+        */
+       res->sr_bg_stable_blkno = group_bh->b_blocknr;
 
        /*
         * Keep track of previous block descriptor read. When
@@ -1796,22 +1831,17 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                }
        }
 
-       /* Ok, claim our bits now: set the info on dinode, chainlist
-        * and then the group */
-       status = ocfs2_journal_access_di(handle,
-                                        INODE_CACHE(alloc_inode),
-                                        ac->ac_bh,
-                                        OCFS2_JOURNAL_ACCESS_WRITE);
-       if (status < 0) {
+       if (ac->ac_find_loc_only)
+               goto out_loc_only;
+
+       status = ocfs2_alloc_dinode_update_counts(alloc_inode, handle,
+                                                 ac->ac_bh, res->sr_bits,
+                                                 chain);
+       if (status) {
                mlog_errno(status);
                goto bail;
        }
 
-       tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
-       fe->id1.bitmap1.i_used = cpu_to_le32(res->sr_bits + tmp_used);
-       le32_add_cpu(&cl->cl_recs[chain].c_free, -res->sr_bits);
-       ocfs2_journal_dirty(handle, ac->ac_bh);
-
        status = ocfs2_block_group_set_bits(handle,
                                            alloc_inode,
                                            bg,
@@ -1826,6 +1856,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
             (unsigned long long)le64_to_cpu(fe->i_blkno));
 
+out_loc_only:
        *bits_left = le16_to_cpu(bg->bg_free_bits_count);
 bail:
        brelse(group_bh);
@@ -1845,6 +1876,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
        int status;
        u16 victim, i;
        u16 bits_left = 0;
+       u64 hint = ac->ac_last_group;
        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;
 
@@ -1872,7 +1904,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                goto bail;
        }
 
-       res->sr_bg_blkno = ac->ac_last_group;
+       res->sr_bg_blkno = hint;
        if (res->sr_bg_blkno) {
                /* Attempt to short-circuit the usual search mechanism
                 * by jumping straight to the most recently used
@@ -1896,8 +1928,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 
        status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                    res, &bits_left);
-       if (!status)
+       if (!status) {
+               hint = ocfs2_group_from_res(res);
                goto set_hint;
+       }
        if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
                goto bail;
@@ -1920,8 +1954,10 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
                ac->ac_chain = i;
                status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            res, &bits_left);
-               if (!status)
+               if (!status) {
+                       hint = ocfs2_group_from_res(res);
                        break;
+               }
                if (status < 0 && status != -ENOSPC) {
                        mlog_errno(status);
                        goto bail;
@@ -1936,7 +1972,7 @@ set_hint:
                if (bits_left < min_bits)
                        ac->ac_last_group = 0;
                else
-                       ac->ac_last_group = res->sr_bg_blkno;
+                       ac->ac_last_group = hint;
        }
 
 bail:
@@ -2016,6 +2052,136 @@ static inline void ocfs2_save_inode_ac_group(struct inode *dir,
        OCFS2_I(dir)->ip_last_used_slot = ac->ac_alloc_slot;
 }
 
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno)
+{
+       int ret;
+       handle_t *handle = NULL;
+       struct ocfs2_suballoc_result *res;
+
+       BUG_ON(!ac);
+       BUG_ON(ac->ac_bits_given != 0);
+       BUG_ON(ac->ac_bits_wanted != 1);
+       BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+
+       res = kzalloc(sizeof(*res), GFP_NOFS);
+       if (res == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_init_inode_ac_group(dir, parent_fe_bh, ac);
+
+       /*
+        * The handle started here is for chain relink. Alternatively,
+        * we could just disable relink for these calls.
+        */
+       handle = ocfs2_start_trans(OCFS2_SB(dir->i_sb), OCFS2_SUBALLOC_ALLOC);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * This will instruct ocfs2_claim_suballoc_bits and
+        * ocfs2_search_one_group to search but save actual allocation
+        * for later.
+        */
+       ac->ac_find_loc_only = 1;
+
+       ret = ocfs2_claim_suballoc_bits(ac, handle, 1, 1, res);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac->ac_find_loc_priv = res;
+       *fe_blkno = res->sr_blkno;
+
+out:
+       if (handle)
+               ocfs2_commit_trans(OCFS2_SB(dir->i_sb), handle);
+
+       if (ret)
+               kfree(res);
+
+       return ret;
+}
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno)
+{
+       int ret;
+       u16 chain;
+       struct ocfs2_suballoc_result *res = ac->ac_find_loc_priv;
+       struct buffer_head *bg_bh = NULL;
+       struct ocfs2_group_desc *bg;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+
+       /*
+        * Since di_blkno is being passed back in, we check for any
+        * inconsistencies which may have happened between
+        * calls. These are code bugs as di_blkno is not expected to
+        * change once returned from ocfs2_find_new_inode_loc()
+        */
+       BUG_ON(res->sr_blkno != di_blkno);
+
+       ret = ocfs2_read_group_descriptor(ac->ac_inode, di,
+                                         res->sr_bg_stable_blkno, &bg_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+       chain = le16_to_cpu(bg->bg_chain);
+
+       ret = ocfs2_alloc_dinode_update_counts(ac->ac_inode, handle,
+                                              ac->ac_bh, res->sr_bits,
+                                              chain);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle,
+                                        ac->ac_inode,
+                                        bg,
+                                        bg_bh,
+                                        res->sr_bit_offset,
+                                        res->sr_bits);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mlog(0, "Allocated %u bits from suballocator %llu\n", res->sr_bits,
+            (unsigned long long)di_blkno);
+
+       atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
+
+       BUG_ON(res->sr_bits != 1);
+
+       *suballoc_loc = res->sr_bg_blkno;
+       *suballoc_bit = res->sr_bit_offset;
+       ac->ac_bits_given++;
+       ocfs2_save_inode_ac_group(dir, ac);
+
+out:
+       brelse(bg_bh);
+
+       return ret;
+}
+
 int ocfs2_claim_new_inode(handle_t *handle,
                          struct inode *dir,
                          struct buffer_head *parent_fe_bh,
@@ -2567,7 +2733,8 @@ out:
  * suballoc_bit.
  */
 static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
-                                      u16 *suballoc_slot, u16 *suballoc_bit)
+                                      u16 *suballoc_slot, u64 *group_blkno,
+                                      u16 *suballoc_bit)
 {
        int status;
        struct buffer_head *inode_bh = NULL;
@@ -2604,6 +2771,8 @@ static int ocfs2_get_suballoc_slot_bit(struct ocfs2_super *osb, u64 blkno,
                *suballoc_slot = le16_to_cpu(inode_fe->i_suballoc_slot);
        if (suballoc_bit)
                *suballoc_bit = le16_to_cpu(inode_fe->i_suballoc_bit);
+       if (group_blkno)
+               *group_blkno = le64_to_cpu(inode_fe->i_suballoc_loc);
 
 bail:
        brelse(inode_bh);
@@ -2621,7 +2790,8 @@ bail:
  */
 static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                                   struct inode *suballoc,
-                                  struct buffer_head *alloc_bh, u64 blkno,
+                                  struct buffer_head *alloc_bh,
+                                  u64 group_blkno, u64 blkno,
                                   u16 bit, int *res)
 {
        struct ocfs2_dinode *alloc_di;
@@ -2642,10 +2812,8 @@ static int ocfs2_test_suballoc_bit(struct ocfs2_super *osb,
                goto bail;
        }
 
-       if (alloc_di->i_suballoc_loc)
-               bg_blkno = le64_to_cpu(alloc_di->i_suballoc_loc);
-       else
-               bg_blkno = ocfs2_which_suballoc_group(blkno, bit);
+       bg_blkno = group_blkno ? group_blkno :
+                  ocfs2_which_suballoc_group(blkno, bit);
        status = ocfs2_read_group_descriptor(suballoc, alloc_di, bg_blkno,
                                             &group_bh);
        if (status < 0) {
@@ -2680,6 +2848,7 @@ bail:
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
 {
        int status;
+       u64 group_blkno = 0;
        u16 suballoc_bit = 0, suballoc_slot = 0;
        struct inode *inode_alloc_inode;
        struct buffer_head *alloc_bh = NULL;
@@ -2687,7 +2856,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        mlog_entry("blkno: %llu", (unsigned long long)blkno);
 
        status = ocfs2_get_suballoc_slot_bit(osb, blkno, &suballoc_slot,
-                                            &suballoc_bit);
+                                            &group_blkno, &suballoc_bit);
        if (status < 0) {
                mlog(ML_ERROR, "get alloc slot and bit failed %d\n", status);
                goto bail;
@@ -2715,7 +2884,7 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
        }
 
        status = ocfs2_test_suballoc_bit(osb, inode_alloc_inode, alloc_bh,
-                                        blkno, suballoc_bit, res);
+                                        group_blkno, blkno, suballoc_bit, res);
        if (status < 0)
                mlog(ML_ERROR, "test suballoc bit failed %d\n", status);
 
index a017dd3ee7d9ce2d6c0429d090b57ac077ed585a..b8afabfeede4c43694bdb8bf0a9664b0befd24a6 100644 (file)
@@ -56,6 +56,9 @@ struct ocfs2_alloc_context {
        u64    ac_max_block;  /* Highest block number to allocate. 0 is
                                 is the same as ~0 - unlimited */
 
+       int    ac_find_loc_only;  /* hack for reflink operation ordering */
+       struct ocfs2_suballoc_result *ac_find_loc_priv; /* */
+
        struct ocfs2_alloc_reservation  *ac_resv;
 };
 
@@ -197,4 +200,22 @@ int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_extent_tree *et,
                          struct ocfs2_alloc_context **meta_ac);
 
 int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res);
+
+
+
+/*
+ * The following two interfaces are for ocfs2_create_inode_in_orphan().
+ */
+int ocfs2_find_new_inode_loc(struct inode *dir,
+                            struct buffer_head *parent_fe_bh,
+                            struct ocfs2_alloc_context *ac,
+                            u64 *fe_blkno);
+
+int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+                                struct inode *dir,
+                                struct ocfs2_alloc_context *ac,
+                                u64 *suballoc_loc,
+                                u16 *suballoc_bit,
+                                u64 di_blkno);
+
 #endif /* _CHAINALLOC_H_ */
index 180cf5a0bd67119218c170b265cf944c103b889b..3b8b456603318f1ff017056cdeda40129d559ab4 100644 (file)
@@ -146,7 +146,7 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_HWPOISON,      PG_hwpoison);
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
        u |= kpf_copy_bit(k, KPF_UNCACHED,      PG_uncached);
 #endif
 
index 439fc1f1c1c41487ad76d23523d995a9f416b926..271afc48b9a5d58dd2874d41f8a08dfcae644481 100644 (file)
@@ -224,7 +224,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
-               start += PAGE_SIZE;
+               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
+                       start += PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
index 1b27b5688f62fdfd8abd40386c6e1ece7aa1bf49..da3fefe91a8f855f59e62c84f0ef70a2a9bd0524 100644 (file)
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
        char *p;
 
        p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
-       if (p)
+       if (!IS_ERR(p))
                memmove(last_sysfs_file, p, strlen(p) + 1);
 
        /* need attr_sd for attr and ops, its parent for kobj */
index ea79072f521012549c4c2673a6210340834202a4..286e36e21dae587672216d7b7722488e91b45758 100644 (file)
@@ -440,12 +440,7 @@ _xfs_buf_find(
                ASSERT(btp == bp->b_target);
                if (bp->b_file_offset == range_base &&
                    bp->b_buffer_length == range_length) {
-                       /*
-                        * If we look at something, bring it to the
-                        * front of the list for next time.
-                        */
                        atomic_inc(&bp->b_hold);
-                       list_move(&bp->b_hash_list, &hash->bh_list);
                        goto found;
                }
        }
@@ -1443,8 +1438,7 @@ xfs_alloc_bufhash(
 {
        unsigned int            i;
 
-       btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
-       btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
+       btp->bt_hashshift = external ? 3 : 12;  /* 8 or 4096 buckets */
        btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
                                         sizeof(xfs_bufhash_t));
        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
@@ -1938,7 +1932,8 @@ xfs_buf_init(void)
        if (!xfs_buf_zone)
                goto out;
 
-       xfslogd_workqueue = create_workqueue("xfslogd");
+       xfslogd_workqueue = alloc_workqueue("xfslogd",
+                                       WQ_RESCUER | WQ_HIGHPRI, 1);
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;
 
index d072e5ff923b3e71eac4000bd432cd84d72e0d9b..2a05614f0b920c672a3d5950aef3701f91aab465 100644 (file)
@@ -137,7 +137,6 @@ typedef struct xfs_buftarg {
        size_t                  bt_smask;
 
        /* per device buffer hash table */
-       uint                    bt_hashmask;
        uint                    bt_hashshift;
        xfs_bufhash_t           *bt_hash;
 
index 237f5ffb2ee8ca067f6dc81ebc38a6de1f7b36ce..3b9e626f7cd1562877cc340d0c43c5a776dd2e35 100644 (file)
@@ -785,6 +785,8 @@ xfs_ioc_fsgetxattr(
 {
        struct fsxattr          fa;
 
+       memset(&fa, 0, sizeof(struct fsxattr));
+
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        fa.fsx_xflags = xfs_ip2xflags(ip);
        fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
@@ -906,6 +908,13 @@ xfs_ioctl_setattr(
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
+       /*
+        * Disallow 32bit project ids because on-disk structure
+        * is 16bit only.
+        */
+       if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1))
+               return XFS_ERROR(EINVAL);
+
        /*
         * If disk quotas is on, we make sure that the dquots do exist on disk,
         * before we start any other transactions. Trying to do this later
index 68be25dcd301801506889f0f2ac23cd5e5ff420d..b1fc2a6bfe834667cefe95e9a842527ba886944d 100644 (file)
@@ -664,7 +664,7 @@ xfs_vn_fiemap(
                                        fieinfo->fi_extents_max + 1;
        bm.bmv_count = min_t(__s32, bm.bmv_count,
                             (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
-       bm.bmv_iflags = BMV_IF_PREALLOC;
+       bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
                bm.bmv_iflags |= BMV_IF_ATTRFORK;
        if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
index 23f14e595c18cd3a8068796f5b73396fa9db98f9..f90dadd5a96877ff9975ecaaf2fb260863f7d089 100644 (file)
@@ -5533,12 +5533,24 @@ xfs_getbmap(
                                        map[i].br_startblock))
                                goto out_free_map;
 
-                       nexleft--;
                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
+
+                       /*
+                        * In case we don't want to return the hole,
+                        * don't increase cur_ext so that we can reuse
+                        * it in the next loop.
+                        */
+                       if ((iflags & BMV_IF_NO_HOLES) &&
+                           map[i].br_startblock == HOLESTARTBLOCK) {
+                               memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
+                               continue;
+                       }
+
+                       nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
index 7cf7220e7d5fb30dfece1fe9aa4f2798f3e017fc..87c2e9d02288d6ff0eba2949d379e1d02546eb11 100644 (file)
@@ -114,8 +114,10 @@ struct getbmapx {
 #define BMV_IF_NO_DMAPI_READ   0x2     /* Do not generate DMAPI read event  */
 #define BMV_IF_PREALLOC                0x4     /* rtn status BMV_OF_PREALLOC if req */
 #define BMV_IF_DELALLOC                0x8     /* rtn status BMV_OF_DELALLOC if req */
+#define BMV_IF_NO_HOLES                0x10    /* Do not return holes */
 #define BMV_IF_VALID   \
-       (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC)
+       (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|  \
+        BMV_IF_DELALLOC|BMV_IF_NO_HOLES)
 
 /*     bmv_oflags values - returned for each non-header segment */
 #define BMV_OF_PREALLOC                0x1     /* segment = unwritten pre-allocation */
index 66d585c6917cfd55e50447b8048a2adc3f60d68c..4c7c7bfb2b2fc0aa8cc3cb3c98334392eb902c00 100644 (file)
@@ -2299,15 +2299,22 @@ xfs_alloc_file_space(
                        e = allocatesize_fsb;
                }
 
+               /*
+                * The transaction reservation is limited to a 32-bit block
+                * count, hence we need to limit the number of blocks we are
+                * trying to reserve to avoid an overflow. We can't allocate
+                * more than @nimaps extents, and an extent is limited on disk
+                * to MAXEXTLEN (21 bits), so use that to enforce the limit.
+                */
+               resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
-                       resrtextents = qblocks = (uint)(e - s);
+                       resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
-                       resblks = qblocks = \
-                               XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+                       resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }
 
index baacd98e7cc6244b795b5db1b09b440292ee3dac..4de84ce3a927123f118861df950202a8dba1a536 100644 (file)
@@ -377,9 +377,6 @@ struct acpi_pci_root {
 
        u32 osc_support_set;    /* _OSC state of support bits */
        u32 osc_control_set;    /* _OSC state of control bits */
-       u32 osc_control_qry;    /* the latest _OSC query result */
-
-       u32 osc_queried:1;      /* has _OSC control been queried? */
 };
 
 /* helper */
index c7376bf80b0604bf8f8b9394989178d6de45ecc2..8ca18e26d7e39fe429a8179d48f2f9f17f58a589 100644 (file)
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS          256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-       /* only some non-negative numbers are valid */
        return ((unsigned)number) < ARCH_NR_GPIOS;
 }
 
index b5043a9890d85a38d4e63d3ae40f546675639e30..08923b6847681f364557fd52a43b076eaf4d9a2e 100644 (file)
@@ -70,11 +70,16 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)                      (*((void)(cpu), &(var)))
-#define __get_cpu_var(var)                     (var)
-#define __raw_get_cpu_var(var)                 (var)
-#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define VERIFY_PERCPU_PTR(__p) ({                      \
+       __verify_pcpu_ptr((__p));                       \
+       (typeof(*(__p)) __kernel __force *)(__p);       \
+})
+
+#define per_cpu(var, cpu)      (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var)     (*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr)      per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr)    this_cpu_ptr(ptr)
 
 #endif /* SMP */
 
index ccf94dc5acdf0ba8cc4f836968f103f542ff43b6..c227757feb06b4c22eb15ca3faf82c1dfa9f107e 100644 (file)
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
                                OSC_PCI_EXPRESS_PME_CONTROL |           \
                                OSC_PCI_EXPRESS_AER_CONTROL |           \
                                OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+                                            u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
 #else  /* !CONFIG_ACPI */
index ed3e92e41c6e5683ad3dbb823e48259f5150ac33..0c991023ee475fad85136a04b00cb8d2699a382b 100644 (file)
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+       return cgroup_attach_task_all(current, tsk);
+}
 
 /*
  * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 }
 
 /* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+                                        struct task_struct *t)
+{
+       return 0;
+}
 static inline int cgroup_attach_task_current_cg(struct task_struct *t)
 {
        return 0;
index 2c958f4fce1ed6f1d6f4a8c3fc865155cd1eac81..926b50322a469c9d16c48ed35e10c4c12bd695f3 100644 (file)
@@ -136,6 +136,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
 extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
 
 /*
index ee3049cb9ba5782e27447d3984a914a2d39ae87b..52baa79d69a763f7f94f728b115fe0337190addd 100644 (file)
@@ -63,6 +63,9 @@
  *            IRQ lines will appear.  Similarly to gpio_base, the expander
  *            will create a block of irqs beginning at this number.
  *            This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ *                      reset of the chip at the beginning of the probe
+ *                      in order to place it in a known state.
  */
 struct sx150x_platform_data {
        unsigned gpio_base;
@@ -73,6 +76,7 @@ struct sx150x_platform_data {
        u16      io_polarity;
        int      irq_summary;
        unsigned irq_base;
+       bool     reset_during_probe;
 };
 
 #endif /* __LINUX_I2C_SX150X_H */
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
new file mode 100644 (file)
index 0000000..1d19ab2
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Common Intel AGPGART and GTT definitions.
+ */
+#ifndef _INTEL_GTT_H
+#define _INTEL_GTT_H
+
+#include <linux/agp_backend.h>
+
+/* This is for Intel only GTT controls.
+ *
+ * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
+ */
+
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
index 0a6b3d5c490ccfcd3ab9a3dbdba9b41f88e1c4b8..7fb59279373823339f6fdda86158952e3e6fac45 100644 (file)
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
        iounmap_atomic(vaddr, slot);
 }
 
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
        resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
        iounmap(vaddr);
 }
@@ -125,38 +125,38 @@ struct io_mapping;
 static inline struct io_mapping *
 io_mapping_create_wc(resource_size_t base, unsigned long size)
 {
-       return (struct io_mapping *) ioremap_wc(base, size);
+       return (struct io_mapping __force *) ioremap_wc(base, size);
 }
 
 static inline void
 io_mapping_free(struct io_mapping *mapping)
 {
-       iounmap(mapping);
+       iounmap((void __force __iomem *) mapping);
 }
 
 /* Atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset,
                         int slot)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
 {
 }
 
 /* Non-atomic map/unmap */
-static inline void *
+static inline void __iomem *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
-       return ((char *) mapping) + offset;
+       return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap(void *vaddr)
+io_mapping_unmap(void __iomem *vaddr)
 {
 }
 
index 4aa95f203f3ee773a6ab4bbdbb632efaf970785e..62dbee554f608c91fe7b2b3bf01f390260821c52 100644 (file)
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.in = __tmp->kfifo.out = 0; \
 })
 
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_reset_out(fifo)  \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        __tmp->kfifo.out = __tmp->kfifo.in; \
 })
 
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define kfifo_len(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpl = (fifo); \
+       typeof((fifo) + 1) __tmpl = (fifo); \
        __tmpl->kfifo.in - __tmpl->kfifo.out; \
 })
 
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_empty(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        __tmpq->kfifo.in == __tmpq->kfifo.out; \
 })
 
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val)
  */
 #define        kfifo_is_full(fifo) \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
 })
 
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val)
 #define        kfifo_avail(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmpq = (fifo); \
+       typeof((fifo) + 1) __tmpq = (fifo); \
        const size_t __recsize = sizeof(*__tmpq->rectype); \
        unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
        (__recsize) ? ((__avail <= __recsize) ? 0 : \
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_skip(fifo) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__recsize) \
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \
 #define kfifo_peek_len(fifo) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \
 #define kfifo_alloc(fifo, size, gfp_mask) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_free(fifo) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        if (__is_kfifo_ptr(__tmp)) \
                __kfifo_free(__kfifo); \
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_init(fifo, buffer, size) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
        __is_kfifo_ptr(__tmp) ? \
        __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_put(fifo, val) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_get(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_peek(fifo, val) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(val + 1) __val = (val); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((val) + 1) __val = (val); \
        unsigned int __ret; \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_in(fifo, buf, n) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_from_user(fifo, from, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        const void __user *__from = (from); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \
 #define        kfifo_to_user(fifo, to, len, copied) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        void __user *__to = (to); \
        unsigned int __len = (len); \
        unsigned int *__copied = (copied); \
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_in_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_in_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \
  */
 #define        kfifo_dma_out_prepare(fifo, sgl, nents, len) \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo);  \
        struct scatterlist *__sgl = (sgl); \
        int __nents = (nents); \
        unsigned int __len = (len); \
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \
  */
 #define kfifo_dma_out_finish(fifo, len) \
 (void)({ \
-       typeof(fifo + 1) __tmp = (fifo); \
+       typeof((fifo) + 1) __tmp = (fifo); \
        unsigned int __len = (len); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \
 #define        kfifo_out_peek(fifo, buf, n) \
 __kfifo_must_check_helper( \
 ({ \
-       typeof(fifo + 1) __tmp = (fifo); \
-       typeof(buf + 1) __buf = (buf); \
+       typeof((fifo) + 1) __tmp = (fifo); \
+       typeof((buf) + 1) __buf = (buf); \
        unsigned long __n = (n); \
        const size_t __recsize = sizeof(*__tmp->rectype); \
        struct __kfifo *__kfifo = &__tmp->kfifo; \
index 74d691ee9121c5bb3aa8336d7eeffcc03d88cc46..3319a6967626e02f91c340080b21950a8310a67f 100644 (file)
@@ -16,6 +16,9 @@
 struct stable_node;
 struct mem_cgroup;
 
+struct page *ksm_does_need_to_copy(struct page *page,
+                       struct vm_area_struct *vma, unsigned long address);
+
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags);
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page,
  * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
  * but what if the vma was unmerged while the page was swapped out?
  */
-struct page *ksm_does_need_to_copy(struct page *page,
-                       struct vm_area_struct *vma, unsigned long address);
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
        struct anon_vma *anon_vma = page_anon_vma(page);
 
-       if (!anon_vma ||
-           (anon_vma->root == vma->anon_vma->root &&
-            page->index == linear_page_index(vma, address)))
-               return page;
-
-       return ksm_does_need_to_copy(page, vma, address);
+       return anon_vma &&
+               (anon_vma->root != vma->anon_vma->root ||
+                page->index != linear_page_index(vma, address));
 }
 
 int page_referenced_ksm(struct page *page,
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
        return 0;
 }
 
-static inline struct page *ksm_might_need_to_copy(struct page *page,
+static inline int ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
 {
-       return page;
+       return 0;
 }
 
 static inline int page_referenced_ksm(struct page *page,
index b288cb713b902182cca71156e5d9f75c7452a117..f549056fb20bd5533555918cc1b1f9805c2cdcc3 100644 (file)
        int i;                                                          \
        preempt_disable();                                              \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock(void) {                                     \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_possible_cpu(i) {                                      \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
index f010f18a0f863f39e139d65469debae94232b259..45fb2967b66d6949094289a467ae120d12014c0d 100644 (file)
@@ -335,6 +335,7 @@ enum {
        ATA_EHI_HOTPLUGGED      = (1 << 0),  /* could have been hotplugged */
        ATA_EHI_NO_AUTOPSY      = (1 << 2),  /* no autopsy */
        ATA_EHI_QUIET           = (1 << 3),  /* be quiet */
+       ATA_EHI_NO_RECOVERY     = (1 << 4),  /* no recovery */
 
        ATA_EHI_DID_SOFTRESET   = (1 << 16), /* already soft-reset this port */
        ATA_EHI_DID_HARDRESET   = (1 << 17), /* already soft-reset this port */
@@ -723,6 +724,7 @@ struct ata_port {
        struct ata_ioports      ioaddr; /* ATA cmd/ctl/dma register blocks */
        u8                      ctl;    /* cache of ATA control register */
        u8                      last_ctl;       /* Cache last written value */
+       struct ata_link*        sff_pio_task_link; /* link currently used */
        struct delayed_work     sff_pio_task;
 #ifdef CONFIG_ATA_BMDMA
        struct ata_bmdma_prd    *bmdma_prd;     /* BMDMA SG list */
@@ -1594,7 +1596,7 @@ extern void ata_sff_irq_on(struct ata_port *ap);
 extern void ata_sff_irq_clear(struct ata_port *ap);
 extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
                            u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
 extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
 extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
 extern unsigned int ata_sff_port_intr(struct ata_port *ap,
index e6b1210772ceace3fc70817a32e1a411096a6b22..74949fbef8c608b9c5ef6dab3b508041a11243f3 100644 (file)
@@ -864,6 +864,12 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
index 329a8faa6e37bb32bd6e65f1b8fd758ac45baa36..245cdacee5443791eb5418d10bde3daa190351a9 100644 (file)
@@ -38,6 +38,8 @@
  *      [8:0] Byte/block count
  */
 
+#define R4_MEMORY_PRESENT (1 << 27)
+
 /*
   SDIO status in R5
   Type
index 6e6e62648a4d4a6d792fe207d42105563b112aaa..3984c4eb41fdc9c85759dbffe308df5a806391f4 100644 (file)
@@ -283,6 +283,13 @@ struct zone {
        /* zone watermarks, access with *_wmark_pages(zone) macros */
        unsigned long watermark[NR_WMARK];
 
+       /*
+        * When free pages are below this point, additional steps are taken
+        * when reading the number of free pages to avoid per-cpu counter
+        * drift allowing watermarks to be breached
+        */
+       unsigned long percpu_drift_mark;
+
        /*
         * We don't know if the memory that we're going to allocate will be freeable
         * or/and it will be released eventually, so to avoid totally wasting several
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
index 878cab4f5fcc5db95585184c22aea5d905a34295..f363bc8fdc74c821c99aa59d5bfcb9554c012c9a 100644 (file)
@@ -78,6 +78,14 @@ struct mutex_waiter {
 # include <linux/mutex-debug.h>
 #else
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
 # define mutex_init(mutex) \
 do {                                                   \
        static struct lock_class_key __key;             \
index b1d17956a1536ff9f1e913cc6f33410f2e54cfa8..c8d95e369ff441fe54760192ce4a0d9922f0508a 100644 (file)
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
                                                unsigned int devfn)
 { return NULL; }
 
+static inline int pci_domain_nr(struct pci_bus *bus)
+{ return 0; }
+
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
 #define dev_num_vf(d) (0)
index f6a3b2d36cadde8e1a6684d1c9fdc408eebef9b2..10d33309e9a61351aa4d3597540f201c97dc9294 100644 (file)
 #define PCI_DEVICE_ID_P2010            0x0079
 #define PCI_DEVICE_ID_P1020E           0x0100
 #define PCI_DEVICE_ID_P1020            0x0101
+#define PCI_DEVICE_ID_P1021E           0x0102
+#define PCI_DEVICE_ID_P1021            0x0103
 #define PCI_DEVICE_ID_P1011E           0x0108
 #define PCI_DEVICE_ID_P1011            0x0109
 #define PCI_DEVICE_ID_P1022E           0x0110
index b8b9084527b18029f602c02fe9db5d7c5754364b..49466b13c5c6b310f36b31c052573864df6f02a0 100644 (file)
@@ -149,7 +149,7 @@ extern void __init percpu_init_late(void);
 
 #else /* CONFIG_SMP */
 
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
 
 /* can't distinguish from other static vars, always false */
 static inline bool is_kernel_percpu_address(unsigned long addr)
index 7415839ac890f538b88611f7843b5b770e73292f..5310d27abd2a503ad523059ea4832f34ecfbb194 100644 (file)
@@ -26,6 +26,9 @@ struct semaphore {
        .wait_list      = LIST_HEAD_INIT((name).wait_list),             \
 }
 
+#define DEFINE_SEMAPHORE(name) \
+       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
 #define DECLARE_MUTEX(name)    \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
index 1ebc694a6d521575165ad8443a472e0268e36dff..ef914061511ec833c706a38ddf47cac8ad52d93a 100644 (file)
@@ -77,8 +77,7 @@ struct serial_struct {
 #define PORT_16654     11
 #define PORT_16850     12
 #define PORT_RSA       13      /* RSA-DV II/S card */
-#define PORT_U6_16550A 14
-#define PORT_MAX       14
+#define PORT_MAX       13
 
 #define SERIAL_IO_PORT 0
 #define SERIAL_IO_HUB6 1
index 64458a9a893809af26a6036b3aee1f62209d5107..563e234009130ec557d1d6cb7d58b1875488d035 100644 (file)
@@ -44,7 +44,8 @@
 #define PORT_RM9000    16      /* PMC-Sierra RM9xxx internal UART */
 #define PORT_OCTEON    17      /* Cavium OCTEON internal UART */
 #define PORT_AR7       18      /* Texas Instruments AR7 internal UART */
-#define PORT_MAX_8250  18      /* max port ID */
+#define PORT_U6_16550A 19      /* ST-Ericsson U6xxx internal UART */
+#define PORT_MAX_8250  19      /* max port ID */
 
 /*
  * ARM specific type numbers.  These are not currently guaranteed
index 2fee51a11b7399aea7ea7427ae32777b45cf8903..7cdd63366f883a164a7f5d5b74ff8882c62b4f8c 100644 (file)
@@ -19,6 +19,7 @@ struct bio;
 #define SWAP_FLAG_PREFER       0x8000  /* set if swap priority specified */
 #define SWAP_FLAG_PRIO_MASK    0x7fff
 #define SWAP_FLAG_PRIO_SHIFT   0
+#define SWAP_FLAG_DISCARD      0x10000 /* discard swap cluster after use */
 
 static inline int current_is_kswapd(void)
 {
@@ -142,7 +143,7 @@ struct swap_extent {
 enum {
        SWP_USED        = (1 << 0),     /* is slot in swap_info[] used? */
        SWP_WRITEOK     = (1 << 1),     /* ok to write to this swap?    */
-       SWP_DISCARDABLE = (1 << 2),     /* blkdev supports discard */
+       SWP_DISCARDABLE = (1 << 2),     /* swapon+blkdev support discard */
        SWP_DISCARDING  = (1 << 3),     /* now discarding a free cluster */
        SWP_SOLIDSTATE  = (1 << 4),     /* blkdev seeks are cheap */
        SWP_CONTINUED   = (1 << 5),     /* swap_map has count continuation */
@@ -315,6 +316,7 @@ extern long nr_swap_pages;
 extern long total_swap_pages;
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
 extern void swap_shmem_alloc(swp_entry_t);
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
-#ifdef CONFIG_HIBERNATION
-void hibernation_freeze_swap(void);
-void hibernation_thaw_swap(void);
-swp_entry_t get_swap_for_hibernation(int type);
-void swap_free_for_hibernation(swp_entry_t val);
-#endif
-
 /* linux/mm/thrash.c */
 extern struct mm_struct *swap_token_mm;
 extern void grab_swap_token(struct mm_struct *);
index 7f43ccdc1d38c0eb919efe4891e1b91ec19c3705..eaaea37b3b75dd64b73a34a0e3beb31417bdd0d6 100644 (file)
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
+{
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
+}
+
 extern unsigned long global_reclaimable_pages(void);
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
index 4f9d277bcd9a5cd32ef35bc304bdbe3f6339a38a..f11100f964824c250b4eacb21e96e36640527ac6 100644 (file)
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
 
 enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
-       WORK_STRUCT_CWQ_BIT     = 1,    /* data points to cwq */
-       WORK_STRUCT_LINKED_BIT  = 2,    /* next work is linked to this one */
+       WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
+       WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+       WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-       WORK_STRUCT_STATIC_BIT  = 3,    /* static initializer (debugobjects) */
-       WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
+       WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
+       WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
 #else
-       WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
+       WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
 #endif
 
        WORK_STRUCT_COLOR_BITS  = 4,
 
        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
+       WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
        WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
 
        /*
         * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-        * off.  This makes cwqs aligned to 128 bytes which isn't too
-        * excessive while allowing 15 workqueue flush colors.
+        * off.  This makes cwqs aligned to 256 bytes and allows 15
+        * workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
        WQ_HIGHPRI              = 1 << 4, /* high priority */
        WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
 
+       WQ_DYING                = 1 << 6, /* internal: workqueue is dying */
+
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
        WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
index 726cc353640988bd6fed9361f74cb8ef6dfbb9ce..ef6c24a529e1a23188a27b60a0d6e73e84fb7003 100644 (file)
@@ -27,11 +27,17 @@ struct cgroup_cls_state
 #ifdef CONFIG_NET_CLS_CGROUP
 static inline u32 task_cls_classid(struct task_struct *p)
 {
+       int classid;
+
        if (in_interrupt())
                return 0;
 
-       return container_of(task_subsys_state(p, net_cls_subsys_id),
-                           struct cgroup_cls_state, css)->classid;
+       rcu_read_lock();
+       classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+                              struct cgroup_cls_state, css)->classid;
+       rcu_read_unlock();
+
+       return classid;
 }
 #else
 extern int net_cls_subsys_id;
index a4747a0f7303ab73b1dffa7cd9a66740c0ef0ffe..f976885f686f67f593d2928175e9f06de1c94f56 100644 (file)
@@ -955,6 +955,9 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
        return csum_partial(diff, sizeof(diff), oldsum);
 }
 
+extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+                                  int outin);
+
 #endif /* __KERNEL__ */
 
 #endif /* _NET_IP_VS_H */
index ac53bfbdfe16b57038cf6c0b7f88cc88f5221594..adab9dc5818355c603a699101c4787c99434eb12 100644 (file)
@@ -752,6 +752,7 @@ struct proto {
        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
        void                    (*unhash)(struct sock *sk);
+       void                    (*rehash)(struct sock *sk);
        int                     (*get_port)(struct sock *sk, unsigned short snum);
 
        /* Keeping track of sockets in use */
index 7abdf305da50fad63880b12c2c76a69b6b74a5fd..a184d3496b1369deefd62aba376f04320f76a773 100644 (file)
@@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk)
 }
 
 extern void udp_lib_unhash(struct sock *sk);
+extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
 
 static inline void udp_lib_close(struct sock *sk, long timeout)
 {
index 192f88c5b0f9df29b80d51d4c5ceba5388d099eb..c9483d8f6140ed6cb4e06fa6139e2aeb907b3d47 100644 (file)
@@ -1791,19 +1791,20 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
-       struct cgroup *cur_cg;
        int retval = 0;
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
-               retval = cgroup_attach_task(cur_cg, tsk);
+               struct cgroup *from_cg = task_cgroup_from_root(from, root);
+
+               retval = cgroup_attach_task(from_cg, tsk);
                if (retval)
                        break;
        }
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
index 75bd9b3ebbb7cf501800115a531e86e7c30c5a28..20059ef4459a4ff293428337d9936ff438a8eb96 100644 (file)
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
        int i, bpno;
        kdb_bp_t *bp, *bp_check;
        int diag;
-       int free;
        char *symname = NULL;
        long offset = 0ul;
        int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
        /*
         * Find an empty bp structure to allocate
         */
-       free = KDB_MAXBPT;
        for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                if (bp->bp_free)
                        break;
index ef3c3f88a7a35e36d8f1fb551c56e534cea52687..f83972b16564d00676154900f09d3c70affd773c 100644 (file)
  * @children: child nodes
  * @all: list head for list of all nodes
  * @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- *         copy of the profiling data here to allow collecting coverage data
- *         for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ *   files.
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ *   object files. Used only when gcov_persist=1.
  * @dentry: main debugfs entry, either a directory or data file
  * @links: associated symbolic links
  * @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
        struct list_head children;
        struct list_head all;
        struct gcov_node *parent;
-       struct gcov_info *info;
-       struct gcov_info *ghost;
+       struct gcov_info **loaded_info;
+       struct gcov_info *unloaded_info;
        struct dentry *dentry;
        struct dentry **links;
+       int num_loaded;
        char name[0];
 };
 
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
 };
 
 /*
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
  */
 static struct gcov_info *get_node_info(struct gcov_node *node)
 {
-       if (node->info)
-               return node->info;
+       if (node->num_loaded > 0)
+               return node->loaded_info[0];
 
-       return node->ghost;
+       return node->unloaded_info;
+}
+
+/*
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+ */
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+{
+       struct gcov_info *info;
+       int i = 0;
+
+       if (node->unloaded_info)
+               info = gcov_info_dup(node->unloaded_info);
+       else
+               info = gcov_info_dup(node->loaded_info[i++]);
+       if (!info)
+               return NULL;
+       for (; i < node->num_loaded; i++)
+               gcov_info_add(info, node->loaded_info[i]);
+
+       return info;
 }
 
 /*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
        mutex_lock(&node_lock);
        /*
         * Read from a profiling data copy to minimize reference tracking
-        * complexity and concurrent access.
+        * complexity and concurrent access and to keep accumulating multiple
+        * profiling data sets associated with one node simple.
         */
-       info = gcov_info_dup(get_node_info(node));
+       info = get_accumulated_info(node);
        if (!info)
                goto out_unlock;
        iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
        return NULL;
 }
 
+/*
+ * Reset all profiling data associated with the specified node.
+ */
+static void reset_node(struct gcov_node *node)
+{
+       int i;
+
+       if (node->unloaded_info)
+               gcov_info_reset(node->unloaded_info);
+       for (i = 0; i < node->num_loaded; i++)
+               gcov_info_reset(node->loaded_info[i]);
+}
+
 static void remove_node(struct gcov_node *node);
 
 /*
  * write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
  */
 static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
                              size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
        node = get_node_by_name(info->filename);
        if (node) {
                /* Reset counts or remove node for unloaded modules. */
-               if (node->ghost)
+               if (node->num_loaded == 0)
                        remove_node(node);
                else
-                       gcov_info_reset(node->info);
+                       reset_node(node);
        }
        /* Reset counts for open file. */
        gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
        INIT_LIST_HEAD(&node->list);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->all);
-       node->info = info;
+       if (node->loaded_info) {
+               node->loaded_info[0] = info;
+               node->num_loaded = 1;
+       }
        node->parent = parent;
        if (name)
                strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        struct gcov_node *node;
 
        node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
-       if (!node) {
-               pr_warning("out of memory\n");
-               return NULL;
+       if (!node)
+               goto err_nomem;
+       if (info) {
+               node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+                                          GFP_KERNEL);
+               if (!node->loaded_info)
+                       goto err_nomem;
        }
        init_node(node, info, name, parent);
        /* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
        list_add(&node->all, &all_head);
 
        return node;
+
+err_nomem:
+       kfree(node);
+       pr_warning("out of memory\n");
+       return NULL;
 }
 
 /* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
        list_del(&node->all);
        debugfs_remove(node->dentry);
        remove_links(node);
-       if (node->ghost)
-               gcov_info_free(node->ghost);
+       kfree(node->loaded_info);
+       if (node->unloaded_info)
+               gcov_info_free(node->unloaded_info);
        kfree(node);
 }
 
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
 
 /*
  * write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
  */
 static ssize_t reset_write(struct file *file, const char __user *addr,
                           size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
        mutex_lock(&node_lock);
 restart:
        list_for_each_entry(node, &all_head, all) {
-               if (node->info)
-                       gcov_info_reset(node->info);
+               if (node->num_loaded > 0)
+                       reset_node(node);
                else if (list_empty(&node->children)) {
                        remove_node(node);
                        /* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
 }
 
 /*
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
  */
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
 {
-       node->ghost = gcov_info_dup(node->info);
-       if (!node->ghost) {
-               pr_warning("could not save data for '%s' (out of memory)\n",
-                          node->info->filename);
-               return -ENOMEM;
+       struct gcov_info **loaded_info;
+       int num = node->num_loaded;
+
+       /*
+        * Prepare new array. This is done first to simplify cleanup in
+        * case the new data set is incompatible, the node only contains
+        * unloaded data sets and there's not enough memory for the array.
+        */
+       loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+       if (!loaded_info) {
+               pr_warning("could not add '%s' (out of memory)\n",
+                          info->filename);
+               return;
+       }
+       memcpy(loaded_info, node->loaded_info,
+              num * sizeof(struct gcov_info *));
+       loaded_info[num] = info;
+       /* Check if the new data set is compatible. */
+       if (num == 0) {
+               /*
+                * A module was unloaded, modified and reloaded. The new
+                * data set replaces the copy of the last one.
+                */
+               if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+                       pr_warning("discarding saved data for %s "
+                                  "(incompatible version)\n", info->filename);
+                       gcov_info_free(node->unloaded_info);
+                       node->unloaded_info = NULL;
+               }
+       } else {
+               /*
+                * Two different versions of the same object file are loaded.
+                * The initial one takes precedence.
+                */
+               if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+                       pr_warning("could not add '%s' (incompatible "
+                                  "version)\n", info->filename);
+                       kfree(loaded_info);
+                       return;
+               }
        }
-       node->info = NULL;
+       /* Overwrite previous array. */
+       kfree(node->loaded_info);
+       node->loaded_info = loaded_info;
+       node->num_loaded = num + 1;
+}
 
-       return 0;
+/*
+ * Return the index of a profiling data set associated with a node.
+ */
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       for (i = 0; i < node->num_loaded; i++) {
+               if (node->loaded_info[i] == info)
+                       return i;
+       }
+       return -ENOENT;
 }
 
 /*
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
  */
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
 {
-       if (gcov_info_is_compatible(node->ghost, info))
-               gcov_info_add(info, node->ghost);
+       if (node->unloaded_info)
+               gcov_info_add(node->unloaded_info, info);
        else {
-               pr_warning("discarding saved data for '%s' (version changed)\n",
+               node->unloaded_info = gcov_info_dup(info);
+               if (!node->unloaded_info) {
+                       pr_warning("could not save data for '%s' "
+                                  "(out of memory)\n", info->filename);
+               }
+       }
+}
+
+/*
+ * Disassociate a profiling data set from a node. Needs to be called with
+ * node_lock held.
+ */
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+{
+       int i;
+
+       i = get_info_index(node, info);
+       if (i < 0) {
+               pr_warning("could not remove '%s' (not found)\n",
                           info->filename);
+               return;
        }
-       gcov_info_free(node->ghost);
-       node->ghost = NULL;
-       node->info = info;
+       if (gcov_persist)
+               save_info(node, info);
+       /* Shrink array. */
+       node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+       node->num_loaded--;
+       if (node->num_loaded > 0)
+               return;
+       /* Last loaded data set was removed. */
+       kfree(node->loaded_info);
+       node->loaded_info = NULL;
+       node->num_loaded = 0;
+       if (!node->unloaded_info)
+               remove_node(node);
 }
 
 /*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
        node = get_node_by_name(info->filename);
        switch (action) {
        case GCOV_ADD:
-               /* Add new node or revive ghost. */
-               if (!node) {
+               if (node)
+                       add_info(node, info);
+               else
                        add_node(info);
-                       break;
-               }
-               if (gcov_persist)
-                       revive_node(node, info);
-               else {
-                       pr_warning("could not add '%s' (already exists)\n",
-                                  info->filename);
-               }
                break;
        case GCOV_REMOVE:
-               /* Remove node or turn into ghost. */
-               if (!node) {
+               if (node)
+                       remove_info(node, info);
+               else {
                        pr_warning("could not remove '%s' (not found)\n",
                                   info->filename);
-                       break;
                }
-               if (gcov_persist) {
-                       if (!ghost_node(node))
-                               break;
-               }
-               remove_node(node);
                break;
        }
        mutex_unlock(&node_lock);
index 53b1916c94926c245aacd17ccb420fc00d06a19b..253dc0f35cf4c30786d1ff3565427d1374762a04 100644 (file)
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left+right)/2;
-               int cmp = grp - GROUP_AT(group_info, mid);
-               if (cmp > 0)
+               if (grp > GROUP_AT(group_info, mid))
                        left = mid + 1;
-               else if (cmp < 0)
+               else if (grp < GROUP_AT(group_info, mid))
                        right = mid;
                else
                        return 1;
index ce669174f355c7dd1e1893903bb4d18a25f17c34..1decafbb6b1a28197b768021cc987e230d352b5b 100644 (file)
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-       struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;
 
-       base = lock_hrtimer_base(timer, &flags);
+       lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
index 4c0b7b3e6d2e9a483c6cb4cc384e979911ed03bb..200407c1502f509ee3f9d8a665bc4d3b78a27f74 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
  *
  * This function must not be used in interrupt context. The
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..db5b56064687e453c0df1cc118b975ea047bdcae 100644 (file)
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is return
+        * via read() for time_enabled, time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
 
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
                perf_event_init_cpu(cpu);
                break;
 
+       case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
                break;
 
index c77963938bca440a90423952d6d85cf4d66abd83..8dc31e02ae129e8f042804b67c38ab02f997d94c 100644 (file)
@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
                goto Close;
 
        suspend_console();
-       hibernation_freeze_swap();
        saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
        error = dpm_suspend_start(PMSG_FREEZE);
        if (error)
index 5e7edfb05e66cff0d2c99d5fc8fddfde03e372c3..f6cd6faf84fdb516323e4257f53a61778c7fd60b 100644 (file)
@@ -1086,7 +1086,6 @@ void swsusp_free(void)
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
-       hibernation_thaw_swap();
 }
 
 /* Helper functions used for the shrinking of memory. */
index 5d0059eed3e4e3ce0bc38ad072bd7b1430d9f712..e6a5bdf61a375c309c1f9ea356e9123d79699037 100644 (file)
@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
 {
        unsigned long offset;
 
-       offset = swp_offset(get_swap_for_hibernation(swap));
+       offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
-                       swap_free_for_hibernation(swp_entry(swap, offset));
+                       swap_free(swp_entry(swap, offset));
 
                kfree(ext);
        }
index 09b574e7f4df7c14615d104c3fe736f7c1be0847..ed09d4f2a69c5b4c1412d350c7c834a655732f2a 100644 (file)
@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
+
+static void sched_avg_update(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
 
                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
        }
+
+       sched_avg_update(this_rq);
 }
 
 static void update_cpu_load_active(struct rq *this_rq)
index ab661ebc4895a8471ecc808825477cf0c3558444..9b5b4f86b742e80b6e653f967b509b60487d49e9 100644 (file)
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                  int this_cpu, int load_idx)
 {
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+       struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                if (local_group) {
                        this_load = avg_load;
-                       this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
        struct rq *rq = cpu_rq(cpu);
        u64 total, available;
 
-       sched_avg_update(rq);
-
        total = sched_avg_period() + (rq->clock - rq->age_stamp);
        available = total - rq->rt_avg;
 
index e9ad4448982860af9919df53c3368156a4bf2445..7f5a0cd296a96ca44e43f0db028026094dbbb57a 100644 (file)
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;
+       rcu_read_lock();
 
        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        return err;
 }
 
index ca38e8e3e907557f74faaad7ddb57d330bd43d2d..f88552c6d2275be1216187f07b1e0e1b22b93af2 100644 (file)
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
        sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-       {
-               int err;
-               err = sysctl_check_table(current->nsproxy, root_table);
-       }
+       sysctl_check_table(current->nsproxy, root_table);
 #endif
        return 0;
 }
index 0d88ce9b9fb8828c9a81fdffcd47763ae5cc2543..fa7ece649fe1bcad7b0db621fa8579e91860fb04 100644 (file)
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
+       int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       static DEFINE_MUTEX(mutex);
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
 #endif
+       mutex_lock(&ftrace_profile_lock);
+
+       /* we raced with function_profile_reset() */
+       if (unlikely(rec->counter == 0)) {
+               ret = -EBUSY;
+               goto out;
+       }
 
        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                do_div(stddev, (rec->counter - 1) * 1000);
        }
 
-       mutex_lock(&mutex);
        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
-       mutex_unlock(&mutex);
 #endif
        seq_putc(m, '\n');
+out:
+       mutex_unlock(&ftrace_profile_lock);
 
-       return 0;
+       return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
+               /* reset in case of seek/pread */
+               iter->flags &= ~FTRACE_ITER_HASH;
                return iter;
        }
 
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = no_llseek,
        .release = ftrace_filter_release,
 };
 
index 19cccc3c302871beae5fd39ad937b0791a2e785d..492197e2f86cda2792603186b59ad3fdd17c448d 100644 (file)
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
index 000e6e85b445906893d7003b2f28c615453bb726..31cc4cb0dbf2afaa49d5f7428f5cce31d535211d 100644 (file)
@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
+                       if (ret)
+                               module_put(tp_event->mod);
                        break;
                }
        }
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event)
                }
        }
 out:
+       module_put(tp_event->mod);
        mutex_unlock(&event_mutex);
 }
 
index 8b27c9849b427905ea9a5ef83864a88526afe405..544301d29dee45b0dc089db788bcfe89687bfa45 100644 (file)
@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);
 
-/* Check the name is good for event/group */
-static int check_event_name(const char *name)
+/* Check the name is good for event/group/fields */
+static int is_good_name(const char *name)
 {
        if (!isalpha(*name) && *name != '_')
                return 0;
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        else
                tp->rp.kp.pre_handler = kprobe_dispatcher;
 
-       if (!event || !check_event_name(event)) {
+       if (!event || !is_good_name(event)) {
                ret = -EINVAL;
                goto error;
        }
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
        if (!tp->call.name)
                goto error;
 
-       if (!group || !check_event_name(group)) {
+       if (!group || !is_good_name(group)) {
                ret = -EINVAL;
                goto error;
        }
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
        int i, ret = 0;
        int is_return = 0, is_delete = 0;
        char *symbol = NULL, *event = NULL, *group = NULL;
-       char *arg, *tmp;
+       char *arg;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
+               /* Increment count for freeing args in error case */
+               tp->nr_args++;
+
                /* Parse argument name */
                arg = strchr(argv[i], '=');
-               if (arg)
+               if (arg) {
                        *arg++ = '\0';
-               else
+                       tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
+               } else {
                        arg = argv[i];
+                       /* If argument name is omitted, set "argN" */
+                       snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
+                       tp->args[i].name = kstrdup(buf, GFP_KERNEL);
+               }
 
-               tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
                if (!tp->args[i].name) {
-                       pr_info("Failed to allocate argument%d name '%s'.\n",
-                               i, argv[i]);
+                       pr_info("Failed to allocate argument[%d] name.\n", i);
                        ret = -ENOMEM;
                        goto error;
                }
-               tmp = strchr(tp->args[i].name, ':');
-               if (tmp)
-                       *tmp = '_';     /* convert : to _ */
+
+               if (!is_good_name(tp->args[i].name)) {
+                       pr_info("Invalid argument[%d] name: %s\n",
+                               i, tp->args[i].name);
+                       ret = -EINVAL;
+                       goto error;
+               }
 
                if (conflict_field_name(tp->args[i].name, tp->args, i)) {
-                       pr_info("Argument%d name '%s' conflicts with "
+                       pr_info("Argument[%d] name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
                /* Parse fetch argument */
                ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
                if (ret) {
-                       pr_info("Parse error at argument%d. (%d)\n", i, ret);
-                       kfree(tp->args[i].name);
+                       pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
                        goto error;
                }
-
-               tp->nr_args++;
        }
 
        ret = register_trace_probe(tp);
index 0d53c8e853b12450cf0c74665d13a22e91a47543..7f9c3c52ecc12ef5d0de1728839218ff87c2dc7b 100644 (file)
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-       __get_cpu_var(watchdog_touch_ts) = 0;
+       __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-       __get_cpu_var(watchdog_nmi_touch) = true;
+       if (watchdog_enabled) {
+               unsigned cpu;
+
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                               per_cpu(watchdog_nmi_touch, cpu) = true;
+               }
+       }
        touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
                wake_up_process(p);
        }
 
+       /* if any cpu succeeds, watchdog is considered enabled for the system */
+       watchdog_enabled = 1;
+
        return 0;
 }
 
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
-
-       /* if any cpu succeeds, watchdog is considered enabled for the system */
-       watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
index 8bd600c020e5cdf5f2454681bc2e89fcc99c47d0..727f24e563aef326b8eba951d2a31a9aa864d32b 100644 (file)
@@ -90,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
        cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)            cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)         for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)          alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)          zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)                 free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
 
        debug_work_activate(work);
 
+       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+               return;
+
        /* determine gcwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        BUG_ON(!list_empty(&work->entry));
 
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
 
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
 
        spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!need_to_create_worker(gcwq))
                return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
 }
 
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
 
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
 
        /* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                                            get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (IS_ERR(rescuer->task))
                        goto err;
 
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
+       wq->flags |= WQ_DYING;
        flush_workqueue(wq);
 
        /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
                free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
 
        free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times.  To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
-               if (cpu == WORK_CPU_UNBOUND)
-                       gcwq->flags |= GCWQ_DISASSOCIATED;
+               gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644 (file)
index 0000000..162beca
--- /dev/null
@@ -0,0 +1,4 @@
+mktables
+altivec*.c
+int*.c
+tables.c
index a5ec42868f99d8d6700f34ca81c061c1ef21d15b..4ceb05d772aed12d392d618358284ea71cb51dd2 100644 (file)
@@ -248,8 +248,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                left -= sg_size;
 
                sg = alloc_fn(alloc_size, gfp_mask);
-               if (unlikely(!sg))
-                       return -ENOMEM;
+               if (unlikely(!sg)) {
+                       /*
+                        * Adjust entry count to reflect that the last
+                        * entry of the previous table won't be used for
+                        * linkage.  Without this, sg_kfree() may get
+                        * confused.
+                        */
+                       if (prv)
+                               table->nents = ++table->orig_nents;
+
+                       return -ENOMEM;
+               }
 
                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;
index f4e516e9c37cc4c62f93faf92131a632098d2ca5..f0fb9124e410c436c0f240d69f089f4935af2e92 100644 (file)
@@ -189,7 +189,7 @@ config COMPACTION
 config MIGRATION
        bool "Page migration"
        def_bool y
-       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
+       depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
        help
          Allows the migration of the physical location of pages of processes
          while the virtual addresses are not changed. This is useful in
index eaa4a5bbe0634390fc802ebffdbad4291fa3b991..c2bf86f470ed0044ab7366d5e6ea95a4aea2adf2 100644 (file)
@@ -445,8 +445,8 @@ static int bdi_forker_thread(void *ptr)
                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
-                       task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
-                                          dev_name(bdi->dev));
+                       task = kthread_create(bdi_writeback_thread, &bdi->wb,
+                                             "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
@@ -457,10 +457,13 @@ static int bdi_forker_thread(void *ptr)
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
+                                * And as soon as the bdi thread is visible, we
+                                * can start it.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
+                               wake_up_process(task);
                        }
                        break;
 
index 13b6dad1eed272bec61a388d17f116be62cb1bb5..1481de68184bce6d8fae978d3b6be8e3223319a5 100644 (file)
@@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
-               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
+               flush_dcache_page(tovec->bv_page);
        }
 }
 
index 94cce51b0b3535af75c20f29ecb86a11aba32a71..4d709ee5901370842534224a9f81e7d13943e196 100644 (file)
@@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
-
-       unsigned long inactive, isolated;
+       unsigned long active, inactive, isolated;
 
        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
                                        zone_page_state(zone, NR_INACTIVE_ANON);
+       active = zone_page_state(zone, NR_ACTIVE_FILE) +
+                                       zone_page_state(zone, NR_ACTIVE_ANON);
        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
                                        zone_page_state(zone, NR_ISOLATED_ANON);
 
-       return isolated > inactive;
+       return isolated > (inactive + active) / 2;
 }
 
 /*
index e2ae00458320786a380a1ab370efe0dc6cfd6e1a..b1873cf03ed986bcb062259da8f1c4093a97160b 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1504,8 +1504,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 {
        struct page *new_page;
 
-       unlock_page(page);      /* any racers will COW it, not modify it */
-
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
@@ -1521,7 +1519,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
                        add_page_to_unevictable_list(new_page);
        }
 
-       page_cache_release(page);
        return new_page;
 }
 
index 6b2ab10518512052c895dd5db7ff0f20fd1df2f3..71b161b73bb503be50556e9ff302ca0c7eaba396 100644 (file)
@@ -2623,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
@@ -2679,10 +2679,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       page = ksm_might_need_to_copy(page, vma, address);
-       if (!page) {
-               ret = VM_FAULT_OOM;
-               goto out;
+       /*
+        * Make sure try_to_free_swap didn't release the swapcache
+        * from under us. The page pin isn't enough to prevent that.
+        */
+       if (unlikely(!PageSwapCache(page)))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
        }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
@@ -2735,6 +2748,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock to avoid the swap entry to be reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2756,6 +2781,10 @@ out_page:
        unlock_page(page);
 out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
 
index a4cfcdc00455de4be15fcec98c76e45f8de5feab..dd186c1a5d53f9ebd27de4c1bedcaac471d6c93e 100644 (file)
@@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page)
 /* Return the start of the next active pageblock after a given page */
 static struct page *next_active_pageblock(struct page *page)
 {
-       int pageblocks_stride;
-
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 
-       /* Move forward by at least 1 * pageblock_nr_pages */
-       pageblocks_stride = 1;
-
        /* If the entire pageblock is free, move to the end of free page */
-       if (pageblock_free(page))
-               pageblocks_stride += page_order(page) - pageblock_order;
+       if (pageblock_free(page)) {
+               int order;
+               /* be careful. we don't have locks, page_order can be changed.*/
+               order = page_order(page);
+               if ((order < MAX_ORDER) && (order >= pageblock_order))
+                       return page + (1 << order);
+       }
 
-       return page + (pageblocks_stride * pageblock_nr_pages);
+       return page + pageblock_nr_pages;
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
index cbae7c5b95680a1bfca1df7e11a215bfce15b57c..b70919ce4f72e6941f67b1a5462f5f270c231536 100644 (file)
@@ -135,12 +135,6 @@ void munlock_vma_page(struct page *page)
        }
 }
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
        return (vma->vm_flags & VM_GROWSDOWN) &&
index f5b7d1760213e53db3c46e84dde56daf219ea0cd..e35bfb82c8555b7377334dbea42bfcf588b0bab8 100644 (file)
@@ -87,3 +87,24 @@ int memmap_valid_within(unsigned long pfn,
        return 1;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#ifdef CONFIG_SMP
+/* Called when a more accurate view of NR_FREE_PAGES is needed */
+unsigned long zone_nr_free_pages(struct zone *zone)
+{
+       unsigned long nr_free_pages = zone_page_state(zone, NR_FREE_PAGES);
+
+       /*
+        * While kswapd is awake, it is considered the zone is under some
+        * memory pressure. Under pressure, there is a risk that
+        * per-cpu-counter-drift will allow the min watermark to be breached
+        * potentially causing a live-lock. While kswapd is awake and
+        * free pages are low, get a better estimate for free pages
+        */
+       if (nr_free_pages < zone->percpu_drift_mark &&
+                       !waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+               return zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
+       return nr_free_pages;
+}
+#endif /* CONFIG_SMP */
index a9649f4b261e6b3c01632939c46a77f19f447de1..a8cfa9cc6e86e5d6912a39bc3f5c9d18fb97b7cf 100644 (file)
@@ -588,13 +588,13 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
        int migratetype = 0;
        int batch_free = 0;
+       int to_free = count;
 
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
-       while (count) {
+       while (to_free) {
                struct page *page;
                struct list_head *list;
 
@@ -619,8 +619,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
-               } while (--count && --batch_free && !list_empty(list));
+               } while (--to_free && --batch_free && !list_empty(list));
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
 }
 
@@ -631,8 +632,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        __free_one_page(page, zone, order, migratetype);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
 }
 
@@ -1461,7 +1462,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 {
        /* free_pages my go negative - that's OK */
        long min = mark;
-       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
+       long free_pages = zone_nr_free_pages(z) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -1846,6 +1847,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
+       bool drained = false;
 
        cond_resched();
 
@@ -1864,14 +1866,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        cond_resched();
 
-       if (order != 0)
-               drain_all_pages();
+       if (unlikely(!(*did_some_progress)))
+               return NULL;
 
-       if (likely(*did_some_progress))
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
+retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
                                        zonelist, high_zoneidx,
                                        alloc_flags, preferred_zone,
                                        migratetype);
+
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+        * pages are pinned on the per-cpu lists. Drain them and try again
+        */
+       if (!page && !drained) {
+               drain_all_pages();
+               drained = true;
+               goto retry;
+       }
+
        return page;
 }
 
@@ -2423,7 +2436,7 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone_page_state(zone, NR_FREE_PAGES)),
+                       K(zone_nr_free_pages(zone)),
                        K(min_wmark_pages(zone)),
                        K(low_wmark_pages(zone)),
                        K(high_wmark_pages(zone)),
index e61dc2cc5873d7d89efc45609a4f4f2a8ae75732..58c572b18b07ffbca4e2120d4e1600705db5fc0e 100644 (file)
@@ -393,7 +393,9 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
                goto out_unlock;
 
        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
-       memcpy(new, chunk->map, old_size);
+       old = chunk->map;
+
+       memcpy(new, old, old_size);
 
        chunk->map_alloc = new_alloc;
        chunk->map = new;
@@ -1162,7 +1164,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                }
 
                /*
-                * Don't accept if wastage is over 25%.  The
+                * Don't accept if wastage is over 1/3.  The
                 * greater-than comparison ensures upa==1 always
                 * passes the following check.
                 */
index c4351c7f57d21db63487d0c06728a64d0c0f1318..db884fae5721749e57990d9f9ac33398efb1ca37 100644 (file)
@@ -14,13 +14,13 @@ void __percpu *__alloc_percpu(size_t size, size_t align)
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-       return kzalloc(size, GFP_KERNEL);
+       return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 void free_percpu(void __percpu *p)
 {
-       kfree(p);
+       kfree(this_cpu_ptr(p));
 }
 EXPORT_SYMBOL_GPL(free_percpu);
 
index 1f3f9c59a73ab5be4ff4bb37f428364df7544706..7c703ff2f36f0b760b79eb36149084f07621a0a1 100644 (file)
@@ -47,8 +47,6 @@ long nr_swap_pages;
 long total_swap_pages;
 static int least_priority;
 
-static bool swap_for_hibernation;
-
 static const char Bad_file[] = "Bad swap file entry ";
 static const char Unused_file[] = "Unused swap file entry ";
 static const char Bad_offset[] = "Bad swap offset entry ";
@@ -141,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si)
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        return err;
                cond_resched();
@@ -153,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si)
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
 
                err = blkdev_issue_discard(si->bdev, start_block,
-                               nr_blocks, GFP_KERNEL,
-                               BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+                               nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
                if (err)
                        break;
 
@@ -193,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
-                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT |
-                                                       BLKDEV_IFL_BARRIER))
+                                   nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
                                break;
                }
 
@@ -320,10 +315,8 @@ checks:
        if (offset > si->highest_bit)
                scan_base = offset = si->lowest_bit;
 
-       /* reuse swap entry of cache-only swap if not hibernation. */
-       if (vm_swap_full()
-               && usage == SWAP_HAS_CACHE
-               && si->swap_map[offset] == SWAP_HAS_CACHE) {
+       /* reuse swap entry of cache-only swap if not busy. */
+       if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
                int swap_was_freed;
                spin_unlock(&swap_lock);
                swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -453,8 +446,6 @@ swp_entry_t get_swap_page(void)
        spin_lock(&swap_lock);
        if (nr_swap_pages <= 0)
                goto noswap;
-       if (swap_for_hibernation)
-               goto noswap;
        nr_swap_pages--;
 
        for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
@@ -487,6 +478,28 @@ noswap:
        return (swp_entry_t) {0};
 }
 
+/* The only caller of this function is now susupend routine */
+swp_entry_t get_swap_page_of_type(int type)
+{
+       struct swap_info_struct *si;
+       pgoff_t offset;
+
+       spin_lock(&swap_lock);
+       si = swap_info[type];
+       if (si && (si->flags & SWP_WRITEOK)) {
+               nr_swap_pages--;
+               /* This is called for allocating swap entry, not cache */
+               offset = scan_swap_map(si, 1);
+               if (offset) {
+                       spin_unlock(&swap_lock);
+                       return swp_entry(type, offset);
+               }
+               nr_swap_pages++;
+       }
+       spin_unlock(&swap_lock);
+       return (swp_entry_t) {0};
+}
+
 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
 {
        struct swap_info_struct *p;
@@ -670,6 +683,24 @@ int try_to_free_swap(struct page *page)
        if (page_swapcount(page))
                return 0;
 
+       /*
+        * Once hibernation has begun to create its image of memory,
+        * there's a danger that one of the calls to try_to_free_swap()
+        * - most probably a call from __try_to_reclaim_swap() while
+        * hibernation is allocating its own swap pages for the image,
+        * but conceivably even a call from memory reclaim - will free
+        * the swap from a page which has already been recorded in the
+        * image as a clean swapcache page, and then reuse its swap for
+        * another page of the image.  On waking from hibernation, the
+        * original page might be freed under memory pressure, then
+        * later read back in from swap, now with the wrong data.
+        *
+        * Hibernation clears bits from gfp_allowed_mask to prevent
+        * memory reclaim from writing to disk, so check that here.
+        */
+       if (!(gfp_allowed_mask & __GFP_IO))
+               return 0;
+
        delete_from_swap_cache(page);
        SetPageDirty(page);
        return 1;
@@ -746,74 +777,6 @@ int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
 #endif
 
 #ifdef CONFIG_HIBERNATION
-
-static pgoff_t hibernation_offset[MAX_SWAPFILES];
-/*
- * Once hibernation starts to use swap, we freeze swap_map[]. Otherwise,
- * saved swap_map[] image to the disk will be an incomplete because it's
- * changing without synchronization with hibernation snap shot.
- * At resume, we just make swap_for_hibernation=false. We can forget
- * used maps easily.
- */
-void hibernation_freeze_swap(void)
-{
-       int i;
-
-       spin_lock(&swap_lock);
-
-       printk(KERN_INFO "PM: Freeze Swap\n");
-       swap_for_hibernation = true;
-       for (i = 0; i < MAX_SWAPFILES; i++)
-               hibernation_offset[i] = 1;
-       spin_unlock(&swap_lock);
-}
-
-void hibernation_thaw_swap(void)
-{
-       spin_lock(&swap_lock);
-       if (swap_for_hibernation) {
-               printk(KERN_INFO "PM: Thaw Swap\n");
-               swap_for_hibernation = false;
-       }
-       spin_unlock(&swap_lock);
-}
-
-/*
- * Because updateing swap_map[] can make not-saved-status-change,
- * we use our own easy allocator.
- * Please see kernel/power/swap.c, Used swaps are recorded into
- * RB-tree.
- */
-swp_entry_t get_swap_for_hibernation(int type)
-{
-       pgoff_t off;
-       swp_entry_t val = {0};
-       struct swap_info_struct *si;
-
-       spin_lock(&swap_lock);
-
-       si = swap_info[type];
-       if (!si || !(si->flags & SWP_WRITEOK))
-               goto done;
-
-       for (off = hibernation_offset[type]; off < si->max; ++off) {
-               if (!si->swap_map[off])
-                       break;
-       }
-       if (off < si->max) {
-               val = swp_entry(type, off);
-               hibernation_offset[type] = off + 1;
-       }
-done:
-       spin_unlock(&swap_lock);
-       return val;
-}
-
-void swap_free_for_hibernation(swp_entry_t ent)
-{
-       /* Nothing to do */
-}
-
 /*
  * Find the swap type that corresponds to given device (if any).
  *
@@ -2084,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                        p->flags |= SWP_SOLIDSTATE;
                        p->cluster_next = 1 + (random32() % p->highest_bit);
                }
-               if (discard_swap(p) == 0)
+               if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
                        p->flags |= SWP_DISCARDABLE;
        }
 
index f389168f9a837b9c6be4e1f9bb3d0892396315de..355a9e669aaa800d62fa31d2b83110bf76cce9d7 100644 (file)
@@ -138,11 +138,24 @@ static void refresh_zone_stat_thresholds(void)
        int threshold;
 
        for_each_populated_zone(zone) {
+               unsigned long max_drift, tolerate_drift;
+
                threshold = calculate_threshold(zone);
 
                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
+
+               /*
+                * Only set percpu_drift_mark if there is a danger that
+                * NR_FREE_PAGES reports the low watermark is ok when in fact
+                * the min watermark could be breached by an allocation
+                */
+               tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
+               max_drift = num_online_cpus() * threshold;
+               if (max_drift > tolerate_drift)
+                       zone->percpu_drift_mark = high_wmark_pages(zone) +
+                                       max_drift;
        }
 }
 
@@ -813,7 +826,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
-                  zone_page_state(zone, NR_FREE_PAGES),
+                  zone_nr_free_pages(zone),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
@@ -998,6 +1011,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
+               refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
index 5ed00bd7009f55d1bdb5b8697889594442848e3f..137f23259a93947ce581a9d14c8f6048f3ca2c19 100644 (file)
@@ -761,9 +761,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
        if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
-           !skb_is_gso(skb))
+           !skb_is_gso(skb)) {
+               /* BUG: Should really parse the IP options here. */
+               memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                return ip_fragment(skb, br_dev_queue_push_xmit);
-       else
+       } else
                return br_dev_queue_push_xmit(skb);
 }
 #else
index 3721fbb9a83c3c7761c05ae39d8acab21b6f6b66..b9b22a3c4c8fa36ea3412e22a816d0c738562831 100644 (file)
@@ -2058,16 +2058,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
        int queue_index;
-       struct sock *sk = skb->sk;
+       const struct net_device_ops *ops = dev->netdev_ops;
 
-       queue_index = sk_tx_queue_get(sk);
-       if (queue_index < 0) {
-               const struct net_device_ops *ops = dev->netdev_ops;
+       if (ops->ndo_select_queue) {
+               queue_index = ops->ndo_select_queue(dev, skb);
+               queue_index = dev_cap_txqueue(dev, queue_index);
+       } else {
+               struct sock *sk = skb->sk;
+               queue_index = sk_tx_queue_get(sk);
+               if (queue_index < 0) {
 
-               if (ops->ndo_select_queue) {
-                       queue_index = ops->ndo_select_queue(dev, skb);
-                       queue_index = dev_cap_txqueue(dev, queue_index);
-               } else {
                        queue_index = 0;
                        if (dev->real_num_tx_queues > 1)
                                queue_index = skb_tx_hash(dev, skb);
index 9fbe7f7429b0efd036cc9e4a7df06293f85a8f0b..6743146e4d6b49a61204395b49e4ded721de2703 100644 (file)
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        est->last_packets = bstats->packets;
        est->avpps = rate_est->pps<<10;
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        if (!elist[idx].timer.function) {
                INIT_LIST_HEAD(&elist[idx].list);
                setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 
        list_add_rcu(&est->list, &elist[idx].list);
        gen_add_node(est);
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 
        return 0;
 }
@@ -270,7 +270,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 {
        struct gen_estimator *e;
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        while ((e = gen_find_node(bstats, rate_est))) {
                rb_erase(&e->node, &est_root);
 
@@ -281,7 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                list_del_rcu(&e->list);
                call_rcu(&e->e_rcu, __gen_kill_estimator);
        }
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 
        ASSERT_RTNL();
 
-       spin_lock(&est_tree_lock);
+       spin_lock_bh(&est_tree_lock);
        res = gen_find_node(bstats, rate_est) != NULL;
-       spin_unlock(&est_tree_lock);
+       spin_unlock_bh(&est_tree_lock);
 
        return res;
 }
index 3a2513f0d0c3036bf6c3f688f7e94b3e1d8f197c..c83b421341c01b4a1de702e42d3380fac2f0aa2b 100644 (file)
@@ -2573,6 +2573,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
+               /* nskb and skb might have different headroom */
+               if (nskb->ip_summed == CHECKSUM_PARTIAL)
+                       nskb->csum_start += skb_headroom(nskb) - headroom;
+
                skb_reset_mac_header(nskb);
                skb_set_network_header(nskb, skb->mac_len);
                nskb->transport_header = (nskb->network_header +
@@ -2703,7 +2707,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                return -E2BIG;
 
        headroom = skb_headroom(p);
-       nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+       nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
        if (unlikely(!nskb))
                return -ENOMEM;
 
index 7c3a7d19124995fd89034a4db2254566aab4cdfd..571f8950ed06f585f4dca482037d4b7985c256af 100644 (file)
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER
          rp_filter on use:
 
          echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
-          and
+          or
          echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
 
          Note that some distributions enable it in startup scripts.
index f0550941df7b9e1ac63468384eba7ede1bf6d537..721a8a37b45c77ce1d3ae78afffe57cf9eaeedf1 100644 (file)
@@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
        if (!inet->inet_saddr)
                inet->inet_saddr = rt->rt_src;  /* Update source address */
-       if (!inet->inet_rcv_saddr)
+       if (!inet->inet_rcv_saddr) {
                inet->inet_rcv_saddr = rt->rt_src;
+               if (sk->sk_prot->rehash)
+                       sk->sk_prot->rehash(sk);
+       }
        inet->inet_daddr = rt->rt_dst;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
index a43968918350244a057e6f3364727d6a2aa7baf2..7d02a9f999fabcebeb61800816d722e6f6c054ff 100644 (file)
@@ -246,6 +246,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 
        struct fib_result res;
        int no_addr, rpf, accept_local;
+       bool dev_match;
        int ret;
        struct net *net;
 
@@ -273,12 +274,22 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
        }
        *spec_dst = FIB_RES_PREFSRC(res);
        fib_combine_itag(itag, &res);
+       dev_match = false;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+       for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+               struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+               if (nh->nh_dev == dev) {
+                       dev_match = true;
+                       break;
+               }
+       }
 #else
        if (FIB_RES_DEV(res) == dev)
+               dev_match = true;
 #endif
-       {
+       if (dev_match) {
                ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
                fib_res_put(&res);
                return ret;
index 79d057a939ba6404d4e7553008a2613815a9b049..4a8e370862bca453cd6e939162ded09de6bdf831 100644 (file)
@@ -186,7 +186,9 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 {
        struct tnode *ret = node_parent(node);
 
-       return rcu_dereference(ret);
+       return rcu_dereference_check(ret,
+                                    rcu_read_lock_held() ||
+                                    lockdep_rtnl_is_held());
 }
 
 /* Same as rcu_assign_pointer
@@ -1753,7 +1755,9 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
 
 static struct leaf *trie_firstleaf(struct trie *t)
 {
-       struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+       struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie,
+                                                       rcu_read_lock_held() ||
+                                                       lockdep_rtnl_is_held());
 
        if (!n)
                return NULL;
index 3f56b6e6c6aab583d65902e7190bbf1a6eaa60b6..6298f75d5e93c4ad4e838a1982143207e7d3f855 100644 (file)
@@ -2738,6 +2738,11 @@ slow_output:
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+       return NULL;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2746,7 +2751,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .family                 =       AF_INET,
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
-       .check                  =       ipv4_dst_check,
+       .check                  =       ipv4_blackhole_dst_check,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .entries                =       ATOMIC_INIT(0),
 };
index 32e0bef60d0afdbedd3d4cbdd3c57a9f9beafcdc..fb23c2e63b5281a7ca505af3ac94f411e10c4ee6 100644 (file)
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL(udp_lib_unhash);
 
+/*
+ * inet_rcv_saddr was changed, we must rehash secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+       if (sk_hashed(sk)) {
+               struct udp_table *udptable = sk->sk_prot->h.udp_table;
+               struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+               hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+               nhslot2 = udp_hashslot2(udptable, newhash);
+               udp_sk(sk)->udp_portaddr_hash = newhash;
+               if (hslot2 != nhslot2) {
+                       hslot = udp_hashslot(udptable, sock_net(sk),
+                                            udp_sk(sk)->udp_port_hash);
+                       /* we must lock primary chain too */
+                       spin_lock_bh(&hslot->lock);
+
+                       spin_lock(&hslot2->lock);
+                       hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+                       hslot2->count--;
+                       spin_unlock(&hslot2->lock);
+
+                       spin_lock(&nhslot2->lock);
+                       hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &nhslot2->head);
+                       nhslot2->count++;
+                       spin_unlock(&nhslot2->lock);
+
+                       spin_unlock_bh(&hslot->lock);
+               }
+       }
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+       u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+                                         inet_sk(sk)->inet_rcv_saddr,
+                                         inet_sk(sk)->inet_num);
+       udp_lib_rehash(sk, new_hash);
+}
+
 static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
@@ -1843,6 +1886,7 @@ struct proto udp_prot = {
        .backlog_rcv       = __udp_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
+       .rehash            = udp_v4_rehash,
        .get_port          = udp_v4_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
index 7d929a22cbc2f505aca94ba5a6fd9cae412824dc..ef371aa01ac50724f9dff9cbf7d084e062d844c9 100644 (file)
@@ -105,9 +105,12 @@ ipv4_connected:
                if (ipv6_addr_any(&np->saddr))
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&np->rcv_saddr)) {
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
                                               &np->rcv_saddr);
+                       if (sk->sk_prot->rehash)
+                               sk->sk_prot->rehash(sk);
+               }
 
                goto out;
        }
@@ -181,6 +184,8 @@ ipv4_connected:
        if (ipv6_addr_any(&np->rcv_saddr)) {
                ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+               if (sk->sk_prot->rehash)
+                       sk->sk_prot->rehash(sk);
        }
 
        ip6_dst_store(sk, dst,
index 13ef5bc05cf5220a3e2aa62543c4a12b79e5a98a..578f3c1a16db614614f986947cbae5a7960fcdb5 100644 (file)
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb)
                kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 }
 
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct sk_buff *skb)
-{
-       atomic_sub(skb->truesize, &nf_init_frags.mem);
-       nf_skb_free(skb);
-       kfree_skb(skb);
-}
-
 /* Destruction primitives. */
 
 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
        }
 
 found:
-       /* We found where to put this one.  Check for overlap with
-        * preceding fragment, and, if needed, align things so that
-        * any overlaps are eliminated.
-        */
-       if (prev) {
-               int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
-
-               if (i > 0) {
-                       offset += i;
-                       if (end <= offset) {
-                               pr_debug("overlap\n");
-                               goto err;
-                       }
-                       if (!pskb_pull(skb, i)) {
-                               pr_debug("Can't pull\n");
-                               goto err;
-                       }
-                       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-                               skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
-
-       /* Look for overlap with succeeding segments.
-        * If we can merge fragments, do it.
+       /* RFC5722, Section 4:
+        *                                  When reassembling an IPv6 datagram, if
+        *   one or more of its constituent fragments is determined to be an
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments, including those not yet received) MUST be silently
+        *   discarded.
         */
-       while (next && NFCT_FRAG6_CB(next)->offset < end) {
-               /* overlap is 'i' bytes */
-               int i = end - NFCT_FRAG6_CB(next)->offset;
-
-               if (i < next->len) {
-                       /* Eat head of the next overlapped fragment
-                        * and leave the loop. The next ones cannot overlap.
-                        */
-                       pr_debug("Eat head of the overlapped parts.: %d", i);
-                       if (!pskb_pull(next, i))
-                               goto err;
 
-                       /* next fragment */
-                       NFCT_FRAG6_CB(next)->offset += i;
-                       fq->q.meat -= i;
-                       if (next->ip_summed != CHECKSUM_UNNECESSARY)
-                               next->ip_summed = CHECKSUM_NONE;
-                       break;
-               } else {
-                       struct sk_buff *free_it = next;
-
-                       /* Old fragmnet is completely overridden with
-                        * new one drop it.
-                        */
-                       next = next->next;
+       /* Check for overlap with preceding fragment. */
+       if (prev &&
+           (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+               goto discard_fq;
 
-                       if (prev)
-                               prev->next = next;
-                       else
-                               fq->q.fragments = next;
-
-                       fq->q.meat -= free_it->len;
-                       frag_kfree_skb(free_it);
-               }
-       }
+       /* Look for overlap with succeeding segment. */
+       if (next && NFCT_FRAG6_CB(next)->offset < end)
+               goto discard_fq;
 
        NFCT_FRAG6_CB(skb)->offset = offset;
 
@@ -371,6 +319,8 @@ found:
        write_unlock(&nf_frags.lock);
        return 0;
 
+discard_fq:
+       fq_kill(fq);
 err:
        return -1;
 }
index 545c4141b755ee91277c6bd2912de4f42813bb05..64cfef1b0a4c556ccf63c2f912c86ec3a1599b82 100644 (file)
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
-{
-       atomic_sub(skb->truesize, &nf->mem);
-       kfree_skb(skb);
-}
-
 void ip6_frag_init(struct inet_frag_queue *q, void *a)
 {
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
        }
 
 found:
-       /* We found where to put this one.  Check for overlap with
-        * preceding fragment, and, if needed, align things so that
-        * any overlaps are eliminated.
+       /* RFC5722, Section 4:
+        *                                  When reassembling an IPv6 datagram, if
+        *   one or more of its constituent fragments is determined to be an
+        *   overlapping fragment, the entire datagram (and any constituent
+        *   fragments, including those not yet received) MUST be silently
+        *   discarded.
         */
-       if (prev) {
-               int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
 
-               if (i > 0) {
-                       offset += i;
-                       if (end <= offset)
-                               goto err;
-                       if (!pskb_pull(skb, i))
-                               goto err;
-                       if (skb->ip_summed != CHECKSUM_UNNECESSARY)
-                               skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
+       /* Check for overlap with preceding fragment. */
+       if (prev &&
+           (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+               goto discard_fq;
 
-       /* Look for overlap with succeeding segments.
-        * If we can merge fragments, do it.
-        */
-       while (next && FRAG6_CB(next)->offset < end) {
-               int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
-
-               if (i < next->len) {
-                       /* Eat head of the next overlapped fragment
-                        * and leave the loop. The next ones cannot overlap.
-                        */
-                       if (!pskb_pull(next, i))
-                               goto err;
-                       FRAG6_CB(next)->offset += i;    /* next fragment */
-                       fq->q.meat -= i;
-                       if (next->ip_summed != CHECKSUM_UNNECESSARY)
-                               next->ip_summed = CHECKSUM_NONE;
-                       break;
-               } else {
-                       struct sk_buff *free_it = next;
-
-                       /* Old fragment is completely overridden with
-                        * new one drop it.
-                        */
-                       next = next->next;
-
-                       if (prev)
-                               prev->next = next;
-                       else
-                               fq->q.fragments = next;
-
-                       fq->q.meat -= free_it->len;
-                       frag_kfree_skb(fq->q.net, free_it);
-               }
-       }
+       /* Look for overlap with succeeding segment. */
+       if (next && FRAG6_CB(next)->offset < end)
+               goto discard_fq;
 
        FRAG6_CB(skb)->offset = offset;
 
@@ -436,6 +393,8 @@ found:
        write_unlock(&ip6_frags.lock);
        return -1;
 
+discard_fq:
+       fq_kill(fq);
 err:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_REASMFAILS);
index 1dd1affdead2d418a3d98a0f34e5d027cab88371..5acb3560ff15267021266f59c7b2102f633f3d6a 100644 (file)
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
        return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
 }
 
+static void udp_v6_rehash(struct sock *sk)
+{
+       u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+                                         &inet6_sk(sk)->rcv_saddr,
+                                         inet_sk(sk)->inet_num);
+
+       udp_lib_rehash(sk, new_hash);
+}
+
 static inline int compute_score(struct sock *sk, struct net *net,
                                unsigned short hnum,
                                struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = {
        .backlog_rcv       = udpv6_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
+       .rehash            = udp_v6_rehash,
        .get_port          = udp_v6_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
index 79986a674f6ea23329fb5a3960f1d3ece82b9a9d..fd55b5135de5aad91f547281a9c7a07c44f676b0 100644 (file)
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
        if (err < 0) {
-               kfree(self->ias_obj->name);
-               kfree(self->ias_obj);
+               irias_delete_object(self->ias_obj);
+               self->ias_obj = NULL;
                goto out;
        }
 
index a788f9e9427d2c1c127d411adc8b06eb9d19ba87..6130f9d9dbe138edffaaafed0528b1d2110db0c4 100644 (file)
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
        memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
        le16_to_cpus(&val_len); n+=2;
 
-       if (val_len > 1016) {
+       if (val_len >= 1016) {
                IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
                return -RSP_INVALID_COMMAND_FORMAT;
        }
index 798a91b100cc277d154bde089eb6f4745f47b6b6..ded5c3843e061a13e1d82be4666b746c829c467c 100644 (file)
@@ -732,6 +732,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 
        rtnl_unlock();
 
+       /*
+        * Now all work items will be gone, but the
+        * timer might still be armed, so delete it
+        */
+       del_timer_sync(&local->work_timer);
+
        cancel_work_sync(&local->reconfig_filter);
 
        ieee80211_clear_tx_pending(local);
index 4f8ddba480110167674fa36f6854e53f673e8d68..4c2f89df5ccecc40f1dc9200772a4e8bb3e85b95 100644 (file)
@@ -924,6 +924,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
 
        ip_vs_out_stats(cp, skb);
        ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+       ip_vs_update_conntrack(skb, cp, 0);
        ip_vs_conn_put(cp);
 
        skb->ipvs_property = 1;
index f228a17ec6499b1440cae8a9dd9a1d8e312c4a00..7e9af5b76d9eb280cc7d197538e5ce6161c42e66 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_helper.h>
 #include <linux/gfp.h>
 #include <net/protocol.h>
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                buf_len = strlen(buf);
 
                ct = nf_ct_get(skb, &ctinfo);
-               if (ct && !nf_ct_is_untracked(ct)) {
+               if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
                        /* If mangling fails this function will return 0
                         * which will cause the packet to be dropped.
                         * Mangling can only fail under memory pressure,
@@ -409,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        union nf_inet_addr to;
        __be16 port;
        struct ip_vs_conn *n_cp;
-       struct nf_conn *ct;
 
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
@@ -496,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                ip_vs_control_add(n_cp, cp);
        }
 
-       ct = (struct nf_conn *)skb->nfct;
-       if (ct && ct != &nf_conntrack_untracked)
-               ip_vs_expect_related(skb, ct, n_cp,
-                                    IPPROTO_TCP, &n_cp->dport, 1);
-
        /*
         *      Move tunnel to listen state
         */
index 21e1a5e9b9d3cd354d74808e44094ffc67f671da..49df6bea6a2ddaec391ce077cf9f72a72efc4a7f 100644 (file)
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 }
 #endif
 
-static void
-ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
 {
        struct nf_conn *ct = (struct nf_conn *)skb->nfct;
        struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
         * real-server we will see RIP->DIP.
         */
        new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-       new_tuple.src.u3 = cp->daddr;
+       if (outin)
+               new_tuple.src.u3 = cp->daddr;
+       else
+               new_tuple.dst.u3 = cp->vaddr;
        /*
         * This will also take care of UDP and other protocols.
         */
-       new_tuple.src.u.tcp.port = cp->dport;
+       if (outin)
+               new_tuple.src.u.tcp.port = cp->dport;
+       else
+               new_tuple.dst.u.tcp.port = cp->vport;
        nf_conntrack_alter_reply(ct, &new_tuple);
 }
 
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
-       ip_vs_update_conntrack(skb, cp);
+       ip_vs_update_conntrack(skb, cp, 1);
 
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
 
-       ip_vs_update_conntrack(skb, cp);
+       ip_vs_update_conntrack(skb, cp, 1);
 
        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
index 980fe4ad0016c52ed260735c955b0600181c7720..cd96ed3ccee4602a9fee20464e4a54d3fb0783b2 100644 (file)
@@ -2102,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
 #endif
 }
 
+static void __init netlink_add_usersock_entry(void)
+{
+       unsigned long *listeners;
+       int groups = 32;
+
+       listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+                           GFP_KERNEL);
+       if (!listeners)
+               panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+
+       netlink_table_grab();
+
+       nl_table[NETLINK_USERSOCK].groups = groups;
+       nl_table[NETLINK_USERSOCK].listeners = listeners;
+       nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+       nl_table[NETLINK_USERSOCK].registered = 1;
+
+       netlink_table_ungrab();
+}
+
 static struct pernet_operations __net_initdata netlink_net_ops = {
        .init = netlink_net_init,
        .exit = netlink_net_exit,
@@ -2150,6 +2170,8 @@ static int __init netlink_proto_init(void)
                hash->rehash_time = jiffies;
        }
 
+       netlink_add_usersock_entry();
+
        sock_register(&netlink_family_ops);
        register_pernet_subsys(&netlink_net_ops);
        /* The netlink device handler may be needed early. */
index 537a48732e9e0b8513dd324801c342ec00cd3820..7ebf7439b478d24b4246527c7313fb8cb774496b 100644 (file)
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 {
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_police *police = a->priv;
-       struct tc_police opt;
-
-       opt.index = police->tcf_index;
-       opt.action = police->tcf_action;
-       opt.mtu = police->tcfp_mtu;
-       opt.burst = police->tcfp_burst;
-       opt.refcnt = police->tcf_refcnt - ref;
-       opt.bindcnt = police->tcf_bindcnt - bind;
+       struct tc_police opt = {
+               .index = police->tcf_index,
+               .action = police->tcf_action,
+               .mtu = police->tcfp_mtu,
+               .burst = police->tcfp_burst,
+               .refcnt = police->tcf_refcnt - ref,
+               .bindcnt = police->tcf_bindcnt - bind,
+       };
+
        if (police->tcfp_R_tab)
                opt.rate = police->tcfp_R_tab->rate;
-       else
-               memset(&opt.rate, 0, sizeof(opt.rate));
        if (police->tcfp_P_tab)
                opt.peakrate = police->tcfp_P_tab->rate;
-       else
-               memset(&opt.peakrate, 0, sizeof(opt.peakrate));
        NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
        if (police->tcfp_result)
                NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
index abd904be428717462395d62adb682023e64e17a5..47496098d35c2876cd0b5a88e1365922cc369ce9 100644 (file)
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len)
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
-                       update_cfmin(cl->cl_parent);
                }
+               update_cfmin(cl->cl_parent);
        }
 }
 
index 24b2cd55563726e8cd24be5dfbfafe8eb2892886..d344dc481ccc7a22d517c13995e825a73bed21ce 100644 (file)
@@ -1232,6 +1232,18 @@ out:
        return 0;
 }
 
+static bool list_has_sctp_addr(const struct list_head *list,
+                              union sctp_addr *ipaddr)
+{
+       struct sctp_transport *addr;
+
+       list_for_each_entry(addr, list, transports) {
+               if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+                       return true;
+       }
+
+       return false;
+}
 /* A restart is occurring, check to make sure no new addresses
  * are being added as we may be under a takeover attack.
  */
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
                                       struct sctp_chunk *init,
                                       sctp_cmd_seq_t *commands)
 {
-       struct sctp_transport *new_addr, *addr;
-       int found;
+       struct sctp_transport *new_addr;
+       int ret = 1;
 
-       /* Implementor's Guide - Sectin 5.2.2
+       /* Implementor's Guide - Section 5.2.2
         * ...
         * Before responding the endpoint MUST check to see if the
         * unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
        /* Search through all current addresses and make sure
         * we aren't adding any new ones.
         */
-       new_addr = NULL;
-       found = 0;
-
        list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
-                       transports) {
-               found = 0;
-               list_for_each_entry(addr, &asoc->peer.transport_addr_list,
-                               transports) {
-                       if (sctp_cmp_addr_exact(&new_addr->ipaddr,
-                                               &addr->ipaddr)) {
-                               found = 1;
-                               break;
-                       }
-               }
-               if (!found)
+                           transports) {
+               if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+                                       &new_addr->ipaddr)) {
+                       sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+                                                  commands);
+                       ret = 0;
                        break;
-       }
-
-       /* If a new address was added, ABORT the sender. */
-       if (!found && new_addr) {
-               sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
+               }
        }
 
        /* Return success if all addresses were found. */
-       return found;
+       return ret;
 }
 
 /* Populate the verification/tie tags based on overlapping INIT
index 4414a18c63b49601357789fb5d6df78326819ba7..0b39b2451ea59958c0819c51faf337d000cbb88d 100644 (file)
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock)
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
+       unsigned int retries = 0;
 
        mutex_lock(&u->readlock);
 
@@ -717,9 +718,17 @@ retry:
        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
-               /* Sanity yield. It is unusual case, but yet... */
-               if (!(ordernum&0xFF))
-                       yield();
+               /*
+                * __unix_find_socket_byname() may take long time if many names
+                * are already in use.
+                */
+               cond_resched();
+               /* Give up if all names seems to be in use. */
+               if (retries++ == 0xFFFFF) {
+                       err = -ENOSPC;
+                       kfree(addr);
+                       goto out;
+               }
                goto retry;
        }
        addr->hash ^= sk->sk_type;
index 541e2fff5e9c5a0fab346fa55ac9f078630dcf43..d6d046b9f6f2d7d85f8a10680cf662519838e877 100644 (file)
@@ -475,12 +475,10 @@ int wiphy_register(struct wiphy *wiphy)
        mutex_lock(&cfg80211_mutex);
 
        res = device_add(&rdev->wiphy.dev);
-       if (res)
-               goto out_unlock;
-
-       res = rfkill_register(rdev->rfkill);
-       if (res)
-               goto out_rm_dev;
+       if (res) {
+               mutex_unlock(&cfg80211_mutex);
+               return res;
+       }
 
        /* set up regulatory info */
        wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
@@ -509,13 +507,18 @@ int wiphy_register(struct wiphy *wiphy)
        cfg80211_debugfs_rdev_add(rdev);
        mutex_unlock(&cfg80211_mutex);
 
+       /*
+        * due to a locking dependency this has to be outside of the
+        * cfg80211_mutex lock
+        */
+       res = rfkill_register(rdev->rfkill);
+       if (res)
+               goto out_rm_dev;
+
        return 0;
 
 out_rm_dev:
        device_del(&rdev->wiphy.dev);
-
-out_unlock:
-       mutex_unlock(&cfg80211_mutex);
        return res;
 }
 EXPORT_SYMBOL(wiphy_register);
index bb5e0a5ecfa1c2f999034bf4ceb34e69fbec855d..7e5c3a45f811d1a951ebee2486cfd8ffa70e5482 100644 (file)
@@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
+       data->flags = 0;
+       data->length = 0;
+
        switch (wdev->iftype) {
        case NL80211_IFTYPE_ADHOC:
                return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
index 0ef17bc42bac05996eab6e2dada02fc67953c3dc..8f5116f5af19988555316eb5b97c7465a0890677 100644 (file)
@@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
                }
        }
 
+       if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+               /*
+                * If this is a GET, but not NOMAX, it means that the extra
+                * data is not bounded by userspace, but by max_tokens. Thus
+                * set the length to max_tokens. This matches the extra data
+                * allocation.
+                * The driver should fill it with the number of tokens it
+                * provided, and it may check iwp->length rather than having
+                * knowledge of max_tokens. If the driver doesn't change the
+                * iwp->length, this ioctl just copies back max_token tokens
+                * filled with zeroes. Hopefully the driver isn't claiming
+                * them to be valid data.
+                */
+               iwp->length = descr->max_tokens;
+       }
+
        err = handler(dev, info, (union iwreq_data *) iwp, extra);
 
        iwp->length += essid_compat;
index b14ed4b1f27c3bd70f5837c2f032baf11a88e9a1..8bae6b22c8461c7a03fd15f86bd672a662b0b437 100644 (file)
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_user_expire *ue = nlmsg_data(nlh);
        struct xfrm_usersa_info *p = &ue->state;
        struct xfrm_mark m;
-       u32 mark = xfrm_mark_get(attrs, &m);;
+       u32 mark = xfrm_mark_get(attrs, &m);
 
        x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
 
index 3c88be94649408b412c0193f0bf4186273c6a972..02baec732bb512c77fed7d5ede247b72d788a147 100644 (file)
@@ -33,8 +33,8 @@ struct aa_rlimit {
 };
 
 int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+                     unsigned int resource, struct rlimit *new_rlim);
 
 void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
 
index 6e85cdb4303f69fc1911c94aefd5f1c5b695239c..506d2baf614797624fc4b9450c0d12c9f56e8ae4 100644 (file)
@@ -40,6 +40,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
        *ns_name = NULL;
        if (name[0] == ':') {
                char *split = strchr(&name[1], ':');
+               *ns_name = skip_spaces(&name[1]);
                if (split) {
                        /* overwrite ':' with \0 */
                        *split = 0;
@@ -47,7 +48,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
                } else
                        /* a ns name without a following profile is allowed */
                        name = NULL;
-               *ns_name = &name[1];
        }
        if (name && *name == 0)
                name = NULL;
index f73e2c2042185fff2d079dbdb3f4b8b828371e72..cf1de4462ccd3fb297f48bf351dd3494804f22c1 100644 (file)
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
        int error = 0;
 
        if (!unconfined(profile))
-               error = aa_task_setrlimit(profile, resource, new_rlim);
+               error = aa_task_setrlimit(profile, task, resource, new_rlim);
 
        return error;
 }
index 19358dc14605bae1422ae00226291751695ba44c..82396050f18646ac0519352321637e930c05e367 100644 (file)
@@ -59,8 +59,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
 {
        struct path root, tmp;
        char *res;
-       int deleted, connected;
-       int error = 0;
+       int connected, error = 0;
 
        /* Get the root we want to resolve too, released below */
        if (flags & PATH_CHROOT_REL) {
@@ -74,19 +73,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
        spin_lock(&dcache_lock);
-       /* There is a race window between path lookup here and the
-        * need to strip the " (deleted) string that __d_path applies
-        * Detect the race and relookup the path
-        *
-        * The stripping of (deleted) is a hack that could be removed
-        * with an updated __d_path
-        */
-       do {
-               tmp = root;
-               deleted = d_unlinked(path->dentry);
-               res = __d_path(path, &tmp, buf, buflen);
-
-       } while (deleted != d_unlinked(path->dentry));
+       tmp = root;
+       res = __d_path(path, &tmp, buf, buflen);
        spin_unlock(&dcache_lock);
 
        *name = res;
@@ -98,21 +86,17 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
-       if (deleted) {
-               /* On some filesystems, newly allocated dentries appear to the
-                * security_path hooks as a deleted dentry except without an
-                * inode allocated.
-                *
-                * Remove the appended deleted text and return as string for
-                * normal mediation, or auditing.  The (deleted) string is
-                * guaranteed to be added in this case, so just strip it.
-                */
-               buf[buflen - 11] = 0;   /* - (len(" (deleted)") +\0) */
 
-               if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
+       /* Handle two cases:
+        * 1. A deleted dentry && profile is not allowing mediation of deleted
+        * 2. On some filesystems, newly allocated dentries appear to the
+        *    security_path hooks as a deleted dentry except without an inode
+        *    allocated.
+        */
+       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+           !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
-               }
        }
 
        /* Determine if the path is connected to the expected root */
index 3cdc1ad0787ec9c4769455f8aeb004a417d246bf..52cc865f1464574e696fd28eca6a6e0eed326d68 100644 (file)
@@ -1151,12 +1151,14 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                /* released below */
                ns = aa_get_namespace(root);
 
-       write_lock(&ns->lock);
        if (!name) {
                /* remove namespace - can only happen if fqname[0] == ':' */
+               write_lock(&ns->parent->lock);
                __remove_namespace(ns);
+               write_unlock(&ns->parent->lock);
        } else {
                /* remove profile */
+               write_lock(&ns->lock);
                profile = aa_get_profile(__lookup_profile(&ns->base, name));
                if (!profile) {
                        error = -ENOENT;
@@ -1165,8 +1167,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
                }
                name = profile->base.hname;
                __remove_profile(profile);
+               write_unlock(&ns->lock);
        }
-       write_unlock(&ns->lock);
 
        /* don't fail removal if audit fails */
        (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
index 4a368f1fd36ddf02af7204d30ee1b136f1d57bf7..a4136c10b1c6292edbdadae7285803583fb74241 100644 (file)
@@ -72,6 +72,7 @@ int aa_map_resource(int resource)
 /**
  * aa_task_setrlimit - test permission to set an rlimit
  * @profile - profile confining the task  (NOT NULL)
+ * @task - task the resource is being set on
  * @resource - the resource being set
  * @new_rlim - the new resource limit  (NOT NULL)
  *
@@ -79,18 +80,21 @@ int aa_map_resource(int resource)
  *
  * Returns: 0 or error code if setting resource failed
  */
-int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
-                     struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+                     unsigned int resource, struct rlimit *new_rlim)
 {
        int error = 0;
 
-       if (profile->rlimits.mask & (1 << resource) &&
-           new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
-
-               error = audit_resource(profile, resource, new_rlim->rlim_max,
-                       -EACCES);
+       /* TODO: extend resource control to handle other (non current)
+        * processes.  AppArmor rules currently have the implicit assumption
+        * that the task is setting the resource of the current process
+        */
+       if ((task != current->group_leader) ||
+           (profile->rlimits.mask & (1 << resource) &&
+            new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+               error = -EACCES;
 
-       return error;
+       return audit_resource(profile, resource, new_rlim->rlim_max, error);
 }
 
 /**
index 16d100d3fc38de931de8e1a2679bc025359f3d31..3fbcd1dda0ef6e06da4a5b4b9c23a240a6378a35 100644 (file)
@@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
 
 /* set during initialization */
+extern int iint_initialized;
 extern int ima_initialized;
 extern int ima_used_chip;
 extern char *ima_hash;
index 7625b85c2274f457fc0d260a21e2d9039758d12c..afba4aef812f699134f7c7bc66c32251d2f12c69 100644 (file)
 
 RADIX_TREE(ima_iint_store, GFP_ATOMIC);
 DEFINE_SPINLOCK(ima_iint_lock);
-
 static struct kmem_cache *iint_cache __read_mostly;
 
+int iint_initialized = 0;
+
 /* ima_iint_find_get - return the iint associated with an inode
  *
  * ima_iint_find_get gets a reference to the iint. Caller must
@@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void)
        iint_cache =
            kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                              SLAB_PANIC, init_once);
+       iint_initialized = 1;
        return 0;
 }
 security_initcall(ima_iintcache_init);
index f93641382e9f9483576578a3ba5f41286f4cc3ab..e662b89d407944103dc121b9ccb37f7e68ac62e1 100644 (file)
@@ -148,12 +148,14 @@ void ima_counts_get(struct file *file)
        struct ima_iint_cache *iint;
        int rc;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
                return;
        mutex_lock(&iint->mutex);
+       if (!ima_initialized)
+               goto out;
        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
        if (rc < 0)
                goto out;
@@ -213,7 +215,7 @@ void ima_file_free(struct file *file)
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
 
-       if (!ima_initialized || !S_ISREG(inode->i_mode))
+       if (!iint_initialized || !S_ISREG(inode->i_mode))
                return;
        iint = ima_iint_find_get(inode);
        if (!iint)
@@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
 {
        struct inode *inode = file->f_dentry->d_inode;
        struct ima_iint_cache *iint;
-       int rc;
+       int rc = 0;
 
        if (!ima_initialized || !S_ISREG(inode->i_mode))
                return 0;
index b2b0998d6abda7759433d7032d255eb0a317c126..60924f6a52db2bbff40ddc953a50bb9d708febb5 100644 (file)
@@ -1272,6 +1272,7 @@ long keyctl_session_to_parent(void)
        keyring_r = NULL;
 
        me = current;
+       rcu_read_lock();
        write_lock_irq(&tasklist_lock);
 
        parent = me->real_parent;
@@ -1304,7 +1305,8 @@ long keyctl_session_to_parent(void)
                goto not_permitted;
 
        /* the keyrings must have the same UID */
-       if (pcred->tgcred->session_keyring->uid != mycred->euid ||
+       if ((pcred->tgcred->session_keyring &&
+            pcred->tgcred->session_keyring->uid != mycred->euid) ||
            mycred->tgcred->session_keyring->uid != mycred->euid)
                goto not_permitted;
 
@@ -1319,6 +1321,7 @@ long keyctl_session_to_parent(void)
        set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
 
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        if (oldcred)
                put_cred(oldcred);
        return 0;
@@ -1327,6 +1330,7 @@ already_same:
        ret = 0;
 not_permitted:
        write_unlock_irq(&tasklist_lock);
+       rcu_read_unlock();
        put_cred(cred);
        return ret;
 
index eb68326c37d47626b53d7970fe6e88027edc0e13..a7868ad4d530fd40b3a9bd608bd7860a7d35ce44 100644 (file)
@@ -829,6 +829,8 @@ static int snd_rawmidi_control_ioctl(struct snd_card *card,
                
                if (get_user(device, (int __user *)argp))
                        return -EFAULT;
+               if (device >= SNDRV_RAWMIDI_DEVICES) /* next device is -1 */
+                       device = SNDRV_RAWMIDI_DEVICES - 1;
                mutex_lock(&register_mutex);
                device = device < 0 ? 0 : device + 1;
                while (device < SNDRV_RAWMIDI_DEVICES) {
index 685712276ac95ab0d57985489b86f4f840bcf9b1..69cd7b3c362d19f4b0ac989b8bc107e49e59ff1b 100644 (file)
@@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level)
        return 0;
 
  _error:
-       snd_seq_oss_writeq_delete(dp->writeq);
-       snd_seq_oss_readq_delete(dp->readq);
        snd_seq_oss_synth_cleanup(dp);
        snd_seq_oss_midi_cleanup(dp);
-       delete_port(dp);
        delete_seq_queue(dp->queue);
-       kfree(dp);
+       delete_port(dp);
 
        return rc;
 }
@@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp)
 static int
 delete_port(struct seq_oss_devinfo *dp)
 {
-       if (dp->port < 0)
+       if (dp->port < 0) {
+               kfree(dp);
                return 0;
+       }
 
        debug_printk(("delete_port %i\n", dp->port));
        return snd_seq_event_port_detach(dp->cseq, dp->port);
index 5f3e68401f905dbf2b8a03bc12fd657450b739d4..91d6023a63e57c6b14227e158c171b8edf4b8087 100644 (file)
@@ -764,9 +764,9 @@ static long io[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
 static long mem[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
+#ifndef MSND_CLASSIC
 static long cfg[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 
-#ifndef MSND_CLASSIC
 /* Extra Peripheral Configuration (Default: Disable) */
 static long ide_io0[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
 static long ide_io1[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
@@ -894,7 +894,11 @@ static int __devinit snd_msnd_isa_probe(struct device *pdev, unsigned int idx)
        struct snd_card *card;
        struct snd_msnd *chip;
 
-       if (has_isapnp(idx) || cfg[idx] == SNDRV_AUTO_PORT) {
+       if (has_isapnp(idx)
+#ifndef MSND_CLASSIC
+           || cfg[idx] == SNDRV_AUTO_PORT
+#endif
+           ) {
                printk(KERN_INFO LOGNAME ": Assuming PnP mode\n");
                return -ENODEV;
        }
index 3827092cc1d2802e0902a2c809032cac6f6ef9da..14829210ef0bf4b1bac018d4f5e31a1d1c4728b9 100644 (file)
@@ -4536,7 +4536,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
                        cfg->hp_outs--;
                        memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1,
                                sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i));
-                       memmove(sequences_hp + i - 1, sequences_hp + i,
+                       memmove(sequences_hp + i, sequences_hp + i + 1,
                                sizeof(sequences_hp[0]) * (cfg->hp_outs - i));
                }
        }
index 4ef5efaaaef1a81d29290d5d9618097634957c10..488fd9ade1ba2bf7b306bf6e48ae106ef823294b 100644 (file)
@@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = {
        {} /* terminator */
 };
 
+/* Errata: CS4207 rev C0/C1/C2 Silicon
+ *
+ * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf
+ *
+ * 6. At high temperature (TA > +85°C), the digital supply current (IVD)
+ * may be excessive (up to an additional 200 Î¼A), which is most easily
+ * observed while the part is being held in reset (RESET# active low).
+ *
+ * Root Cause: At initial powerup of the device, the logic that drives
+ * the clock and write enable to the S/PDIF SRC RAMs is not properly
+ * initialized.
+ * Certain random patterns will cause a steady leakage current in those
+ * RAM cells. The issue will resolve once the SRCs are used (turned on).
+ *
+ * Workaround: The following verb sequence briefly turns on the S/PDIF SRC
+ * blocks, which will alleviate the issue.
+ */
+
+static struct hda_verb cs_errata_init_verbs[] = {
+       {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */
+       {0x11, AC_VERB_SET_PROC_STATE, 0x01},  /* VPW: processing on */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x9999},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0xa412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0009},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */
+
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0017},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x2412},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0008},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0000},
+       {0x11, AC_VERB_SET_COEF_INDEX, 0x0001},
+       {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
+       {0x11, AC_VERB_SET_PROC_STATE, 0x00},
+
+       {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
+       {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
+       /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
+
+       {} /* terminator */
+};
+
 /* SPDIF setup */
 static void init_digital(struct hda_codec *codec)
 {
@@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
 
+       /* init_verb sequence for C0/C1/C2 errata*/
+       snd_hda_sequence_write(codec, cs_errata_init_verbs);
+
        snd_hda_sequence_write(codec, cs_coef_init_verbs);
 
        if (spec->gpio_mask) {
index 5cdb80edbd7f06d0cc00db6dd43cc325a1ef7779..71f9d6475b09e2b4d52d38b60130918dff8235b2 100644 (file)
@@ -116,6 +116,7 @@ struct conexant_spec {
        unsigned int dell_vostro:1;
        unsigned int ideapad:1;
        unsigned int thinkpad:1;
+       unsigned int hp_laptop:1;
 
        unsigned int ext_mic_present;
        unsigned int recording;
@@ -2299,6 +2300,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
        }
 }
 
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
+{
+       unsigned int present;
+
+       present = snd_hda_jack_detect(codec, 0x1b);
+       snd_printdd("CXT5066: external microphone present=%d\n", present);
+       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+                           present ? 1 : 3);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately
    order is: external mic -> dock mic -> interal mic */
 static void cxt5066_thinkpad_automic(struct hda_codec *codec)
@@ -2407,6 +2420,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
        }
 }
 
+/* unsolicited event for jack sensing */
+static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
+{
+       snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
+       switch (res >> 26) {
+       case CONEXANT_HP_EVENT:
+               cxt5066_hp_automute(codec);
+               break;
+       case CONEXANT_MIC_EVENT:
+               cxt5066_hp_laptop_automic(codec);
+               break;
+       }
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2989,6 +3016,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
        { } /* end */
 };
 
+
+static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
+       {0x14, AC_VERB_SET_CONNECT_SEL, 0x0},
+       {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
+       {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
+       { } /* end */
+};
+
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
@@ -3004,6 +3039,8 @@ static int cxt5066_init(struct hda_codec *codec)
                        cxt5066_ideapad_automic(codec);
                else if (spec->thinkpad)
                        cxt5066_thinkpad_automic(codec);
+               else if (spec->hp_laptop)
+                       cxt5066_hp_laptop_automic(codec);
        }
        cxt5066_set_mic_boost(codec);
        return 0;
@@ -3031,6 +3068,7 @@ enum {
        CXT5066_DELL_VOSTO,     /* Dell Vostro 1015i */
        CXT5066_IDEAPAD,        /* Lenovo IdeaPad U150 */
        CXT5066_THINKPAD,       /* Lenovo ThinkPad T410s, others? */
+       CXT5066_HP_LAPTOP,      /* HP Laptop */
        CXT5066_MODELS
 };
 
@@ -3041,6 +3079,7 @@ static const char *cxt5066_models[CXT5066_MODELS] = {
        [CXT5066_DELL_VOSTO]    = "dell-vostro",
        [CXT5066_IDEAPAD]       = "ideapad",
        [CXT5066_THINKPAD]      = "thinkpad",
+       [CXT5066_HP_LAPTOP]     = "hp-laptop",
 };
 
 static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
@@ -3052,8 +3091,10 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
        SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
        SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
        SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
+       SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
@@ -3116,6 +3157,23 @@ static int patch_cxt5066(struct hda_codec *codec)
                spec->num_init_verbs++;
                spec->dell_automute = 1;
                break;
+       case CXT5066_HP_LAPTOP:
+               codec->patch_ops.init = cxt5066_init;
+               codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+               spec->init_verbs[spec->num_init_verbs] =
+                       cxt5066_init_verbs_hp_laptop;
+               spec->num_init_verbs++;
+               spec->hp_laptop = 1;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+               spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+               /* no S/PDIF out */
+               spec->multiout.dig_out_nid = 0;
+               /* input source automatically selected */
+               spec->input_mux = NULL;
+               spec->port_d_mode = 0;
+               spec->mic_boost = 3; /* default 30dB gain */
+               break;
+
        case CXT5066_OLPC_XO_1_5:
                codec->patch_ops.init = cxt5066_olpc_init;
                codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
index 627bf99633681483242a7559a9eaef878c3f7605..bcbf9160ed81af7a805007f152067908f9970840 100644 (file)
@@ -5334,6 +5334,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 
 static struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
        {}
 };
index 6147216af74412f5ff47af36b04ce9fd51013f6b..a3409edcfb5094791c33563f8ad3911826ce3e26 100644 (file)
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
 int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
 int oxygen_pci_resume(struct pci_dev *pci);
 #endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
 
 /* oxygen_mixer.c */
 
index fad03d64e3ad0c936aca79a257f6c484b067f657..7e93cf884437d0b5844c9515250c49994390e54c 100644 (file)
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
        }
 }
 
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
 {
-       struct oxygen *chip = card->private_data;
-
        spin_lock_irq(&chip->reg_lock);
        chip->interrupt_mask = 0;
        chip->pcm_running = 0;
        oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
        oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
        spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
        if (chip->irq >= 0)
                free_irq(chip->irq, chip);
        flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
 }
 EXPORT_SYMBOL(oxygen_pci_resume);
 #endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct oxygen *chip = card->private_data;
+
+       oxygen_shutdown(chip);
+       chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
index f03a2f2cffee88911e9c11db08c5dd7dd8087b55..06c863e86e3d3d24a5dfd0f7b43f56a47b120719 100644 (file)
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
        .suspend = oxygen_pci_suspend,
        .resume = oxygen_pci_resume,
 #endif
+       .shutdown = oxygen_pci_shutdown,
 };
 
 static int __init alsa_card_xonar_init(void)
index dbc4b89d74e43ffa3311af7c9386fb5aae38d9a9..b82c1cfa96f5334554435a6eeebc6e82fa63857d 100644 (file)
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
        struct xonar_generic generic;
        u16 wm8776_regs[0x17];
        u16 wm8766_regs[0x10];
+       struct snd_kcontrol *line_adcmux_control;
+       struct snd_kcontrol *mic_adcmux_control;
        struct snd_kcontrol *lc_controls[13];
 };
 
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
        xonar_disable_output(chip);
+       wm8776_write(chip, WM8776_RESET, 0);
 }
 
 static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 {
        struct oxygen *chip = ctl->private_data;
        struct xonar_wm87x6 *data = chip->model_data;
+       struct snd_kcontrol *other_ctl;
        unsigned int mux_bit = ctl->private_value;
        u16 reg;
        int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
        mutex_lock(&chip->mutex);
        reg = data->wm8776_regs[WM8776_ADCMUX];
        if (value->value.integer.value[0]) {
-               reg &= ~0x003;
                reg |= mux_bit;
+               /* line-in and mic-in are exclusive */
+               mux_bit ^= 3;
+               if (reg & mux_bit) {
+                       reg &= ~mux_bit;
+                       if (mux_bit == 1)
+                               other_ctl = data->line_adcmux_control;
+                       else
+                               other_ctl = data->mic_adcmux_control;
+                       snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+                                      &other_ctl->id);
+               }
        } else
                reg &= ~mux_bit;
        changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
                err = snd_ctl_add(chip->card, ctl);
                if (err < 0)
                        return err;
+               if (!strcmp(ctl->id.name, "Line Capture Switch"))
+                       data->line_adcmux_control = ctl;
+               else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+                       data->mic_adcmux_control = ctl;
        }
+       if (!data->line_adcmux_control || !data->mic_adcmux_control)
+               return -ENXIO;
        BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
        for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
                ctl = snd_ctl_new1(&lc_controls[i], chip);
index 9feb00c831a02b791227b75294e6dc8e15975f1f..4eabafa5b037db66b250db64cc0ce05aa783f595 100644 (file)
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
        for (idx = 0; idx < 2; idx++) {
                subs = &as->substream[idx];
                if (!subs->num_formats)
-                       return;
+                       continue;
                snd_usb_release_substream_urbs(subs, 1);
                subs->interface = -1;
        }
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
        }
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+                           protocol);
+               /* fall through */
+
        case UAC_VERSION_1: {
                struct uac1_ac_header_descriptor *h1 = control_header;
 
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
                break;
        }
-
-       default:
-               snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
-               return -EINVAL;
        }
 
        return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                        goto __error;
        }
 
-       chip->ctrl_intf = alts;
+       /*
+        * For devices with more than one control interface, we assume the
+        * first contains the audio controls. We might need a more specific
+        * check here in the future.
+        */
+       if (!chip->ctrl_intf)
+               chip->ctrl_intf = alts;
 
        if (err > 0) {
                /* create normal USB audio interfaces */
index b853f8df794f6a35ceb8e39576abf3d1cb91e36c..7754a10345451109a9ff9bd03d271148160861b9 100644 (file)
@@ -295,12 +295,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return set_sample_rate_v1(chip, iface, alts, fmt, rate);
 
        case UAC_VERSION_2:
                return set_sample_rate_v2(chip, iface, alts, fmt, rate);
        }
-
-       return -EINVAL;
 }
 
index 1a701f1e8f501b358029f84bedcbe1268e9f543f..ef0a07e34844ae4d53ffd12e54b16a203da02a19 100644 (file)
@@ -275,6 +275,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 
                /* get audio formats */
                switch (protocol) {
+               default:
+                       snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+                                   dev->devnum, iface_no, altno, protocol);
+                       protocol = UAC_VERSION_1;
+                       /* fall through */
+
                case UAC_VERSION_1: {
                        struct uac1_as_header_descriptor *as =
                                snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -336,11 +342,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
                                   dev->devnum, iface_no, altno, as->bTerminalLink);
                        continue;
                }
-
-               default:
-                       snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
-                                  dev->devnum, iface_no, altno, protocol);
-                       continue;
                }
 
                /* get format type */
index 3a1375459c06a49d0d48a309460af30a0fcfd8c0..69148212aa70e66f9e1193040bfb41aad5548b6b 100644 (file)
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
        u64 pcm_formats;
 
        switch (protocol) {
-       case UAC_VERSION_1: {
+       case UAC_VERSION_1:
+       default: {
                struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
                sample_width = fmt->bBitResolution;
                sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
                format <<= 1;
                break;
        }
-
-       default:
-               return -EINVAL;
        }
 
        pcm_formats = 0;
@@ -384,6 +382,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
         * audio class v2 uses class specific EP0 range requests for that.
         */
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1:
                fp->channels = fmt->bNrChannels;
                ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -392,10 +394,6 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
                /* fp->channels is already set in this case */
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        if (fp->channels < 1) {
@@ -438,6 +436,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
        fp->channels = 1;
 
        switch (protocol) {
+       default:
+               snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+               /* fall through */
        case UAC_VERSION_1: {
                struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
                brate = le16_to_cpu(fmt->wMaxBitRate);
@@ -456,10 +458,6 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
                ret = parse_audio_format_rates_v2(chip, fp);
                break;
        }
-       default:
-               snd_printk(KERN_ERR "%d:%u:%d : invalid protocol version %d\n",
-                          chip->dev->devnum, fp->iface, fp->altsetting, protocol);
-               return -EINVAL;
        }
 
        return ret;
index c166db0057d3e0e613954b21f8ca5274f0242741..3ed3901369ce1a9ca58473ecef450623cf95a7b2 100644 (file)
@@ -2175,7 +2175,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
        }
 
        host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
-       mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+       switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+       case UAC_VERSION_1:
+       default:
+               mixer->protocol = UAC_VERSION_1;
+               break;
+       case UAC_VERSION_2:
+               mixer->protocol = UAC_VERSION_2;
+               break;
+       }
 
        if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
            (err = snd_usb_mixer_status_create(mixer)) < 0)
index 3634cedf93061629619125690fb19dc7d0beae64..3b5135c930628fc092cad54e67ce2a7645c59c02 100644 (file)
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 
        switch (altsd->bInterfaceProtocol) {
        case UAC_VERSION_1:
+       default:
                return init_pitch_v1(chip, iface, alts, fmt);
 
        case UAC_VERSION_2:
                return init_pitch_v2(chip, iface, alts, fmt);
        }
-
-       return -EINVAL;
 }
 
 /*
index 624a96c636fdbc36a472ecaadf5fcb72c226bf38..6de4313924fb5c510f354cb3a8e2a73061e103f6 100644 (file)
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
 
+       node->children_hit = 0;
        node->parent = NULL;
        node->hit = 0;
 }
index e72f05c3bef09258311f7192afb179a10657c450..fcc16e4349df9f3353ac03f975d9c1be9938863a 100644 (file)
@@ -1539,6 +1539,7 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                goto error;
        }
        tev->point.offset = pev->point.offset;
+       tev->point.retprobe = pev->point.retprobe;
        tev->nargs = pev->nargs;
        if (tev->nargs) {
                tev->args = zalloc(sizeof(struct probe_trace_arg)
index 525136684d4ec170201b4d07c7b080b73851edf9..32b81f707ff5eb5d950a2d233d6c00c6b90fedb2 100644 (file)
@@ -686,6 +686,25 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        char buf[32], *ptr;
        int ret, nscopes;
 
+       if (!is_c_varname(pf->pvar->var)) {
+               /* Copy raw parameters */
+               pf->tvar->value = strdup(pf->pvar->var);
+               if (pf->tvar->value == NULL)
+                       return -ENOMEM;
+               if (pf->pvar->type) {
+                       pf->tvar->type = strdup(pf->pvar->type);
+                       if (pf->tvar->type == NULL)
+                               return -ENOMEM;
+               }
+               if (pf->pvar->name) {
+                       pf->tvar->name = strdup(pf->pvar->name);
+                       if (pf->tvar->name == NULL)
+                               return -ENOMEM;
+               } else
+                       pf->tvar->name = NULL;
+               return 0;
+       }
+
        if (pf->pvar->name)
                pf->tvar->name = strdup(pf->pvar->name);
        else {
@@ -700,19 +719,6 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf)
        if (pf->tvar->name == NULL)
                return -ENOMEM;
 
-       if (!is_c_varname(pf->pvar->var)) {
-               /* Copy raw parameters */
-               pf->tvar->value = strdup(pf->pvar->var);
-               if (pf->tvar->value == NULL)
-                       return -ENOMEM;
-               if (pf->pvar->type) {
-                       pf->tvar->type = strdup(pf->pvar->type);
-                       if (pf->tvar->type == NULL)
-                               return -ENOMEM;
-               }
-               return 0;
-       }
-
        pr_debug("Searching '%s' variable in context.\n",
                 pf->pvar->var);
        /* Search child die for local variables and parameters. */
@@ -783,6 +789,16 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
                /* This function has no name. */
                tev->point.offset = (unsigned long)pf->addr;
 
+       /* Return probe must be on the head of a subprogram */
+       if (pf->pev->point.retprobe) {
+               if (tev->point.offset != 0) {
+                       pr_warning("Return probe must be on the head of"
+                                  " a real function\n");
+                       return -EINVAL;
+               }
+               tev->point.retprobe = true;
+       }
+
        pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
                 tev->point.offset);
 
index 1a367734e01693c8a93f79bb3846af4a89f9cffe..b2f5ae97f33dded1cdaba3e843ab2df888515777 100644 (file)
@@ -2268,6 +2268,9 @@ static int setup_list(struct strlist **list, const char *list_str,
 
 int symbol__init(void)
 {
+       if (symbol_conf.initialized)
+               return 0;
+
        elf_version(EV_CURRENT);
        if (symbol_conf.sort_by_name)
                symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
@@ -2293,6 +2296,7 @@ int symbol__init(void)
                       symbol_conf.sym_list_str, "symbol") < 0)
                goto out_free_comm_list;
 
+       symbol_conf.initialized = true;
        return 0;
 
 out_free_dso_list:
@@ -2304,11 +2308,14 @@ out_free_comm_list:
 
 void symbol__exit(void)
 {
+       if (!symbol_conf.initialized)
+               return;
        strlist__delete(symbol_conf.sym_list);
        strlist__delete(symbol_conf.dso_list);
        strlist__delete(symbol_conf.comm_list);
        vmlinux_path__exit();
        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+       symbol_conf.initialized = false;
 }
 
 int machines__create_kernel_maps(struct rb_root *self, pid_t pid)
index b7a8da4af5a0a98e84e1616e55cdaceec18fcdbf..ea95c2756f05e73d09f6868f24b174ed1221f59a 100644 (file)
@@ -69,7 +69,8 @@ struct symbol_conf {
                        show_nr_samples,
                        use_callchain,
                        exclude_other,
-                       show_cpu_utilization;
+                       show_cpu_utilization,
+                       initialized;
        const char      *vmlinux_name,
                        *source_prefix,
                        *field_sep;
index b78b794c1039df394b6ccfc43ae34fb2ff6fc47a..d4853a54771a04a2bb3e126b1aa54ec13e5c29d0 100644 (file)
@@ -1958,10 +1958,10 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                       cpu);
                hardware_disable(NULL);
                break;
-       case CPU_ONLINE:
+       case CPU_STARTING:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_enable, NULL, 1);
+               hardware_enable(NULL);
                break;
        }
        return NOTIFY_OK;
@@ -2096,7 +2096,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 
 static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
-       .priority = 20, /* must be > scheduler priority */
 };
 
 static int vm_stat_get(void *_offset, u64 *val)