bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Aug 2010 21:11:04 +0000 (14:11 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 Aug 2010 21:11:04 +0000 (14:11 -0700)
* 'for-linus' of git://git.infradead.org/users/eparis/notify:
  fsnotify: drop two useless bools in the fsnotify main loop
  fsnotify: fix list walk order
  fanotify: Return EPERM when a process is not privileged
  fanotify: resize pid and reorder structure
  fanotify: drop duplicate pr_debug statement
  fanotify: flush outstanding perm requests on group destroy
  fsnotify: fix ignored mask handling between inode and vfsmount marks
  fanotify: add MAINTAINERS entry
  fsnotify: reset used_inode and used_vfsmount on each pass
  fanotify: do not dereference inode_mark when it is unset

382 files changed:
Documentation/kernel-parameters.txt
Documentation/lguest/Makefile
Documentation/lguest/lguest.c
MAINTAINERS
Makefile
arch/alpha/kernel/osf_sys.c
arch/arm/configs/omap_4430sdp_defconfig
arch/arm/kernel/etm.c
arch/arm/mach-imx/mach-cpuimx27.c
arch/arm/mach-imx/mach-pca100.c
arch/arm/mach-mx25/mach-cpuimx25.c
arch/arm/mach-mx3/mach-cpuimx35.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/clock3xxx_data.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/include/mach/entry-macro.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-tegra/board-harmony.c
arch/arm/mach-tegra/include/mach/vmalloc.h
arch/arm/plat-omap/include/plat/smp.h
arch/blackfin/include/asm/bitops.h
arch/blackfin/include/asm/unistd.h
arch/blackfin/mach-common/entry.S
arch/ia64/hp/sim/simserial.c
arch/mn10300/mm/dma-alloc.c
arch/powerpc/Makefile
arch/powerpc/boot/dts/canyonlands.dts
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rwsem.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/head_44x.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/kernel/vio.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/tlb_nohash_low.S
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/iseries/iommu.c
arch/powerpc/platforms/powermac/feature.c
arch/powerpc/platforms/powermac/pci.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/xmon/xmon.c
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/tlb.h
arch/s390/include/asm/tlbflush.h
arch/s390/kernel/entry.h
arch/s390/kernel/smp.c
arch/s390/mm/init.c
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/backoff.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/rwsem-const.h [deleted file]
arch/sparc/include/asm/rwsem.h
arch/sparc/include/asm/system_64.h
arch/sparc/kernel/process_64.c
arch/sparc/lib/Makefile
arch/sparc/lib/atomic_64.S
arch/sparc/lib/bitops.S
arch/sparc/lib/rwsem_64.S [deleted file]
arch/sparc/prom/cif.S
arch/sparc/prom/console_64.c
arch/sparc/prom/devops_64.c
arch/sparc/prom/misc_64.c
arch/sparc/prom/p1275.c
arch/sparc/prom/tree_64.c
arch/um/drivers/mconsole_kern.c
arch/x86/include/asm/tsc.h
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/tsc.c
arch/x86/power/cpu.c
arch/x86/xen/platform-pci-unplug.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/pata_cmd64x.c
drivers/ata/pata_legacy.c
drivers/ata/pata_winbond.c [deleted file]
drivers/ata/sata_dwc_460ex.c
drivers/ata/sata_mv.c
drivers/base/firmware_class.c
drivers/block/xen-blkfront.c
drivers/char/hangcheck-timer.c
drivers/char/hvc_console.c
drivers/char/hvsi.c
drivers/char/ip2/ip2main.c
drivers/char/rocket.c
drivers/char/synclink_gt.c
drivers/char/sysrq.c
drivers/edac/amd64_edac.c
drivers/edac/edac_mce_amd.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i830/i830_dma.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/mga/mga_state.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_i2c.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv50_instmem.c
drivers/gpu/drm/nouveau/nvc0_instmem.c
drivers/gpu/drm/r128/r128_state.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_state.c
drivers/gpu/drm/savage/savage_bci.c
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/via/via_dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/hwmon/ads7871.c
drivers/hwmon/coretemp.c
drivers/hwmon/k8temp.c
drivers/input/keyboard/hil_kbd.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/uinput.c
drivers/input/mousedev.c
drivers/isdn/hardware/avm/Kconfig
drivers/macintosh/via-pmu.c
drivers/media/dvb/mantis/Kconfig
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/defines.h
drivers/net/e1000e/lib.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/ibm_newemac/debug.c
drivers/net/ibmveth.c
drivers/net/ll_temac_main.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pxa168_eth.c [new file with mode: 0644]
drivers/net/qlcnic/qlcnic_main.c
drivers/net/sh_eth.c
drivers/net/usb/ipheth.c
drivers/net/wireless/adm8211.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ar9170/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/eeprom.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/led.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/txrx.c
drivers/net/wireless/rtl818x/rtl8180_dev.c
drivers/net/wireless/rtl818x/rtl8187_dev.c
drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
drivers/platform/x86/Kconfig
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/intel_rar_register.c
drivers/platform/x86/intel_scu_ipc.c
drivers/s390/char/ctrlchar.c
drivers/s390/char/keyboard.c
drivers/serial/68328serial.c
drivers/serial/8250_early.c
drivers/serial/sn_console.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/batman-adv/bat_sysfs.c
drivers/staging/batman-adv/hard-interface.c
drivers/staging/batman-adv/icmp_socket.c
drivers/staging/batman-adv/main.c
drivers/staging/batman-adv/originator.c
drivers/staging/batman-adv/routing.c
drivers/staging/batman-adv/types.h
drivers/staging/sep/Kconfig [deleted file]
drivers/staging/sep/Makefile [deleted file]
drivers/staging/sep/TODO [deleted file]
drivers/staging/sep/sep_dev.h [deleted file]
drivers/staging/sep/sep_driver.c [deleted file]
drivers/staging/sep/sep_driver_api.h [deleted file]
drivers/staging/sep/sep_driver_config.h [deleted file]
drivers/staging/sep/sep_driver_hw_defs.h [deleted file]
drivers/staging/spectra/ffsport.c
drivers/staging/spectra/flash.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/uvc_v4l2.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/xhci-ring.c
drivers/usb/misc/adutux.c
drivers/usb/misc/iowarrior.c
drivers/usb/otg/twl4030-usb.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/generic.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/navman.c
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/ssu100.c
drivers/usb/serial/usb-serial.c
drivers/xen/events.c
drivers/xen/manage.c
firmware/Makefile
fs/ceph/addr.c
fs/ceph/auth_x.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/osd_client.c
fs/ceph/snap.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/asn1.c
fs/cifs/cifs_unicode.h
fs/cifs/cifs_uniupr.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/transport.c
fs/ecryptfs/crypto.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/kthread.c
fs/ecryptfs/messaging.c
fs/ecryptfs/miscdev.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_fsops.h
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_priv.h
include/drm/drmP.h
include/drm/i830_drm.h
include/drm/i915_drm.h
include/drm/mga_drm.h
include/drm/nouveau_drm.h
include/drm/radeon_drm.h
include/drm/savage_drm.h
include/linux/if_ether.h
include/linux/if_fddi.h
include/linux/if_hippi.h
include/linux/if_pppox.h
include/linux/ipv6.h
include/linux/kobject.h
include/linux/kobject_ns.h [new file with mode: 0644]
include/linux/miscdevice.h
include/linux/mm.h
include/linux/nbd.h
include/linux/ncp.h
include/linux/netfilter/xt_IDLETIMER.h
include/linux/netfilter/xt_ipvs.h
include/linux/phonet.h
include/linux/pxa168_eth.h [new file with mode: 0644]
include/linux/rfkill.h
include/linux/serial_core.h
include/linux/sysfs.h
include/linux/sysrq.h
include/linux/uinput.h
include/linux/usb/composite.h
include/linux/usb/serial.h
include/linux/vgaarb.h
include/trace/events/timer.h
include/xen/platform_pci.h
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_main.c
kernel/pm_qos_params.c
kernel/power/poweroff.c
kernel/sched.c
kernel/sched_fair.c
kernel/trace/trace_stack.c
kernel/watchdog.c
lib/kobject_uevent.c
lib/radix-tree.c
mm/memory.c
mm/mmap.c
mm/page-writeback.c
mm/rmap.c
net/8021q/vlan_dev.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c
net/irda/irlan/irlan_eth.c
net/netlink/af_netlink.c
net/rds/recv.c
scripts/kconfig/confdata.c
scripts/kconfig/symbol.c
scripts/mkmakefile
scripts/setlocalversion
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_eld.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_intelhdmi.c
sound/pci/hda/patch_nvhdmi.c
sound/pci/hda/patch_sigmatel.c
sound/pci/intel8x0.c
sound/soc/imx/imx-ssi.c

index 2c85c0692b015cc099710b0ba4b9a4f0c30a745d..f084af0cb8e01f7a67026367508fef1085bc801b 100644 (file)
@@ -2629,8 +2629,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        aux-ide-disks -- unplug non-primary-master IDE devices
                        nics -- unplug network devices
                        all -- unplug all emulated devices (NICs and IDE disks)
-                       ignore -- continue loading the Xen platform PCI driver even
-                               if the version check failed
+                       unnecessary -- unplugging emulated devices is
+                               unnecessary even if the host did not respond to
+                               the unplug protocol
+                       never -- do not unplug even if version check succeeds
 
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
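
The two new keywords above extend the existing device-class values of the xen_emul_unplug= boot option. Assuming the option's usual comma-separated format (its Format line sits just above this hunk and is not shown here), illustrative boot arguments would be:

    xen_emul_unplug=never
    xen_emul_unplug=aux-ide-disks,nics

The first keeps every emulated device even if the unplug version check succeeds; the second unplugs only the listed device classes.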
index 28c8cdfcafd8693898ae4c4a4d9bc5f4f9f77d62..bebac6b4f332cc117287f58c0cf5df11bf5531f8 100644 (file)
@@ -1,5 +1,6 @@
 # This creates the demonstration utility "lguest" which runs a Linux guest.
-CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE
+# Missing headers?  Add "-I../../include -I../../arch/x86/include"
+CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE
 
 all: lguest
 
index e9ce3c5545145f9b2c683c26690c0159b6c437fc..8a6a8c6d498043dc00184906d71a55e680844ef2 100644 (file)
 #include <limits.h>
 #include <stddef.h>
 #include <signal.h>
-#include "linux/lguest_launcher.h"
-#include "linux/virtio_config.h"
-#include "linux/virtio_net.h"
-#include "linux/virtio_blk.h"
-#include "linux/virtio_console.h"
-#include "linux/virtio_rng.h"
-#include "linux/virtio_ring.h"
-#include "asm/bootparam.h"
+#include <linux/virtio_config.h>
+#include <linux/virtio_net.h>
+#include <linux/virtio_blk.h>
+#include <linux/virtio_console.h>
+#include <linux/virtio_rng.h>
+#include <linux/virtio_ring.h>
+#include <asm/bootparam.h>
+#include "../../include/linux/lguest_launcher.h"
 /*L:110
  * We can ignore the 42 include files we need for this program, but I do want
  * to draw attention to the use of kernel-style types.
@@ -1447,14 +1447,15 @@ static void add_to_bridge(int fd, const char *if_name, const char *br_name)
 static void configure_device(int fd, const char *tapif, u32 ipaddr)
 {
        struct ifreq ifr;
-       struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
+       struct sockaddr_in sin;
 
        memset(&ifr, 0, sizeof(ifr));
        strcpy(ifr.ifr_name, tapif);
 
        /* Don't read these incantations.  Just cut & paste them like I did! */
-       sin->sin_family = AF_INET;
-       sin->sin_addr.s_addr = htonl(ipaddr);
+       sin.sin_family = AF_INET;
+       sin.sin_addr.s_addr = htonl(ipaddr);
+       memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
        if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
                err(1, "Setting %s interface address", tapif);
        ifr.ifr_flags = IFF_UP;
index 271fadbf9d1c92d8e3522e86683ff54e34b2896c..c36f5d76e1a2e659db6c4f2e6f243df640c088d2 100644 (file)
@@ -454,6 +454,17 @@ L: linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/amso1100/
 
+ANALOG DEVICES INC ASOC DRIVERS
+L:     uclinux-dist-devel@blackfin.uclinux.org
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:     http://blackfin.uclinux.org/
+S:     Supported
+F:     sound/soc/blackfin/*
+F:     sound/soc/codecs/ad1*
+F:     sound/soc/codecs/adau*
+F:     sound/soc/codecs/adav*
+F:     sound/soc/codecs/ssm*
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -1665,8 +1676,7 @@ F:        kernel/cgroup*
 F:     mm/*cgroup*
 
 CORETEMP HARDWARE MONITORING DRIVER
-M:     Rudolf Marek <r.marek@assembler.cz>
-M:     Huaxu Wan <huaxu.wan@intel.com>
+M:     Fenghua Yu <fenghua.yu@intel.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/coretemp
@@ -3484,7 +3494,7 @@ LGUEST
 M:     Rusty Russell <rusty@rustcorp.com.au>
 L:     lguest@lists.ozlabs.org
 W:     http://lguest.ozlabs.org/
-S:     Maintained
+S:     Odd Fixes
 F:     Documentation/lguest/
 F:     arch/x86/lguest/
 F:     drivers/lguest/
index f3bdff8c8d784c0045075da5ea690c8ed5445408..031b61cb5274f1d520b7f95fe44fd66b742d16d4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 36
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Sheep on Meth
 
 # *DOCUMENTATION*
@@ -1408,8 +1408,8 @@ checkstack:
        $(OBJDUMP) -d vmlinux $$(find . -name '*.ko') | \
        $(PERL) $(src)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
 
-kernelrelease: include/config/kernel.release
-       @echo $(KERNELRELEASE)
+kernelrelease:
+       @echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
 
 kernelversion:
        @echo $(KERNELVERSION)
index fb58150a7e8f4fb667f10ac7ae0a772b021f9b53..5d1e6d6ce6843b136fb7810e74c86df16d86daf4 100644 (file)
@@ -252,7 +252,7 @@ SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname,
 
        retval = user_path(pathname, &path);
        if (!retval) {
-               retval = do_osf_statfs(&path buffer, bufsiz);
+               retval = do_osf_statfs(&path, buffer, bufsiz);
                path_put(&path);
        }
        return retval;
index 63e0c2d50f324c6eb7ed3c1ff84ded9930f12538..14c1e18c648f7340c243bd821645df57f955f950 100644 (file)
@@ -13,6 +13,9 @@ CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_OMAP=y
 CONFIG_ARCH_OMAP4=y
+# CONFIG_ARCH_OMAP2PLUS_TYPICAL is not set
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
 # CONFIG_OMAP_MUX is not set
 CONFIG_OMAP_32K_TIMER=y
 CONFIG_OMAP_DM_TIMER=y
index 56418f98cd016e9e3668e15c4473c9ba43bf1ca6..33c7077174db118175a1818228b900f56226b45f 100644 (file)
@@ -230,7 +230,7 @@ static void etm_dump(void)
        etb_lock(t);
 }
 
-static void sysrq_etm_dump(int key, struct tty_struct *tty)
+static void sysrq_etm_dump(int key)
 {
        dev_dbg(tracer.dev, "Dumping ETB buffer\n");
        etm_dump();
index 575ff1ae85a738f84cf48720d19e850b96caa0e5..339150ab0ea5d63e13f0520e06612fc827f37f49 100644 (file)
@@ -279,13 +279,13 @@ static void __init eukrea_cpuimx27_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
 
        usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
        mxc_register_device(&mxc_usbh2, &usbh2_pdata);
 #endif
index a389d1148f18c641cd1c997f4798b6641e084bdc..23c9e1f37b9c022bee669908e7aeca791de3b4b4 100644 (file)
@@ -419,13 +419,13 @@ static void __init pca100_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
 
        usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
        mxc_register_device(&mxc_usbh2, &usbh2_pdata);
 #endif
index 56b2e26d23b4eda6a178e6ee633eae2526bd3186..a5f0174290b4eaa0ae36a1769ae4c9cb32631d3c 100644 (file)
@@ -138,7 +138,7 @@ static void __init eukrea_cpuimx25_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg, &otg_pdata);
        }
index 63f970f340a2cc28482ea1484a248fade05098e8..9770a6a973be561fdfb67cfdda1f8cdf36381e8e 100644 (file)
@@ -192,7 +192,7 @@ static void __init mxc_board_init(void)
 #if defined(CONFIG_USB_ULPI)
        if (otg_mode_host) {
                otg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
-                               USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
+                               ULPI_OTG_DRVVBUS | ULPI_OTG_DRVVBUS_EXT);
 
                mxc_register_device(&mxc_otg_host, &otg_pdata);
        }
index 63b2d8859c3c291af8e8af729f63d8302ad4915e..88d3a1e920f583110a88aa8c90bbe7903b098f8f 100644 (file)
@@ -25,6 +25,7 @@ obj-$(CONFIG_LOCAL_TIMERS)            += timer-mpu.o
 obj-$(CONFIG_HOTPLUG_CPU)              += omap-hotplug.o
 obj-$(CONFIG_ARCH_OMAP4)               += omap44xx-smc.o omap4-common.o
 
+AFLAGS_omap-headsmp.o                  :=-Wa,-march=armv7-a
 AFLAGS_omap44xx-smc.o                  :=-Wa,-march=armv7-a
 
 # Functions loaded to SRAM
index 138646deac8932210cc6168ab647444908c92b85..dfdce2d8277992b3e6ccb9fcfbc247e3b8df3708 100644 (file)
@@ -3417,7 +3417,13 @@ int __init omap3xxx_clk_init(void)
        struct omap_clk *c;
        u32 cpu_clkflg = CK_3XXX;
 
-       if (cpu_is_omap34xx()) {
+       if (cpu_is_omap3517()) {
+               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+               cpu_clkflg |= CK_3517;
+       } else if (cpu_is_omap3505()) {
+               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
+               cpu_clkflg |= CK_3505;
+       } else if (cpu_is_omap34xx()) {
                cpu_mask = RATE_IN_3XXX;
                cpu_clkflg |= CK_343X;
 
@@ -3432,12 +3438,6 @@ int __init omap3xxx_clk_init(void)
                        cpu_mask |= RATE_IN_3430ES2PLUS;
                        cpu_clkflg |= CK_3430ES2;
                }
-       } else if (cpu_is_omap3517()) {
-               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-               cpu_clkflg |= CK_3517;
-       } else if (cpu_is_omap3505()) {
-               cpu_mask = RATE_IN_3XXX | RATE_IN_3430ES2PLUS;
-               cpu_clkflg |= CK_3505;
        }
 
        if (omap3_has_192mhz_clk())
index e8256a2ed8e782dd56a7bfa674b98886a9f63e71..9a879f9595098dd5e3fcb303b8d205af744d3a28 100644 (file)
@@ -284,8 +284,8 @@ static void __init omap3_check_revision(void)
                default:
                        omap_revision =  OMAP3630_REV_ES1_2;
                        omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
-                       break;
                }
+               break;
        default:
                /* Unknown default to latest silicon rev as default*/
                omap_revision =  OMAP3630_REV_ES1_2;
index 50fd749166433f7ebadfa492a4ea136718660e7e..06e64e1fc28a7e18957ff0bb9fe620e7487f7284 100644 (file)
@@ -177,7 +177,10 @@ omap_irq_base:     .word   0
                cmpne   \irqnr, \tmp
                cmpcs   \irqnr, \irqnr
                .endm
+#endif
+#endif /* MULTI_OMAP2 */
 
+#ifdef CONFIG_SMP
                /* We assume that irqstat (the raw value of the IRQ acknowledge
                 * register) is preserved from the macro above.
                 * If there is an IPI, we immediately signal end of interrupt
@@ -205,8 +208,7 @@ omap_irq_base:      .word   0
                streq   \irqstat, [\base, #GIC_CPU_EOI]
                cmp     \tmp, #0
                .endm
-#endif
-#endif /* MULTI_OMAP2 */
+#endif /* CONFIG_SMP */
 
                .macro  irq_prio_table
                .endm
index af3c20c8d3f9202e742068f8ba1e57c911e3c265..9e9f70e18e3c95436cf6b957e2791175a3cdc90a 100644 (file)
@@ -102,8 +102,7 @@ static void __init wakeup_secondary(void)
         * Send a 'sev' to wake the secondary core from WFE.
         * Drain the outstanding writes to memory
         */
-       dsb();
-       set_event();
+       dsb_sev();
        mb();
 }
 
index fb4994ad622ec469aaec34665a98af6250d29dc8..7b03426c72a317307db1df3931a5018443117b33 100644 (file)
@@ -480,7 +480,9 @@ void omap_sram_idle(void)
        }
 
        /* Disable IO-PAD and IO-CHAIN wakeup */
-       if (omap3_has_io_wakeup() && core_next_state < PWRDM_POWER_ON) {
+       if (omap3_has_io_wakeup() &&
+           (per_next_state < PWRDM_POWER_ON ||
+            core_next_state < PWRDM_POWER_ON)) {
                prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
                omap3_disable_io_chain();
        }
index 05e78dd9b50ca22ef7b20e91f8a21e6e92c8021d..9e305de56be9ac28ab023dbfc4f748c5f260bec1 100644 (file)
@@ -91,10 +91,8 @@ static void __init tegra_harmony_fixup(struct machine_desc *desc,
 {
        mi->nr_banks = 2;
        mi->bank[0].start = PHYS_OFFSET;
-       mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
        mi->bank[0].size = 448 * SZ_1M;
        mi->bank[1].start = SZ_512M;
-       mi->bank[1].node = PHYS_TO_NID(SZ_512M);
        mi->bank[1].size = SZ_512M;
 }
 
index 267a141730d9c94fa59a7c0acbd5bbc6426e79d2..fd6aa65b2dc6ab433cc7a3f2ca45cf9128294f38 100644 (file)
@@ -23,6 +23,6 @@
 
 #include <asm/sizes.h>
 
-#define VMALLOC_END        0xFE000000
+#define VMALLOC_END        0xFE000000UL
 
 #endif
index 6a3ff65c030350e121649f5bed7cb9d9f8d76724..5177a9c5a25acb62966f14763cf6aff842ee5a11 100644 (file)
 
 #include <asm/hardware/gic.h>
 
-/*
- * set_event() is used to wake up secondary core from wfe using sev. ROM
- * code puts the second core into wfe(standby).
- *
- */
-#define set_event()    __asm__ __volatile__ ("sev" : : : "memory")
-
 /* Needed for secondary core boot */
 extern void omap_secondary_startup(void);
 extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
index d5872cd967ab3e793fd6f6a55e0f296b138444e3..3f7ef4d97791514c9d4eab13d17baffc78efb5f3 100644 (file)
@@ -22,7 +22,9 @@
 
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
+
 #include <asm-generic/bitops/ext2-non-atomic.h>
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
@@ -115,7 +117,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
  * of bits set) of a N-bit word
  */
 
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
        unsigned int res;
 
@@ -125,19 +127,20 @@ static inline unsigned int hweight32(unsigned int w)
        return res;
 }
 
-static inline unsigned int hweight64(__u64 w)
+static inline unsigned int __arch_hweight64(__u64 w)
 {
-       return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
+       return __arch_hweight32((unsigned int)(w >> 32)) +
+              __arch_hweight32((unsigned int)w);
 }
 
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
-       return hweight32(w & 0xffff);
+       return __arch_hweight32(w & 0xffff);
 }
 
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
-       return hweight32(w & 0xff);
+       return __arch_hweight32(w & 0xff);
 }
 
 #endif                         /* _BLACKFIN_BITOPS_H */
index 22886cbdae7aa2b7c615bd800259600bc1148bf6..14fcd254b185a2706bd53c5bad024b4b749273af 100644 (file)
 #define __NR_rt_tgsigqueueinfo 368
 #define __NR_perf_event_open   369
 #define __NR_recvmmsg          370
+#define __NR_fanotify_init     371
+#define __NR_fanotify_mark     372
+#define __NR_prlimit64         373
 
-#define __NR_syscall           371
+#define __NR_syscall           374
 #define NR_syscalls            __NR_syscall
 
 /* Old optional stuff no one actually uses */
index a5847f5d67c7325ae6c53996f02f0b7d348267c3..af1bffa21dc14dccdefca16268da5c737024dfa8 100644 (file)
@@ -1628,6 +1628,9 @@ ENTRY(_sys_call_table)
        .long _sys_rt_tgsigqueueinfo
        .long _sys_perf_event_open
        .long _sys_recvmmsg             /* 370 */
+       .long _sys_fanotify_init
+       .long _sys_fanotify_mark
+       .long _sys_prlimit64
 
        .rept NR_syscalls-(.-_sys_call_table)/4
        .long _sys_ni_syscall
index 2bef5261d96dce13c983758a68f0e280041bdaa5..1e8d71ad93ef6ecd57bbfcc918067e6998b443d7 100644 (file)
@@ -149,7 +149,7 @@ static  void receive_chars(struct tty_struct *tty)
                                                ch = ia64_ssc(0, 0, 0, 0,
                                                              SSC_GETCHAR);
                                        while (!ch);
-                                       handle_sysrq(ch, NULL);
+                                       handle_sysrq(ch);
                                }
 #endif
                                seen_esc = 0;
index 4e34880bea03b889dcbcaea5865d576045bf1707..159acb02cfd4c16dcd1e91f9268b8d9dc6e74463 100644 (file)
@@ -25,7 +25,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
        unsigned long addr;
        void *ret;
 
-       printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
+       pr_debug("dma_alloc_coherent(%s,%zu,%x)\n",
+                dev ? dev_name(dev) : "?", size, gfp);
 
        if (0xbe000000 - pci_sram_allocated >= size) {
                size = (size + 255) & ~255;
index e3ea151c95975a9434995dd7e2da3f6033d6c912..b7212b619c52377ddeb471cf8e32f6080522cafa 100644 (file)
@@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE)  += arch/powerpc/oprofile/
 all: zImage
 
 # With make 3.82 we cannot mix normal and wildcard targets
-BOOT_TARGETS1 := zImage zImage.initrd uImaged
+BOOT_TARGETS1 := zImage zImage.initrd uImage
 BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
 
 PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2)
index 5806ef0b860b5adcf8342e472c744f40cbf8821a..a30370396250ae2d524cac63e987bc5a80f7199b 100644 (file)
                        interrupts = <0x1e 4>;
                };
 
+               SATA0: sata@bffd1000 {
+                       compatible = "amcc,sata-460ex";
+                       reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>;
+                       interrupt-parent = <&UIC3>;
+                       interrupts = <0x0 0x4       /* SATA */
+                                     0x5 0x4>;     /* AHBDMA */
+               };
+
                POB0: opb {
                        compatible = "ibm,opb-460ex", "ibm,opb";
                        #address-cells = <1>;
index 0e398cfee2c82826b6abad1eb8ece6330df6b0b4..acac35d5b382da579cd53061c089a88a7758bb2f 100644 (file)
@@ -433,7 +433,7 @@ typedef struct {
  * with.  However gcc is not clever enough to compute the
  * modulus (2^n-1) without a second multiply.
  */
-#define vsid_scrample(protovsid, size) \
+#define vsid_scramble(protovsid, size) \
        ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
 
 #else /* 1 */
index d8be016d2ede49e750ad68c478203afe7b45f9a7..ff0005eec7dd0d3d3c54946bcbf51cdfdf8bb569 100644 (file)
 #ifdef CONFIG_PPC64
 
 extern void ppc64_runlatch_on(void);
-extern void ppc64_runlatch_off(void);
+extern void __ppc64_runlatch_off(void);
+
+#define ppc64_runlatch_off()                                   \
+       do {                                                    \
+               if (cpu_has_feature(CPU_FTR_CTRL) &&            \
+                   test_thread_flag(TIF_RUNLATCH))             \
+                       __ppc64_runlatch_off();                 \
+       } while (0)
 
 extern unsigned long scom970_read(unsigned int address);
 extern void scom970_write(unsigned int address, unsigned long value);
index 24cd9281ec3726a55c95a7a9f8d076c76b97e2b8..8447d89fbe72639a6a0ce2db68694554bd7c61e8 100644 (file)
 /*
  * the semaphore definition
  */
-struct rw_semaphore {
-       /* XXX this should be able to be an atomic_t  -- paulus */
-       signed int              count;
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
+#ifdef CONFIG_PPC64
+# define RWSEM_ACTIVE_MASK             0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK             0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE           0x00000000L
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+struct rw_semaphore {
+       long                    count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -43,9 +48,13 @@ struct rw_semaphore {
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#define __RWSEM_INITIALIZER(name) \
-       { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
-         LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)                              \
+{                                                              \
+       RWSEM_UNLOCKED_VALUE,                                   \
+       __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
+       LIST_HEAD_INIT((name).wait_list)                        \
+       __RWSEM_DEP_MAP_INIT(name)                              \
+}
 
 #define DECLARE_RWSEM(name)            \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -70,13 +79,13 @@ extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+       if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
 }
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
@@ -92,10 +101,10 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
-                               (atomic_t *)(&sem->count));
+       tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                    (atomic_long_t *)&sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
 }
@@ -107,7 +116,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
@@ -119,9 +128,9 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_dec_return((atomic_t *)(&sem->count));
+       tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
 }
@@ -131,17 +140,17 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0))
+       if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
 {
-       atomic_add(delta, (atomic_t *)(&sem->count));
+       atomic_long_add(delta, (atomic_long_t *)&sem->count);
 }
 
 /*
@@ -149,9 +158,10 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  */
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
-       int tmp;
+       long tmp;
 
-       tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+       tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
+                                    (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
 }
@@ -159,14 +169,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
+       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-       return (sem->count != 0);
+       return sem->count != 0;
 }
 
 #endif /* __KERNEL__ */
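
The hunk above widens the powerpc rw_semaphore count to long and switches to atomic_long_* operations, so 64-bit kernels get a full 32-bit active-count field instead of the old 16-bit one (0x0000ffff in the removed lines). A minimal userspace sketch, plain C and not kernel code, mirroring the CONFIG_PPC64 constants added above to show how one active writer is encoded on a 64-bit build:

/*
 * Illustrative only: the CONFIG_PPC64 bias constants from the hunk above,
 * exercised outside the kernel to show the count encoding for one writer.
 */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK       0xffffffffL
#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_WRITE_BIAS;       /* like the __down_write() fast path */
        printf("locked for write: count=%ld, active=%ld\n",
               count, count & RWSEM_ACTIVE_MASK);

        count -= RWSEM_ACTIVE_WRITE_BIAS;       /* like the __up_write() fast path */
        printf("released: count=%ld\n", count);
        return 0;
}

The low 32 bits carry the active-locker count (1 while the writer holds the lock), which is the field that could previously overflow when very many tasks contended on the semaphore.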
index a5ee345b6a5cc27134bb42742641048aeba48eb6..3d212669a130a5621d1006694c75785f08c87d9d 100644 (file)
@@ -326,3 +326,6 @@ SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
+SYSCALL(fanotify_init)
+COMPAT_SYS(fanotify_mark)
+SYSCALL_SPU(prlimit64)
index f0a10266e7f7b0245180661ae3dc0de2cd92390e..597e6f9d094a95dd51ff80873e8425d75ed94759 100644 (file)
 #define __NR_preadv            320
 #define __NR_pwritev           321
 #define __NR_rt_tgsigqueueinfo 322
+#define __NR_fanotify_init     323
+#define __NR_fanotify_mark     324
+#define __NR_prlimit64         325
 
 #ifdef __KERNEL__
 
-#define __NR_syscalls          323
+#define __NR_syscalls          326
 
 #define __NR__exit __NR_exit
 #define NR_syscalls    __NR_syscalls
index 65e2b4e10f97cfb0fa068571c8ec6ac41c59029f..1f9123f412ec3c5a7a817f58e6316298d6ce7e05 100644 (file)
@@ -1826,7 +1826,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_features           = CPU_FTRS_47X,
                .cpu_user_features      = COMMON_USER_BOOKE |
                        PPC_FEATURE_HAS_FPU,
-               .cpu_user_features      = COMMON_USER_BOOKE,
                .mmu_features           = MMU_FTR_TYPE_47x |
                        MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
                .icache_bsize           = 32,
index 417f7b05a9cebc6e10acfaf41eec88d17a2f7e89..4457382f8667a7e770f51c9cb82ee09f9531fb1b 100644 (file)
@@ -402,6 +402,18 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         */
        hard_irq_disable();
 
+       /*
+        * Make a note of crashing cpu. Will be used in machine_kexec
+        * such that another IPI will not be sent.
+        */
+       crashing_cpu = smp_processor_id();
+       crash_save_cpu(regs, crashing_cpu);
+       crash_kexec_prepare_cpus(crashing_cpu);
+       cpu_set(crashing_cpu, cpus_in_crash);
+#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+       crash_kexec_wait_realmode(crashing_cpu);
+#endif
+
        for_each_irq(i) {
                struct irq_desc *desc = irq_to_desc(i);
 
@@ -438,18 +450,8 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
        crash_shutdown_cpu = -1;
        __debugger_fault_handler = old_handler;
 
-       /*
-        * Make a note of crashing cpu. Will be used in machine_kexec
-        * such that another IPI will not be sent.
-        */
-       crashing_cpu = smp_processor_id();
-       crash_save_cpu(regs, crashing_cpu);
-       crash_kexec_prepare_cpus(crashing_cpu);
-       cpu_set(crashing_cpu, cpus_in_crash);
        crash_kexec_stop_spus();
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
-       crash_kexec_wait_realmode(crashing_cpu);
-#endif
+
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(1, 0);
 }
index 5ab484ef06a720770f72d9883340a045a180f520..562305b40a8e7c669adc95cc3cfc5edaca87e5e5 100644 (file)
@@ -113,6 +113,10 @@ _ENTRY(_start);
        stw     r5, 0(r4)       /* Save abatron_pteptrs at a fixed location */
        stw     r6, 0(r5)
 
+       /* Clear the Machine Check Syndrome Register */
+       li      r0,0
+       mtspr   SPRN_MCSR,r0
+
        /* Let's move on */
        lis     r4,start_kernel@h
        ori     r4,r4,start_kernel@l
index 844a44b6447254889a10c35c2acf877981a4f16e..4d6681dce8163c98f2f2f48828f67cf985dc49f6 100644 (file)
@@ -572,9 +572,6 @@ __secondary_start:
        /* Set thread priority to MEDIUM */
        HMT_MEDIUM
 
-       /* Do early setup for that CPU (stab, slb, hash table pointer) */
-       bl      .early_setup_secondary
-
        /* Initialize the kernel stack.  Just a repeat for iSeries.      */
        LOAD_REG_ADDR(r3, current_set)
        sldi    r28,r24,3               /* get current_set[cpu#]         */
@@ -582,6 +579,9 @@ __secondary_start:
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        std     r1,PACAKSAVE(r13)
 
+       /* Do early setup for that CPU (stab, slb, hash table pointer) */
+       bl      .early_setup_secondary
+
        /* Clear backchain so we get nice backtraces */
        li      r7,0
        mtlr    r7
index 049dda60e4750296e7b890e752e6420d0d1fb769..39a2baa6ad58aec28a3c1d0782753569bbb51f6e 100644 (file)
@@ -94,9 +94,9 @@ void cpu_idle(void)
                HMT_medium();
                ppc64_runlatch_on();
                tick_nohz_restart_sched_tick();
+               preempt_enable_no_resched();
                if (cpu_should_die())
                        cpu_die();
-               preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
index d3ce67cf03be35855394905009d6a39729f7ac92..4a65386995d7fa697f86e32b77503558f1198961 100644 (file)
@@ -67,6 +67,7 @@
 #include <asm/machdep.h>
 #include <asm/udbg.h>
 #include <asm/dbell.h>
+#include <asm/smp.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -446,22 +447,23 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void exc_lvl_ctx_init(void)
 {
        struct thread_info *tp;
-       int i;
+       int i, hw_cpu;
 
        for_each_possible_cpu(i) {
-               memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
-               tp = critirq_ctx[i];
+               hw_cpu = get_hard_smp_processor_id(i);
+               memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = critirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = 0;
 
 #ifdef CONFIG_BOOKE
-               memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
-               tp = dbgirq_ctx[i];
+               memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = dbgirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = 0;
 
-               memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
-               tp = mcheckirq_ctx[i];
+               memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
+               tp = mcheckirq_ctx[hw_cpu];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
 #endif
index 6ddb795f83e8e9ccd62e713d997599baee77122a..e751506323b4509bf04abefb266a6435173a307f 100644 (file)
@@ -336,7 +336,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
                if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
                    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
                        struct device_node *child = pci_device_to_OF_node(dev);
-                       if (dev)
+                       if (child)
                                of_scan_pci_bridge(child, dev);
                }
        }
index 91356ffda2ca3230e930245a0db1cc58cca323a0..b1c648a36b03cbc2d08104b30ccd21e14bf70644 100644 (file)
@@ -728,7 +728,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS) {
 #ifdef CONFIG_PPC64
-                       if (!test_thread_flag(TIF_32BIT))
+                       if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
 #endif
@@ -823,7 +823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        regs->nip = start;
        regs->msr = MSR_USER;
 #else
-       if (!test_thread_flag(TIF_32BIT)) {
+       if (!is_32bit_task()) {
                unsigned long entry, toc;
 
                /* start is a relocated pointer to the function descriptor for
@@ -995,7 +995,7 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
 #ifdef CONFIG_PPC64
-       if (test_thread_flag(TIF_32BIT)) {
+       if (is_32bit_task()) {
                parent_tidp = TRUNC_PTR(parent_tidp);
                child_tidp = TRUNC_PTR(child_tidp);
        }
@@ -1199,19 +1199,17 @@ void ppc64_runlatch_on(void)
        }
 }
 
-void ppc64_runlatch_off(void)
+void __ppc64_runlatch_off(void)
 {
        unsigned long ctrl;
 
-       if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
-               HMT_medium();
+       HMT_medium();
 
-               clear_thread_flag(TIF_RUNLATCH);
+       clear_thread_flag(TIF_RUNLATCH);
 
-               ctrl = mfspr(SPRN_CTRLF);
-               ctrl &= ~CTRL_RUNLATCH;
-               mtspr(SPRN_CTRLT, ctrl);
-       }
+       ctrl = mfspr(SPRN_CTRLF);
+       ctrl &= ~CTRL_RUNLATCH;
+       mtspr(SPRN_CTRLT, ctrl);
 }
 #endif
 
index a10ffc85ada77cfa802f371725a8d1b86bc7475b..93666f9cabf17fd6c0271332e9f4cb84ded8cd85 100644 (file)
@@ -258,17 +258,18 @@ static void __init irqstack_early_init(void)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 static void __init exc_lvl_early_init(void)
 {
-       unsigned int i;
+       unsigned int i, hw_cpu;
 
        /* interrupt stacks must be in lowmem, we get that for free on ppc32
         * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
        for_each_possible_cpu(i) {
-               critirq_ctx[i] = (struct thread_info *)
+               hw_cpu = get_hard_smp_processor_id(i);
+               critirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE
-               dbgirq_ctx[i] = (struct thread_info *)
+               dbgirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-               mcheckirq_ctx[i] = (struct thread_info *)
+               mcheckirq_ctx[hw_cpu] = (struct thread_info *)
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #endif
        }
index 1bee4b68fa452ed4c3894785fdfc8ff80276a6e5..e72690ec9b87489af9e8d30a8e81ac84e03db229 100644 (file)
@@ -95,7 +95,7 @@ int ucache_bsize;
 
 #ifdef CONFIG_SMP
 
-static int smt_enabled_cmdline;
+static char *smt_enabled_cmdline;
 
 /* Look for ibm,smt-enabled OF option */
 static void check_smt_enabled(void)
@@ -103,37 +103,46 @@ static void check_smt_enabled(void)
        struct device_node *dn;
        const char *smt_option;
 
-       /* Allow the command line to overrule the OF option */
-       if (smt_enabled_cmdline)
-               return;
-
-       dn = of_find_node_by_path("/options");
-
-       if (dn) {
-               smt_option = of_get_property(dn, "ibm,smt-enabled", NULL);
+       /* Default to enabling all threads */
+       smt_enabled_at_boot = threads_per_core;
 
-                if (smt_option) {
-                       if (!strcmp(smt_option, "on"))
-                               smt_enabled_at_boot = 1;
-                       else if (!strcmp(smt_option, "off"))
-                               smt_enabled_at_boot = 0;
-                }
-        }
+       /* Allow the command line to overrule the OF option */
+       if (smt_enabled_cmdline) {
+               if (!strcmp(smt_enabled_cmdline, "on"))
+                       smt_enabled_at_boot = threads_per_core;
+               else if (!strcmp(smt_enabled_cmdline, "off"))
+                       smt_enabled_at_boot = 0;
+               else {
+                       long smt;
+                       int rc;
+
+                       rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
+                       if (!rc)
+                               smt_enabled_at_boot =
+                                       min(threads_per_core, (int)smt);
+               }
+       } else {
+               dn = of_find_node_by_path("/options");
+               if (dn) {
+                       smt_option = of_get_property(dn, "ibm,smt-enabled",
+                                                    NULL);
+
+                       if (smt_option) {
+                               if (!strcmp(smt_option, "on"))
+                                       smt_enabled_at_boot = threads_per_core;
+                               else if (!strcmp(smt_option, "off"))
+                                       smt_enabled_at_boot = 0;
+                       }
+
+                       of_node_put(dn);
+               }
+       }
 }
 
 /* Look for smt-enabled= cmdline option */
 static int __init early_smt_enabled(char *p)
 {
-       smt_enabled_cmdline = 1;
-
-       if (!p)
-               return 0;
-
-       if (!strcmp(p, "on") || !strcmp(p, "1"))
-               smt_enabled_at_boot = 1;
-       else if (!strcmp(p, "off") || !strcmp(p, "0"))
-               smt_enabled_at_boot = 0;
-
+       smt_enabled_cmdline = p;
        return 0;
 }
 early_param("smt-enabled", early_smt_enabled);
@@ -380,8 +389,8 @@ void __init setup_system(void)
         */
        xmon_setup();
 
-       check_smt_enabled();
        smp_setup_cpu_maps();
+       check_smt_enabled();
 
 #ifdef CONFIG_SMP
        /* Release secondary cpus out of their spinloops at 0x60 now that
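
With the rework above, the smt-enabled= command-line option now accepts a thread count in addition to "on" and "off"; for example (illustrative boot argument):

    smt-enabled=2

Through the new strict_strtol() path this caps smt_enabled_at_boot at min(threads_per_core, 2), and smp_pSeries_cpu_bootable() (further down in this merge) then refuses to start threads at or above that index during boot.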
index a61b3ddd7bb3c0cd8fb3a3ec79d91005a7e4294d..0008bc58e826c53b29f9e689336388f16377eb77 100644 (file)
@@ -427,11 +427,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
 #endif
 
        if (!cpu_callin_map[cpu]) {
-               printk("Processor %u is stuck.\n", cpu);
+               printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }
 
-       printk("Processor %u found.\n", cpu);
+       DBG("Processor %u found.\n", cpu);
 
        if (smp_ops->give_timebase)
                smp_ops->give_timebase();
index 20fd701a686adfeffc10e718c078f074eab92eaa..b1b6043a56c44f6b07537e8eb4a0aa720400e296 100644 (file)
@@ -616,3 +616,11 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
 
        return sys_sync_file_range(fd, offset, nbytes, flags);
 }
+
+asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+                                        unsigned mask_hi, unsigned mask_lo,
+                                        int dfd, const char __user *pathname)
+{
+       u64 mask = ((u64)mask_hi << 32) | mask_lo;
+       return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
+}
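
The new compat wrapper above exists because a 32-bit task hands the 64-bit fanotify mask to the kernel as two 32-bit halves. A small userspace sketch, plain C and not kernel code, of the same split and rejoin:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask = 0x0000004000000002ULL;  /* arbitrary example mask */
        uint32_t mask_hi = mask >> 32;
        uint32_t mask_lo = (uint32_t)mask;

        /* Same recombination as compat_sys_fanotify_mark() above. */
        uint64_t rejoined = ((uint64_t)mask_hi << 32) | mask_lo;

        printf("hi=%#x lo=%#x rejoined=%#llx\n",
               mask_hi, mask_lo, (unsigned long long)rejoined);
        return 0;
}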
index 00b9436f76525c501ecd8bcfab1abef5911a42d4..fa3469ddaef8d010bca8974765c9c656dfa06635 100644 (file)
@@ -1059,7 +1059,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
        if (!dma_window)
                return NULL;
 
-       tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;
 
@@ -1072,6 +1072,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
+       tbl->it_blocksize = 16;
 
        return iommu_init_table(tbl, -1);
 }
index 71f1415e2472399049a2273eded9c4fb8d173265..ace85fa74b2923a63a5924ef6432c84980b41406 100644 (file)
@@ -79,7 +79,9 @@
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 phys_addr_t memstart_addr = ~0;
+EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
+EXPORT_SYMBOL_GPL(kernstart_addr);
 
 void free_initmem(void)
 {
index cfa768203d085e3c3fd7486758513883e40c7ae6..b9d9fed8f36e355083f32d22de3ba2274fa5fb71 100644 (file)
@@ -200,6 +200,7 @@ _GLOBAL(_tlbivax_bcast)
        rlwimi  r5,r4,0,16,31
        wrteei  0
        mtspr   SPRN_MMUCR,r5
+       isync
 /*     tlbivax 0,r3 - use .long to avoid binutils deps */
        .long 0x7c000624 | (r3 << 11)
        isync
index d1663db7810f3809fd6ea7658299d15d6425c347..81c9208025fa42833b5472da2ccd75845c8b3cc0 100644 (file)
@@ -106,8 +106,7 @@ config MMIO_NVRAM
 
 config MPIC_U3_HT_IRQS
        bool
-       depends on PPC_MAPLE
-       default y
+       default n
 
 config MPIC_BROKEN_REGREAD
        bool
index 58b13ce3847ec3c92453ef70d5ec4e653d6d886a..26a067122a54705ab07840269a6a9b4e9b9f41a5 100644 (file)
@@ -477,7 +477,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 
        ioid = cell_iommu_get_ioid(np);
 
-       window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+       window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
        BUG_ON(window == NULL);
 
        window->offset = offset;
index ce61cea0afb52cc77a4fa806da88cd7489518e88..d8b76335bd13d1d9f89dda4861bd928dccbf7703 100644 (file)
@@ -184,7 +184,7 @@ static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
 
        BUG_ON(lsn == NULL);
 
-       tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+       tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
 
        iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
 
index 39df6ab1735a6161ff4d2581368d5127f6a8297a..df423993f17503997d4ba6e808a0f7754078548e 100644 (file)
@@ -2873,12 +2873,11 @@ set_initial_features(void)
 
                /* Switch airport off */
                for_each_node_by_name(np, "radio") {
-                       if (np && np->parent == macio_chips[0].of_node) {
+                       if (np->parent == macio_chips[0].of_node) {
                                macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
                                core99_airport_enable(np, 0, 0);
                        }
                }
-               of_node_put(np);
        }
 
        /* On all machines that support sound PM, switch sound off */
index ab2027cdf8936ea47fdc6579ee6455b5af5330ae..3bc075c788ef3af25bcf06223098cffee2808942 100644 (file)
@@ -1155,13 +1155,11 @@ void __init pmac_pcibios_after_init(void)
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
                }
        }
-       of_node_put(nd);
        for_each_node_by_name(nd, "ethernet") {
                if (nd->parent && of_device_is_compatible(nd, "gmac")
                    && of_device_is_compatible(nd->parent, "uni-north"))
                        pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
        }
-       of_node_put(nd);
 }
 
 void pmac_pci_fixup_cardbus(struct pci_dev* dev)
index 395848e30c523b36f7b31bbd258610777e87b4dd..a77bcaed80af8fa1c037444bf0aac0b0f5f0e1ea 100644 (file)
@@ -403,7 +403,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
        pci->phb->dma_window_size = 0x8000000ul;
        pci->phb->dma_window_base_cur = 0x8000000ul;
 
-       tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+       tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                           pci->phb->node);
 
        iommu_table_setparms(pci->phb, dn, tbl);
@@ -448,7 +448,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
                 pdn->full_name, ppci->iommu_table);
 
        if (!ppci->iommu_table) {
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   ppci->phb->node);
                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
                        bus->number);
@@ -478,7 +478,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
                struct pci_controller *phb = PCI_DN(dn)->phb;
 
                pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   phb->node);
                iommu_table_setparms(phb, dn, tbl);
                PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
@@ -544,7 +544,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 
        pci = PCI_DN(pdn);
        if (!pci->iommu_table) {
-               tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+               tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
                                   pci->phb->node);
                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
                        pci->phb->bus->number);
index 3b1bf61c45bebda01f084e9229a2c764adb48b2f..0317cce877c647c28e66525dffa24ff1987deedf 100644 (file)
@@ -182,10 +182,13 @@ static int smp_pSeries_cpu_bootable(unsigned int nr)
        /* Special case - we inhibit secondary thread startup
         * during boot if the user requests it.
         */
-       if (system_state < SYSTEM_RUNNING &&
-           cpu_has_feature(CPU_FTR_SMT) &&
-           !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-               return 0;
+       if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+               if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+                       return 0;
+               if (smt_enabled_at_boot
+                   && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+                       return 0;
+       }
 
        return 1;
 }
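
The rewritten check above enforces the boot-time SMT limit per hardware thread: with SMT disabled only thread 0 of each core may come up, and with a limit of N only threads 0 through N-1 may. A small stand-alone illustration of that decision (names mirror the hunk, but this is not kernel code, and the kernel additionally gates it on system_state and the SMT CPU feature):

/* Returns 1 if the given thread of a core may be brought up during boot. */
static int demo_thread_bootable(int thread_in_core, int smt_enabled_at_boot)
{
        if (!smt_enabled_at_boot && thread_in_core != 0)
                return 0;       /* SMT off: primary thread only */
        if (smt_enabled_at_boot && thread_in_core >= smt_enabled_at_boot)
                return 0;       /* e.g. a limit of 2 rejects threads 2 and 3 */
        return 1;
}
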
index 5b22b07c8f67aa19da32832c6b553220445489ee..93834b0d8272231f5c9376784bc5e82bd98c9e38 100644 (file)
@@ -928,8 +928,10 @@ void xics_migrate_irqs_away(void)
                if (xics_status[0] != hw_cpu)
                        goto unlock;
 
-               printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
-                      virq, cpu);
+               /* This is expected during cpu offline. */
+               if (cpu_online(cpu))
+                       printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
+                              virq, cpu);
 
                /* Reset affinity to all cpus */
                cpumask_setall(irq_to_desc(virq)->affinity);
index 0554445200bfd1417a1fe6076b0d258b74e69a02..d17d04cfb2cd4095c77adb306fcb690161c09fbf 100644 (file)
@@ -2880,15 +2880,14 @@ static void xmon_init(int enable)
 }
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct tty_struct *tty) 
+static void sysrq_handle_xmon(int key)
 {
        /* ensure xmon is enabled */
        xmon_init(1);
        debugger(get_irq_regs());
 }
 
-static struct sysrq_key_op sysrq_xmon_op = 
-{
+static struct sysrq_key_op sysrq_xmon_op = {
        .handler =      sysrq_handle_xmon,
        .help_msg =     "Xmon",
        .action_msg =   "Entering xmon",
index 670a1d1745d271e6e89a8246e6432c0c281b3d94..bb8343d157bc18b6117d02aa4ef164039b06f108 100644 (file)
@@ -97,6 +97,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 {
        pte_t pte = huge_ptep_get(ptep);
 
+       mm->context.flush_mm = 1;
        pmd_clear((pmd_t *) ptep);
        return pte;
 }
@@ -167,7 +168,8 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 ({                                                                     \
        pte_t __pte = huge_ptep_get(__ptep);                            \
        if (pte_write(__pte)) {                                         \
-               if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+               (__mm)->context.flush_mm = 1;                           \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 ||   \
                    (__mm) != current->active_mm)                       \
                        huge_ptep_invalidate(__mm, __addr, __ptep);     \
                set_huge_pte_at(__mm, __addr, __ptep,                   \
index 99e3409102b9d955960ad9c75e1dba563358b21a..78522cdefdd42334c7b4f72be1180a6a2ed2f327 100644 (file)
@@ -2,6 +2,8 @@
 #define __MMU_H
 
 typedef struct {
+       atomic_t attach_count;
+       unsigned int flush_mm;
        spinlock_t list_lock;
        struct list_head crst_list;
        struct list_head pgtable_list;
index 976e273988c2ab2147722d46e76372f00e7ece10..a6f0e7cc9cde2f341fba3fef347ea93240bb9372 100644 (file)
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       atomic_set(&mm->context.attach_count, 0);
+       mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -76,6 +79,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
+       atomic_dec(&prev->context.attach_count);
+       WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+       atomic_inc(&next->context.attach_count);
+       /* Check for TLBs not flushed yet */
+       if (next->context.flush_mm)
+               __tlb_flush_mm(next);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
index 89a504c3f12ec13f89fdcb34ab2b3602ce0c75a0..3157441ee1da287345a1f1a6bfe79769939bd7d7 100644 (file)
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 #define ptep_get_and_clear(__mm, __address, __ptep)                    \
 ({                                                                     \
        pte_t __pte = *(__ptep);                                        \
-       if (atomic_read(&(__mm)->mm_users) > 1 ||                       \
+       (__mm)->context.flush_mm = 1;                                   \
+       if (atomic_read(&(__mm)->context.attach_count) > 1 ||           \
            (__mm) != current->active_mm)                               \
                ptep_invalidate(__mm, __address, __ptep);               \
        else                                                            \
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({                                                                     \
        pte_t __pte = *(__ptep);                                        \
        if (pte_write(__pte)) {                                         \
-               if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+               (__mm)->context.flush_mm = 1;                           \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 ||   \
                    (__mm) != current->active_mm)                       \
                        ptep_invalidate(__mm, __addr, __ptep);          \
                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
index 81150b0536890b7d664052a33f22d397bb05b852..fd1c00d08bf57ca1ead5095746ba082847f52752 100644 (file)
@@ -50,8 +50,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
-               (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+       tlb->fullmm = full_mm_flush;
        tlb->nr_ptes = 0;
        tlb->nr_pxds = TLB_NR_PTRS;
        if (tlb->fullmm)
index 304cffa623e158041e128c738999abbed1d3aa5b..29d5d6d4becc7e12a4a5a0b9801a0de554c44b68 100644 (file)
@@ -94,8 +94,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 
 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-       if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+       spin_lock(&mm->page_table_lock);
+       if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
+               mm->context.flush_mm = 0;
+       }
+       spin_unlock(&mm->page_table_lock);
 }
 
 /*
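
Taken together, the s390 hunks above replace the old mm_users/active_mm heuristic with an explicit context.attach_count, counting how many CPUs currently have the mm attached, plus a context.flush_mm flag recording that a TLB flush is still owed. A condensed sketch of the scheme; flush_now() is a hypothetical stand-in for __tlb_flush_mm(), and locking is omitted:

struct demo_context {
        atomic_t attach_count;          /* CPUs with this mm attached */
        unsigned int flush_mm;          /* a flush is pending for it  */
};

static void flush_now(struct demo_context *ctx)
{
        ctx->flush_mm = 0;              /* stand-in for __tlb_flush_mm() */
}

static void demo_pte_cleared(struct demo_context *ctx)
{
        ctx->flush_mm = 1;              /* remember the debt */
        if (atomic_read(&ctx->attach_count) > 1)
                flush_now(ctx);         /* other CPUs may hold stale TLB entries */
}

static void demo_switch_to(struct demo_context *prev, struct demo_context *next)
{
        atomic_dec(&prev->attach_count);
        atomic_inc(&next->attach_count);
        if (next->flush_mm)             /* pay any deferred flush on attach */
                flush_now(next);
}
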
index 403fb430a896104b0c5f7982a9dd76ecf80d5e95..ff579b6bde066e35b306669039773733d61509ef 100644 (file)
@@ -42,8 +42,8 @@ long sys_clone(unsigned long newsp, unsigned long clone_flags,
               int __user *parent_tidptr, int __user *child_tidptr);
 long sys_vfork(void);
 void execve_tail(void);
-long sys_execve(const char __user *name, char __user * __user *argv,
-               char __user * __user *envp);
+long sys_execve(const char __user *name, const char __user *const __user *argv,
+               const char __user *const __user *envp);
 long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
 long sys_sigaction(int sig, const struct old_sigaction __user *act,
                   struct old_sigaction __user *oact);
index 541053ed234ea1c0cd8a893de5bf16c9c30d9022..8127ebd59c4d81f4e8368e7728ad49bf3a173900 100644 (file)
@@ -583,6 +583,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
+       atomic_inc(&init_mm.context.attach_count);
        asm volatile(
                "       stam    0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -659,6 +660,7 @@ void __cpu_die(unsigned int cpu)
        while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);
        smp_free_lowcore(cpu);
+       atomic_dec(&init_mm.context.attach_count);
        pr_info("Processor %d stopped\n", cpu);
 }
 
index acc91c75bc94a7e4a3b8f87600cbe312c4bdcc90..30eb6d02ddb89d59bf11d57ecbf743823cc44087 100644 (file)
@@ -74,6 +74,8 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __raw_local_irq_ssm(ssm_mask);
 
+       atomic_set(&init_mm.context.attach_count, 1);
+
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
index f0c74227c737e0a94efb33c95640375ad38c89bc..bdb2ff880bdd68f58c0b4357479f31bd32ff19d9 100644 (file)
 #define atomic64_set(v, i)     (((v)->counter) = i)
 
 extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(int, atomic64_t *);
+extern void atomic64_add(long, atomic64_t *);
 extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(int, atomic64_t *);
+extern void atomic64_sub(long, atomic64_t *);
 
 extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(int, atomic64_t *);
+extern long atomic64_add_ret(long, atomic64_t *);
 extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(int, atomic64_t *);
+extern long atomic64_sub_ret(long, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
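
Widening the delta parameters above from int to long matters because atomic64_t carries a full 64-bit counter: with the old prototypes any delta beyond 32 bits was silently truncated at the call boundary. A short illustration, with the value chosen only to show the truncation:

static void demo_widened_delta(void)
{
        atomic64_t v = ATOMIC64_INIT(0);

        /* Under the old "void atomic64_add(int, atomic64_t *)" prototype the
         * delta below is converted to int and becomes 0; with the new long
         * prototype the full 2^32 is added. */
        atomic64_add(0x100000000L, &v);
}
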
index fa1fdf67e350e07d950107289f25bdf3c41a73b3..db3af0d30fb10129124c594d1f1195bb59f2a6f2 100644 (file)
@@ -8,6 +8,9 @@
 #define BACKOFF_SETUP(reg)     \
        mov     1, reg
 
+#define BACKOFF_LABEL(spin_label, continue_label) \
+       spin_label
+
 #define BACKOFF_SPIN(reg, tmp, label)  \
        mov     reg, tmp; \
 88:    brnz,pt tmp, 88b; \
 #else
 
 #define BACKOFF_SETUP(reg)
-#define BACKOFF_SPIN(reg, tmp, label) \
-       ba,pt   %xcc, label; \
-        nop;
+
+#define BACKOFF_LABEL(spin_label, continue_label) \
+       continue_label
+
+#define BACKOFF_SPIN(reg, tmp, label)
 
 #endif
 
index a5db0317b5fbfecc523e82b7a27172252cb1f4a2..3e0b2d62303df55133d1f9547e8c984ba5dbbeb2 100644 (file)
@@ -185,9 +185,8 @@ extern int prom_getunumber(int syndrome_code,
                           char *buf, int buflen);
 
 /* Retain physical memory to the caller across soft resets. */
-extern unsigned long prom_retain(const char *name,
-                                unsigned long pa_low, unsigned long pa_high,
-                                long size, long align);
+extern int prom_retain(const char *name, unsigned long size,
+                      unsigned long align, unsigned long *paddr);
 
 /* Load explicit I/D TLB entries into the calling processor. */
 extern long prom_itlb_load(unsigned long index,
@@ -287,26 +286,6 @@ extern void prom_sun4v_guest_soft_state(void);
 extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
 
 /* Client interface level routines. */
-extern long p1275_cmd(const char *, long, ...);
-
-#if 0
-#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
-#else
-#define P1275_SIZE(x) x
-#endif
-
-/* We support at most 16 input and 1 output argument */
-#define P1275_ARG_NUMBER               0
-#define P1275_ARG_IN_STRING            1
-#define P1275_ARG_OUT_BUF              2
-#define P1275_ARG_OUT_32B              3
-#define P1275_ARG_IN_FUNCTION          4
-#define P1275_ARG_IN_BUF               5
-#define P1275_ARG_IN_64B               6
-
-#define P1275_IN(x) ((x) & 0xf)
-#define P1275_OUT(x) (((x) << 4) & 0xf0)
-#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
-#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
+extern void p1275_cmd_direct(unsigned long *);
 
 #endif /* !(__SPARC64_OPLIB_H) */
diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h
deleted file mode 100644 (file)
index e4c61a1..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* rwsem-const.h: RW semaphore counter constants.  */
-#ifndef _SPARC64_RWSEM_CONST_H
-#define _SPARC64_RWSEM_CONST_H
-
-#define RWSEM_UNLOCKED_VALUE           0x00000000
-#define RWSEM_ACTIVE_BIAS              0x00000001
-#define RWSEM_ACTIVE_MASK              0x0000ffff
-#define RWSEM_WAITING_BIAS             (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-#endif /* _SPARC64_RWSEM_CONST_H */
index 6e5621006f85114d3356090b1ce66681021375a9..a2b4302869bcfb8ffe1d54ee3282293a18aeae83 100644 (file)
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
-#include <asm/rwsem-const.h>
 
 struct rwsem_waiter;
 
 struct rw_semaphore {
-       signed int count;
-       spinlock_t              wait_lock;
-       struct list_head        wait_list;
+       signed long                     count;
+#define RWSEM_UNLOCKED_VALUE           0x00000000L
+#define RWSEM_ACTIVE_BIAS              0x00000001L
+#define RWSEM_ACTIVE_MASK              0xffffffffL
+#define RWSEM_WAITING_BIAS             (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS         RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+       spinlock_t                      wait_lock;
+       struct list_head                wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-       struct lockdep_map      dep_map;
+       struct lockdep_map              dep_map;
 #endif
 };
 
@@ -41,6 +46,11 @@ struct rw_semaphore {
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);
 
@@ -51,27 +61,103 @@ do {                                                               \
        __init_rwsem((sem), #sem, &__key);                      \
 } while (0)
 
-extern void __down_read(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
+               rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       while ((tmp = sem->count) >= 0L) {
+               if (tmp == cmpxchg(&sem->count, tmp,
+                                  tmp + RWSEM_ACTIVE_READ_BIAS)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
 
+/*
+ * lock for writing
+ */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-       __down_write(sem);
+       long tmp;
+
+       tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                 (atomic64_t *)(&sem->count));
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+               rwsem_down_write_failed(sem);
 }
 
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline void __down_write(struct rw_semaphore *sem)
 {
-       return atomic_add_return(delta, (atomic_t *)(&sem->count));
+       __down_write_nested(sem, 0);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+                     RWSEM_ACTIVE_WRITE_BIAS);
+       return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
+       if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
+               rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+       if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                                        (atomic64_t *)(&sem->count)) < 0L))
+               rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+       atomic64_add(delta, (atomic64_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+       long tmp;
+
+       tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
+       if (tmp < 0L)
+               rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-       atomic_add(delta, (atomic_t *)(&sem->count));
+       return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
 }
 
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
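
The sparc64 rwsem above becomes a 64-bit counter driven by inline atomic64 operations, replacing the assembler implementation deleted further down. The constants split that counter into a low 32-bit active part and a high waiting part; the sketch below (plain C, not kernel code) works through the condition __up_read() uses to decide whether rwsem_wake() is needed:

#define DEMO_ACTIVE_BIAS        0x00000001L
#define DEMO_ACTIVE_MASK        0xffffffffL
#define DEMO_WAITING_BIAS       (-DEMO_ACTIVE_MASK - 1)

/* One reader plus one queued waiter gives count == 1 + DEMO_WAITING_BIAS.
 * Releasing that reader leaves DEMO_WAITING_BIAS, which is < -1 with no
 * active bits set, so the waiter is woken.  If a second reader were still
 * inside, the low 32 bits would stay non-zero and nothing would be woken. */
static int demo_up_read_should_wake(long count_before_release)
{
        long tmp = count_before_release - DEMO_ACTIVE_BIAS;

        return tmp < -1L && (tmp & DEMO_ACTIVE_MASK) == 0L;
}
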
index d24cfe16afc1babc4feb78f0c697555754304346..e3b65d8cf41b715bb71b9339c9184e42feea01d0 100644 (file)
@@ -106,6 +106,7 @@ do {        __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
  */
 #define write_pic(__p)                                         \
        __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"         \
+                            " nop\n\t"                         \
                             ".align    64\n"                   \
                          "99:wr        %0, 0x0, %%pic\n\t"     \
                             "rd        %%pic, %%g0" : : "r" (__p))
index 485f547483847da14aebfaa0e327c6cbb75dce24..c158a95ec664f1301d78ceb433d99a1c94098dbc 100644 (file)
@@ -303,7 +303,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 #ifdef CONFIG_MAGIC_SYSRQ
 
-static void sysrq_handle_globreg(int key, struct tty_struct *tty)
+static void sysrq_handle_globreg(int key)
 {
        arch_trigger_all_cpu_backtrace();
 }
index c4b5e03af11557efa6ae42aa11f9ce0d74dfa42a..846d1c4374ea9c0c24a64bc4088185388274afd3 100644 (file)
@@ -15,7 +15,7 @@ lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-y                 += atomic_$(BITS).o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
-lib-y                 += rwsem_$(BITS).o
+lib-$(CONFIG_SPARC32) += rwsem_32.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
index 0268210ca1683f7bd0bb018953d93a22e207c701..59186e0fcf398feb5880b37fa28624230e0c756b 100644 (file)
@@ -21,7 +21,7 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -36,7 +36,7 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -51,11 +51,10 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
-        add    %g7, %o0, %g7
-       sra     %g7, 0, %o0
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+        add    %g1, %o0, %g1
        retl
-        nop
+        sra    %g1, 0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic_add_ret, .-atomic_add_ret
 
@@ -67,11 +66,10 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        cas     [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %icc, 2f
-        sub    %g7, %o0, %g7
-       sra     %g7, 0, %o0
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b)
+        sub    %g1, %o0, %g1
        retl
-        nop
+        sra    %g1, 0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic_sub_ret, .-atomic_sub_ret
 
@@ -83,7 +81,7 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -98,7 +96,7 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -113,11 +111,10 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
        add     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
-        add    %g7, %o0, %g7
-       mov     %g7, %o0
-       retl
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
+       retl
+        add    %g1, %o0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic64_add_ret, .-atomic64_add_ret
 
@@ -129,10 +126,9 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
        sub     %g1, %o0, %g7
        casx    [%o1], %g1, %g7
        cmp     %g1, %g7
-       bne,pn  %xcc, 2f
-        sub    %g7, %o0, %g7
-       mov     %g7, %o0
-       retl
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
+       retl
+        sub    %g1, %o0, %o0
 2:     BACKOFF_SPIN(%o2, %o3, 1b)
        .size   atomic64_sub_ret, .-atomic64_sub_ret
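
Two changes run through the assembler above: the failure branch of every cas/casx loop now goes through BACKOFF_LABEL(), which expands to the backoff spin label when backoff is configured (SMP) and straight back to the retry label otherwise, so UP kernels carry no backoff code at all; and the *_ret variants now form their result from %g1 + %o0 in the delay slot of retl, which is safe because a successful cas means memory really did hold %g1. A rough C analogue of the retry-with-backoff loop, with a hypothetical backoff() helper standing in for BACKOFF_SPIN:

static void backoff(void)
{
        /* stand-in: exponential delay on SMP, nothing on UP */
}

static long demo_atomic64_add_ret(long delta, volatile long *ptr)
{
        long old, seen;

        for (;;) {
                old = *ptr;
                seen = __sync_val_compare_and_swap(ptr, old, old + delta);
                if (seen == old)                /* the casx equivalent succeeded */
                        return old + delta;     /* result rebuilt from the old value */
                backoff();                      /* then retry from the load */
        }
}
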
index 2b7228cb8c2209332000d429257f16f4dbcf730f..3dc61d5537c08a6ad56f37b90df340a527298bb7 100644 (file)
@@ -22,7 +22,7 @@ test_and_set_bit:     /* %o0=nr, %o1=addr */
        or      %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -45,7 +45,7 @@ test_and_clear_bit:   /* %o0=nr, %o1=addr */
        andn    %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -68,7 +68,7 @@ test_and_change_bit:  /* %o0=nr, %o1=addr */
        xor     %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         and    %g7, %o2, %g2
        clr     %o0
        movrne  %g2, 1, %o0
@@ -91,7 +91,7 @@ set_bit:              /* %o0=nr, %o1=addr */
        or      %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -112,7 +112,7 @@ clear_bit:          /* %o0=nr, %o1=addr */
        andn    %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
@@ -133,7 +133,7 @@ change_bit:         /* %o0=nr, %o1=addr */
        xor     %g7, %o2, %g1
        casx    [%o1], %g7, %g1
        cmp     %g7, %g1
-       bne,pn  %xcc, 2f
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b)
         nop
        retl
         nop
diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
deleted file mode 100644 (file)
index 91a7d29..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-/* rwsem.S: RW semaphore assembler.
- *
- * Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem.h
- */
-
-#include <asm/rwsem-const.h>
-
-       .section        .sched.text, "ax"
-
-       .globl          __down_read
-__down_read:
-1:     lduw            [%o0], %g1
-       add             %g1, 1, %g7
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        add            %g7, 1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_down_read_failed
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __down_read, .-__down_read
-
-       .globl          __down_read_trylock
-__down_read_trylock:
-1:     lduw            [%o0], %g1
-       add             %g1, 1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 2f
-        mov            0, %o1
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        mov            1, %o1
-2:     retl
-        mov            %o1, %o0
-       .size           __down_read_trylock, .-__down_read_trylock
-
-       .globl          __down_write
-__down_write:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       add             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        cmp            %g7, 0
-       bne,pn          %icc, 3f
-        nop
-2:     retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_down_write_failed
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __down_write, .-__down_write
-
-       .globl          __down_write_trylock
-__down_write_trylock:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       cmp             %g3, 0
-       bne,pn          %icc, 2f
-        mov            0, %o1
-       add             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        mov            1, %o1
-2:     retl
-        mov            %o1, %o0
-       .size           __down_write_trylock, .-__down_write_trylock
-
-       .globl          __up_read
-__up_read:
-1:
-       lduw            [%o0], %g1
-       sub             %g1, 1, %g7
-       cas             [%o0], %g1, %g7
-       cmp             %g1, %g7
-       bne,pn          %icc, 1b
-        cmp            %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:     retl
-        nop
-3:     sethi           %hi(RWSEM_ACTIVE_MASK), %g1
-       sub             %g7, 1, %g7
-       or              %g1, %lo(RWSEM_ACTIVE_MASK), %g1
-       andcc           %g7, %g1, %g0
-       bne,pn          %icc, 2b
-        nop
-       save            %sp, -192, %sp
-       call            rwsem_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __up_read, .-__up_read
-
-       .globl          __up_write
-__up_write:
-       sethi           %hi(RWSEM_ACTIVE_WRITE_BIAS), %g1
-       or              %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       sub             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        sub            %g7, %g1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __up_write, .-__up_write
-
-       .globl          __downgrade_write
-__downgrade_write:
-       sethi           %hi(RWSEM_WAITING_BIAS), %g1
-       or              %g1, %lo(RWSEM_WAITING_BIAS), %g1
-1:
-       lduw            [%o0], %g3
-       sub             %g3, %g1, %g7
-       cas             [%o0], %g3, %g7
-       cmp             %g3, %g7
-       bne,pn          %icc, 1b
-        sub            %g7, %g1, %g7
-       cmp             %g7, 0
-       bl,pn           %icc, 3f
-        nop
-2:
-       retl
-        nop
-3:
-       save            %sp, -192, %sp
-       call            rwsem_downgrade_wake
-        mov            %i0, %o0
-       ret
-        restore
-       .size           __downgrade_write, .-__downgrade_write
index 5f27ad779c0c578097218e44b005eaed770acf7d..9c86b4b7d4290b75a5967c28790c59b3ee24b5c9 100644 (file)
@@ -9,18 +9,18 @@
 #include <asm/thread_info.h>
 
        .text
-       .globl  prom_cif_interface
-prom_cif_interface:
-       sethi   %hi(p1275buf), %o0
-       or      %o0, %lo(p1275buf), %o0
-       ldx     [%o0 + 0x010], %o1      ! prom_cif_stack
-       save    %o1, -192, %sp
-       ldx     [%i0 + 0x008], %l2      ! prom_cif_handler
+       .globl  prom_cif_direct
+prom_cif_direct:
+       sethi   %hi(p1275buf), %o1
+       or      %o1, %lo(p1275buf), %o1
+       ldx     [%o1 + 0x0010], %o2     ! prom_cif_stack
+       save    %o2, -192, %sp
+       ldx     [%i1 + 0x0008], %l2     ! prom_cif_handler
        mov     %g4, %l0
        mov     %g5, %l1
        mov     %g6, %l3
        call    %l2
-        add    %i0, 0x018, %o0         ! prom_args
+        mov    %i0, %o0                ! prom_args
        mov     %l0, %g4
        mov     %l1, %g5
        mov     %l3, %g6
index f55d58a8a1567a653f8517598edcfe7f777b4257..10322dc2f557a22d5befcc17dd4a27d8b6604f89 100644 (file)
@@ -21,14 +21,22 @@ extern int prom_stdin, prom_stdout;
 inline int
 prom_nbgetchar(void)
 {
+       unsigned long args[7];
        char inc;
 
-       if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
-                             P1275_INOUT(3,1),
-                             prom_stdin, &inc, P1275_SIZE(1)) == 1)
+       args[0] = (unsigned long) "read";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdin;
+       args[4] = (unsigned long) &inc;
+       args[5] = 1;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[6] == 1)
                return inc;
-       else
-               return -1;
+       return -1;
 }
 
 /* Non blocking put character to console device, returns -1 if
@@ -37,12 +45,22 @@ prom_nbgetchar(void)
 inline int
 prom_nbputchar(char c)
 {
+       unsigned long args[7];
        char outc;
        
        outc = c;
-       if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
-                              P1275_INOUT(3,1),
-                              prom_stdout, &outc, P1275_SIZE(1)) == 1)
+
+       args[0] = (unsigned long) "write";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdout;
+       args[4] = (unsigned long) &outc;
+       args[5] = 1;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[6] == 1)
                return 0;
        else
                return -1;
@@ -67,7 +85,15 @@ prom_putchar(char c)
 void
 prom_puts(const char *s, int len)
 {
-       p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
-                          P1275_INOUT(3,1),
-                          prom_stdout, s, P1275_SIZE(len));
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "write";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) prom_stdout;
+       args[4] = (unsigned long) s;
+       args[5] = len;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
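
This and the remaining prom_* conversions all follow one calling convention for p1275_cmd_direct(), whose implementation appears in the p1275.c hunk later in this diff: the caller lays out the raw cell array itself, giving the service name, the number of input cells, the number of output cells, the inputs, and finally the output slots pre-seeded with -1, then reads the results straight back out of the array. Condensed from the prom_nbgetchar() hunk above:

static int demo_prom_read_one_char(int prom_stdin)
{
        unsigned long args[7];
        char inc;

        args[0] = (unsigned long) "read";       /* service name            */
        args[1] = 3;                            /* input cells that follow */
        args[2] = 1;                            /* output cells expected   */
        args[3] = (unsigned int) prom_stdin;
        args[4] = (unsigned long) &inc;
        args[5] = 1;
        args[6] = (unsigned long) -1;           /* filled in by the PROM   */

        p1275_cmd_direct(args);

        return (args[6] == 1) ? inc : -1;       /* one byte read, or error */
}
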
index 9dbd803e46e1f4c460300c6f5f2485f3d6c54e7d..a017119e7ef17c40ee8fa8328252f921c59a6ca8 100644 (file)
 int
 prom_devopen(const char *dstr)
 {
-       return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
-                                 P1275_INOUT(1,1),
-                                 dstr);
+       unsigned long args[5];
+
+       args[0] = (unsigned long) "open";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) dstr;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
 }
 
 /* Close the device described by device handle 'dhandle'. */
 int
 prom_devclose(int dhandle)
 {
-       p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "close";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) dhandle;
+
+       p1275_cmd_direct(args);
+
        return 0;
 }
 
@@ -37,5 +53,15 @@ prom_devclose(int dhandle)
 void
 prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
 {
-       p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "seek";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) dhandle;
+       args[4] = seekhi;
+       args[5] = seeklo;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
index 39fc6af21b7c55ddc0e752c80c745ed6c018820a..6cb1581d6aef507fca9c036708cebe24e5ab053c 100644 (file)
 
 int prom_service_exists(const char *service_name)
 {
-       int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                           P1275_INOUT(1, 1), service_name);
+       unsigned long args[5];
 
-       if (err)
+       args[0] = (unsigned long) "test";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) service_name;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[4])
                return 0;
        return 1;
 }
@@ -31,30 +38,47 @@ int prom_service_exists(const char *service_name)
 void prom_sun4v_guest_soft_state(void)
 {
        const char *svc = "SUNW,soft-state-supported";
+       unsigned long args[3];
 
        if (!prom_service_exists(svc))
                return;
-       p1275_cmd(svc, P1275_INOUT(0, 0));
+       args[0] = (unsigned long) svc;
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 /* Reset and reboot the machine with the command 'bcommand'. */
 void prom_reboot(const char *bcommand)
 {
+       unsigned long args[4];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_reboot(bcommand);
 #endif
-       p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                 P1275_INOUT(1, 0), bcommand);
+       args[0] = (unsigned long) "boot";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned long) bcommand;
+
+       p1275_cmd_direct(args);
 }
 
 /* Forth evaluate the expression contained in 'fstring'. */
 void prom_feval(const char *fstring)
 {
+       unsigned long args[5];
+
        if (!fstring || fstring[0] == 0)
                return;
-       p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
-                 P1275_INOUT(1, 1), fstring);
+       args[0] = (unsigned long) "interpret";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) fstring;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
 }
 EXPORT_SYMBOL(prom_feval);
 
@@ -68,6 +92,7 @@ extern void smp_release(void);
  */
 void prom_cmdline(void)
 {
+       unsigned long args[3];
        unsigned long flags;
 
        local_irq_save(flags);
@@ -76,7 +101,11 @@ void prom_cmdline(void)
        smp_capture();
 #endif
 
-       p1275_cmd("enter", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "enter";
+       args[1] = 0;
+       args[2] = 0;
+
+       p1275_cmd_direct(args);
 
 #ifdef CONFIG_SMP
        smp_release();
@@ -90,22 +119,32 @@ void prom_cmdline(void)
  */
 void notrace prom_halt(void)
 {
+       unsigned long args[3];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
 again:
-       p1275_cmd("exit", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "exit";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
        goto again; /* PROM is out to get me -DaveM */
 }
 
 void prom_halt_power_off(void)
 {
+       unsigned long args[3];
+
 #ifdef CONFIG_SUN_LDOMS
        if (ldom_domaining_enabled)
                ldom_power_off();
 #endif
-       p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
+       args[0] = (unsigned long) "SUNW,power-off";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 
        /* if nothing else helps, we just halt */
        prom_halt();
@@ -114,10 +153,15 @@ void prom_halt_power_off(void)
 /* Set prom sync handler to call function 'funcp'. */
 void prom_setcallback(callback_func_t funcp)
 {
+       unsigned long args[5];
        if (!funcp)
                return;
-       p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) |
-                 P1275_INOUT(1, 1), funcp);
+       args[0] = (unsigned long) "set-callback";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) funcp;
+       args[4] = (unsigned long) -1;
+       p1275_cmd_direct(args);
 }
 
 /* Get the idprom and stuff it into buffer 'idbuf'.  Returns the
@@ -173,57 +217,61 @@ static int prom_get_memory_ihandle(void)
 }
 
 /* Load explicit I/D TLB entries. */
+static long tlb_load(const char *type, unsigned long index,
+                    unsigned long tte_data, unsigned long vaddr)
+{
+       unsigned long args[9];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 5;
+       args[2] = 1;
+       args[3] = (unsigned long) type;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = vaddr;
+       args[6] = tte_data;
+       args[7] = index;
+       args[8] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (long) args[8];
+}
+
 long prom_itlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                         P1275_ARG(2, P1275_ARG_IN_64B) |
-                         P1275_ARG(3, P1275_ARG_IN_64B) |
-                         P1275_INOUT(5, 1)),
-                        "SUNW,itlb-load",
-                        prom_get_mmu_ihandle(),
-                        /* And then our actual args are pushed backwards. */
-                        vaddr,
-                        tte_data,
-                        index);
+       return tlb_load("SUNW,itlb-load", index, tte_data, vaddr);
 }
 
 long prom_dtlb_load(unsigned long index,
                    unsigned long tte_data,
                    unsigned long vaddr)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                         P1275_ARG(2, P1275_ARG_IN_64B) |
-                         P1275_ARG(3, P1275_ARG_IN_64B) |
-                         P1275_INOUT(5, 1)),
-                        "SUNW,dtlb-load",
-                        prom_get_mmu_ihandle(),
-                        /* And then our actual args are pushed backwards. */
-                        vaddr,
-                        tte_data,
-                        index);
+       return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr);
 }
 
 int prom_map(int mode, unsigned long size,
             unsigned long vaddr, unsigned long paddr)
 {
-       int ret = p1275_cmd(prom_callmethod_name,
-                           (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                            P1275_ARG(3, P1275_ARG_IN_64B) |
-                            P1275_ARG(4, P1275_ARG_IN_64B) |
-                            P1275_ARG(6, P1275_ARG_IN_64B) |
-                            P1275_INOUT(7, 1)),
-                           prom_map_name,
-                           prom_get_mmu_ihandle(),
-                           mode,
-                           size,
-                           vaddr,
-                           0,
-                           paddr);
-
+       unsigned long args[11];
+       int ret;
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 7;
+       args[2] = 1;
+       args[3] = (unsigned long) prom_map_name;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = (unsigned int) mode;
+       args[6] = size;
+       args[7] = vaddr;
+       args[8] = 0;
+       args[9] = paddr;
+       args[10] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       ret = (int) args[10];
        if (ret == 0)
                ret = -1;
        return ret;
@@ -231,40 +279,51 @@ int prom_map(int mode, unsigned long size,
 
 void prom_unmap(unsigned long size, unsigned long vaddr)
 {
-       p1275_cmd(prom_callmethod_name,
-                 (P1275_ARG(0, P1275_ARG_IN_STRING) |
-                  P1275_ARG(2, P1275_ARG_IN_64B) |
-                  P1275_ARG(3, P1275_ARG_IN_64B) |
-                  P1275_INOUT(4, 0)),
-                 prom_unmap_name,
-                 prom_get_mmu_ihandle(),
-                 size,
-                 vaddr);
+       unsigned long args[7];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 4;
+       args[2] = 0;
+       args[3] = (unsigned long) prom_unmap_name;
+       args[4] = (unsigned int) prom_get_mmu_ihandle();
+       args[5] = size;
+       args[6] = vaddr;
+
+       p1275_cmd_direct(args);
 }
 
 /* Set aside physical memory which is not touched or modified
  * across soft resets.
  */
-unsigned long prom_retain(const char *name,
-                         unsigned long pa_low, unsigned long pa_high,
-                         long size, long align)
+int prom_retain(const char *name, unsigned long size,
+               unsigned long align, unsigned long *paddr)
 {
-       /* XXX I don't think we return multiple values correctly.
-        * XXX OBP supposedly returns pa_low/pa_high here, how does
-        * XXX it work?
+       unsigned long args[11];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 5;
+       args[2] = 3;
+       args[3] = (unsigned long) "SUNW,retain";
+       args[4] = (unsigned int) prom_get_memory_ihandle();
+       args[5] = align;
+       args[6] = size;
+       args[7] = (unsigned long) name;
+       args[8] = (unsigned long) -1;
+       args[9] = (unsigned long) -1;
+       args[10] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       if (args[8])
+               return (int) args[8];
+
+       /* Next we get "phys_high" then "phys_low".  On 64-bit
+        * the phys_high cell is don't care since the phys_low
+        * cell has the full value.
         */
+       *paddr = args[10];
 
-       /* If align is zero, the pa_low/pa_high args are passed,
-        * else they are not.
-        */
-       if (align == 0)
-               return p1275_cmd("SUNW,retain",
-                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)),
-                                name, pa_low, pa_high, size, align);
-       else
-               return p1275_cmd("SUNW,retain",
-                                (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)),
-                                name, size, align);
+       return 0;
 }
 
 /* Get "Unumber" string for the SIMM at the given
@@ -277,62 +336,129 @@ int prom_getunumber(int syndrome_code,
                    unsigned long phys_addr,
                    char *buf, int buflen)
 {
-       return p1275_cmd(prom_callmethod_name,
-                        (P1275_ARG(0, P1275_ARG_IN_STRING)     |
-                         P1275_ARG(3, P1275_ARG_OUT_BUF)       |
-                         P1275_ARG(6, P1275_ARG_IN_64B)        |
-                         P1275_INOUT(8, 2)),
-                        "SUNW,get-unumber", prom_get_memory_ihandle(),
-                        buflen, buf, P1275_SIZE(buflen),
-                        0, phys_addr, syndrome_code);
+       unsigned long args[12];
+
+       args[0] = (unsigned long) prom_callmethod_name;
+       args[1] = 7;
+       args[2] = 2;
+       args[3] = (unsigned long) "SUNW,get-unumber";
+       args[4] = (unsigned int) prom_get_memory_ihandle();
+       args[5] = buflen;
+       args[6] = (unsigned long) buf;
+       args[7] = 0;
+       args[8] = phys_addr;
+       args[9] = (unsigned int) syndrome_code;
+       args[10] = (unsigned long) -1;
+       args[11] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[10];
 }
 
 /* Power management extensions. */
 void prom_sleepself(void)
 {
-       p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,sleep-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 int prom_sleepsystem(void)
 {
-       return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1));
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,sleep-system";
+       args[1] = 0;
+       args[2] = 1;
+       args[3] = (unsigned long) -1;
+       p1275_cmd_direct(args);
+
+       return (int) args[3];
 }
 
 int prom_wakeupsystem(void)
 {
-       return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1));
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,wakeup-system";
+       args[1] = 0;
+       args[2] = 1;
+       args[3] = (unsigned long) -1;
+       p1275_cmd_direct(args);
+
+       return (int) args[3];
 }
 
 #ifdef CONFIG_SMP
 void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg)
 {
-       p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg);
+       unsigned long args[6];
+
+       args[0] = (unsigned long) "SUNW,start-cpu";
+       args[1] = 3;
+       args[2] = 0;
+       args[3] = (unsigned int) cpunode;
+       args[4] = pc;
+       args[5] = arg;
+       p1275_cmd_direct(args);
 }
 
 void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg)
 {
-       p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0),
-                 cpuid, pc, arg);
+       unsigned long args[6];
+
+       args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid";
+       args[1] = 3;
+       args[2] = 0;
+       args[3] = (unsigned int) cpuid;
+       args[4] = pc;
+       args[5] = arg;
+       p1275_cmd_direct(args);
 }
 
 void prom_stopcpu_cpuid(int cpuid)
 {
-       p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0),
-                 cpuid);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) cpuid;
+       p1275_cmd_direct(args);
 }
 
 void prom_stopself(void)
 {
-       p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,stop-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 void prom_idleself(void)
 {
-       p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0));
+       unsigned long args[3];
+
+       args[0] = (unsigned long) "SUNW,idle-self";
+       args[1] = 0;
+       args[2] = 0;
+       p1275_cmd_direct(args);
 }
 
 void prom_resumecpu(int cpunode)
 {
-       p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode);
+       unsigned long args[4];
+
+       args[0] = (unsigned long) "SUNW,resume-cpu";
+       args[1] = 1;
+       args[2] = 0;
+       args[3] = (unsigned int) cpunode;
+       p1275_cmd_direct(args);
 }
 #endif
index 2d8b70d397f154318612858c1bf7811160ee7f49..fa6e4e219b9ce436db25fcf3aa168d4b9f559ae2 100644 (file)
@@ -22,13 +22,11 @@ struct {
        long prom_callback;                     /* 0x00 */
        void (*prom_cif_handler)(long *);       /* 0x08 */
        unsigned long prom_cif_stack;           /* 0x10 */
-       unsigned long prom_args [23];           /* 0x18 */
-       char prom_buffer [3000];
 } p1275buf;
 
 extern void prom_world(int);
 
-extern void prom_cif_interface(void);
+extern void prom_cif_direct(unsigned long *args);
 extern void prom_cif_callback(void);
 
 /*
@@ -36,114 +34,20 @@ extern void prom_cif_callback(void);
  */
 DEFINE_RAW_SPINLOCK(prom_entry_lock);
 
-long p1275_cmd(const char *service, long fmt, ...)
+void p1275_cmd_direct(unsigned long *args)
 {
-       char *p, *q;
        unsigned long flags;
-       int nargs, nrets, i;
-       va_list list;
-       long attrs, x;
-       
-       p = p1275buf.prom_buffer;
 
        raw_local_save_flags(flags);
        raw_local_irq_restore(PIL_NMI);
        raw_spin_lock(&prom_entry_lock);
 
-       p1275buf.prom_args[0] = (unsigned long)p;               /* service */
-       strcpy (p, service);
-       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
-       p1275buf.prom_args[1] = nargs = (fmt & 0x0f);           /* nargs */
-       p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4);    /* nrets */
-       attrs = fmt >> 8;
-       va_start(list, fmt);
-       for (i = 0; i < nargs; i++, attrs >>= 3) {
-               switch (attrs & 0x7) {
-               case P1275_ARG_NUMBER:
-                       p1275buf.prom_args[i + 3] =
-                                               (unsigned)va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_64B:
-                       p1275buf.prom_args[i + 3] =
-                               va_arg(list, unsigned long);
-                       break;
-               case P1275_ARG_IN_STRING:
-                       strcpy (p, va_arg(list, char *));
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
-                       break;
-               case P1275_ARG_OUT_BUF:
-                       (void) va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       x = va_arg(list, long);
-                       i++; attrs >>= 3;
-                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
-                       p1275buf.prom_args[i + 3] = x;
-                       break;
-               case P1275_ARG_IN_BUF:
-                       q = va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       x = va_arg(list, long);
-                       i++; attrs >>= 3;
-                       memcpy (p, q, (int)x);
-                       p = (char *)(((long)(p + (int)x + 7)) & ~7);
-                       p1275buf.prom_args[i + 3] = x;
-                       break;
-               case P1275_ARG_OUT_32B:
-                       (void) va_arg(list, char *);
-                       p1275buf.prom_args[i + 3] = (unsigned long)p;
-                       p += 32;
-                       break;
-               case P1275_ARG_IN_FUNCTION:
-                       p1275buf.prom_args[i + 3] =
-                                       (unsigned long)prom_cif_callback;
-                       p1275buf.prom_callback = va_arg(list, long);
-                       break;
-               }
-       }
-       va_end(list);
-
        prom_world(1);
-       prom_cif_interface();
+       prom_cif_direct(args);
        prom_world(0);
 
-       attrs = fmt >> 8;
-       va_start(list, fmt);
-       for (i = 0; i < nargs; i++, attrs >>= 3) {
-               switch (attrs & 0x7) {
-               case P1275_ARG_NUMBER:
-                       (void) va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_STRING:
-                       (void) va_arg(list, char *);
-                       break;
-               case P1275_ARG_IN_FUNCTION:
-                       (void) va_arg(list, long);
-                       break;
-               case P1275_ARG_IN_BUF:
-                       (void) va_arg(list, char *);
-                       (void) va_arg(list, long);
-                       i++; attrs >>= 3;
-                       break;
-               case P1275_ARG_OUT_BUF:
-                       p = va_arg(list, char *);
-                       x = va_arg(list, long);
-                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
-                       i++; attrs >>= 3;
-                       break;
-               case P1275_ARG_OUT_32B:
-                       p = va_arg(list, char *);
-                       memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
-                       break;
-               }
-       }
-       va_end(list);
-       x = p1275buf.prom_args [nargs + 3];
-
        raw_spin_unlock(&prom_entry_lock);
        raw_local_irq_restore(flags);
-
-       return x;
 }
 
 void prom_cif_init(void *cif_handler, void *cif_stack)
index 3c0d2dd9f6939faf1daf53f61141c785001429b4..9d3f9137a43ae1307ea698d81e13896af7504453 100644 (file)
 #include <asm/oplib.h>
 #include <asm/ldc.h>
 
+static int prom_node_to_node(const char *type, int node)
+{
+       unsigned long args[5];
+
+       args[0] = (unsigned long) type;
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
+}
+
 /* Return the child of node 'node' or zero if no this node has no
  * direct descendent.
  */
 inline int __prom_getchild(int node)
 {
-       return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+       return prom_node_to_node("child", node);
 }
 
 inline int prom_getchild(int node)
 {
        int cnode;
 
-       if(node == -1) return 0;
+       if (node == -1)
+               return 0;
        cnode = __prom_getchild(node);
-       if(cnode == -1) return 0;
-       return (int)cnode;
+       if (cnode == -1)
+               return 0;
+       return cnode;
 }
 EXPORT_SYMBOL(prom_getchild);
 
@@ -39,10 +56,12 @@ inline int prom_getparent(int node)
 {
        int cnode;
 
-       if(node == -1) return 0;
-       cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
-       if(cnode == -1) return 0;
-       return (int)cnode;
+       if (node == -1)
+               return 0;
+       cnode = prom_node_to_node("parent", node);
+       if (cnode == -1)
+               return 0;
+       return cnode;
 }
 
 /* Return the next sibling of node 'node' or zero if no more siblings
@@ -50,7 +69,7 @@ inline int prom_getparent(int node)
  */
 inline int __prom_getsibling(int node)
 {
-       return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node);
+       return prom_node_to_node(prom_peer_name, node);
 }
 
 inline int prom_getsibling(int node)
@@ -72,11 +91,21 @@ EXPORT_SYMBOL(prom_getsibling);
  */
 inline int prom_getproplen(int node, const char *prop)
 {
-       if((!node) || (!prop)) return -1;
-       return p1275_cmd ("getproplen", 
-                         P1275_ARG(1,P1275_ARG_IN_STRING)|
-                         P1275_INOUT(2, 1), 
-                         node, prop);
+       unsigned long args[6];
+
+       if (!node || !prop)
+               return -1;
+
+       args[0] = (unsigned long) "getproplen";
+       args[1] = 2;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) prop;
+       args[5] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[5];
 }
 EXPORT_SYMBOL(prom_getproplen);
 
@@ -87,19 +116,25 @@ EXPORT_SYMBOL(prom_getproplen);
 inline int prom_getproperty(int node, const char *prop,
                            char *buffer, int bufsize)
 {
+       unsigned long args[8];
        int plen;
 
        plen = prom_getproplen(node, prop);
-       if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
+       if ((plen > bufsize) || (plen == 0) || (plen == -1))
                return -1;
-       } else {
-               /* Ok, things seem all right. */
-               return p1275_cmd(prom_getprop_name, 
-                                P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                P1275_ARG(2,P1275_ARG_OUT_BUF)|
-                                P1275_INOUT(4, 1), 
-                                node, prop, buffer, P1275_SIZE(plen));
-       }
+
+       args[0] = (unsigned long) prom_getprop_name;
+       args[1] = 4;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) prop;
+       args[5] = (unsigned long) buffer;
+       args[6] = bufsize;
+       args[7] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[7];
 }
 EXPORT_SYMBOL(prom_getproperty);
 
@@ -110,7 +145,7 @@ inline int prom_getint(int node, const char *prop)
 {
        int intprop;
 
-       if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+       if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
                return intprop;
 
        return -1;
@@ -126,7 +161,8 @@ int prom_getintdefault(int node, const char *property, int deflt)
        int retval;
 
        retval = prom_getint(node, property);
-       if(retval == -1) return deflt;
+       if (retval == -1)
+               return deflt;
 
        return retval;
 }
@@ -138,7 +174,8 @@ int prom_getbool(int node, const char *prop)
        int retval;
 
        retval = prom_getproplen(node, prop);
-       if(retval == -1) return 0;
+       if (retval == -1)
+               return 0;
        return 1;
 }
 EXPORT_SYMBOL(prom_getbool);
@@ -152,7 +189,8 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
        int len;
 
        len = prom_getproperty(node, prop, user_buf, ubuf_size);
-       if(len != -1) return;
+       if (len != -1)
+               return;
        user_buf[0] = 0;
 }
 EXPORT_SYMBOL(prom_getstring);
@@ -164,7 +202,8 @@ int prom_nodematch(int node, const char *name)
 {
        char namebuf[128];
        prom_getproperty(node, "name", namebuf, sizeof(namebuf));
-       if(strcmp(namebuf, name) == 0) return 1;
+       if (strcmp(namebuf, name) == 0)
+               return 1;
        return 0;
 }
 
@@ -190,16 +229,29 @@ int prom_searchsiblings(int node_start, const char *nodename)
 }
 EXPORT_SYMBOL(prom_searchsiblings);
 
+static const char *prom_nextprop_name = "nextprop";
+
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
 inline char *prom_firstprop(int node, char *buffer)
 {
+       unsigned long args[7];
+
        *buffer = 0;
-       if(node == -1) return buffer;
-       p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
-                              P1275_INOUT(3, 0), 
-                              node, (char *) 0x0, buffer);
+       if (node == -1)
+               return buffer;
+
+       args[0] = (unsigned long) prom_nextprop_name;
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = 0;
+       args[5] = (unsigned long) buffer;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
        return buffer;
 }
 EXPORT_SYMBOL(prom_firstprop);
@@ -210,9 +262,10 @@ EXPORT_SYMBOL(prom_firstprop);
  */
 inline char *prom_nextprop(int node, const char *oprop, char *buffer)
 {
+       unsigned long args[7];
        char buf[32];
 
-       if(node == -1) {
+       if (node == -1) {
                *buffer = 0;
                return buffer;
        }
@@ -220,10 +273,17 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer)
                strcpy (buf, oprop);
                oprop = buf;
        }
-       p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                   P1275_ARG(2,P1275_ARG_OUT_32B)|
-                                   P1275_INOUT(3, 0), 
-                                   node, oprop, buffer); 
+
+       args[0] = (unsigned long) prom_nextprop_name;
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) oprop;
+       args[5] = (unsigned long) buffer;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
        return buffer;
 }
 EXPORT_SYMBOL(prom_nextprop);
@@ -231,12 +291,19 @@ EXPORT_SYMBOL(prom_nextprop);
 int
 prom_finddevice(const char *name)
 {
+       unsigned long args[5];
+
        if (!name)
                return 0;
-       return p1275_cmd(prom_finddev_name,
-                        P1275_ARG(0,P1275_ARG_IN_STRING)|
-                        P1275_INOUT(1, 1), 
-                        name);
+       args[0] = (unsigned long) "finddevice";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned long) name;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[4];
 }
 EXPORT_SYMBOL(prom_finddevice);
 
@@ -247,7 +314,7 @@ int prom_node_has_property(int node, const char *prop)
        *buf = 0;
        do {
                prom_nextprop(node, buf, buf);
-               if(!strcmp(buf, prop))
+               if (!strcmp(buf, prop))
                        return 1;
        } while (*buf);
        return 0;
@@ -260,6 +327,8 @@ EXPORT_SYMBOL(prom_node_has_property);
 int
 prom_setprop(int node, const char *pname, char *value, int size)
 {
+       unsigned long args[8];
+
        if (size == 0)
                return 0;
        if ((pname == 0) || (value == 0))
@@ -271,19 +340,37 @@ prom_setprop(int node, const char *pname, char *value, int size)
                return 0;
        }
 #endif
-       return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
-                                         P1275_ARG(2,P1275_ARG_IN_BUF)|
-                                         P1275_INOUT(4, 1), 
-                                         node, pname, value, P1275_SIZE(size));
+       args[0] = (unsigned long) "setprop";
+       args[1] = 4;
+       args[2] = 1;
+       args[3] = (unsigned int) node;
+       args[4] = (unsigned long) pname;
+       args[5] = (unsigned long) value;
+       args[6] = size;
+       args[7] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[7];
 }
 EXPORT_SYMBOL(prom_setprop);
 
 inline int prom_inst2pkg(int inst)
 {
+       unsigned long args[5];
        int node;
        
-       node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
-       if (node == -1) return 0;
+       args[0] = (unsigned long) "instance-to-package";
+       args[1] = 1;
+       args[2] = 1;
+       args[3] = (unsigned int) inst;
+       args[4] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       node = (int) args[4];
+       if (node == -1)
+               return 0;
        return node;
 }
 
@@ -296,17 +383,28 @@ prom_pathtoinode(const char *path)
        int node, inst;
 
        inst = prom_devopen (path);
-       if (inst == 0) return 0;
-       node = prom_inst2pkg (inst);
-       prom_devclose (inst);
-       if (node == -1) return 0;
+       if (inst == 0)
+               return 0;
+       node = prom_inst2pkg(inst);
+       prom_devclose(inst);
+       if (node == -1)
+               return 0;
        return node;
 }
 
 int prom_ihandle2path(int handle, char *buffer, int bufsize)
 {
-       return p1275_cmd("instance-to-path",
-                        P1275_ARG(1,P1275_ARG_OUT_BUF)|
-                        P1275_INOUT(3, 1),
-                        handle, buffer, P1275_SIZE(bufsize));
+       unsigned long args[7];
+
+       args[0] = (unsigned long) "instance-to-path";
+       args[1] = 3;
+       args[2] = 1;
+       args[3] = (unsigned int) handle;
+       args[4] = (unsigned long) buffer;
+       args[5] = bufsize;
+       args[6] = (unsigned long) -1;
+
+       p1275_cmd_direct(args);
+
+       return (int) args[6];
 }
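
For reference, every helper converted above follows the same p1275_cmd_direct() calling convention. The annotated sketch below restates that layout; it is inferred from the hunks in this file, and the extern prototype shown for p1275_cmd_direct() is an assumption rather than a copy of the header.

/* Argument layout handed to the PROM call wrapper:
 *   args[0]    service name, e.g. "getproplen"
 *   args[1]    number of input cells that follow
 *   args[2]    number of return cells
 *   args[3..]  the input arguments themselves
 *   args[last] pre-set to (unsigned long) -1; the firmware writes the
 *              return value back into this slot
 */
extern void p1275_cmd_direct(unsigned long *args);	/* assumed prototype */

static int example_getproplen(int node, const char *prop)
{
	unsigned long args[6];

	args[0] = (unsigned long) "getproplen";	/* service name */
	args[1] = 2;				/* two inputs: node, prop */
	args[2] = 1;				/* one return value */
	args[3] = (unsigned int) node;
	args[4] = (unsigned long) prop;
	args[5] = (unsigned long) -1;		/* filled in by the PROM */

	p1275_cmd_direct(args);

	return (int) args[5];
}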
index de317d0c329486b57efb3568f7c4514393ac5677..ebc680717e59f69394793eeea1cd6e5e9ae50034 100644 (file)
@@ -690,7 +690,7 @@ static void with_console(struct mc_request *req, void (*proc)(void *),
 static void sysrq_proc(void *arg)
 {
        char *op = arg;
-       handle_sysrq(*op, NULL);
+       handle_sysrq(*op);
 }
 
 void mconsole_sysrq(struct mc_request *req)
index c0427295e8f58956e32f833c78c9ad75676778d2..1ca132fc0d039cbc8c3b7f605fa4dbbd91db7291 100644 (file)
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
 extern void check_tsc_sync_target(void);
 
 extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
 
 #endif /* _ASM_X86_TSC_H */
index febb12cea795268f224a9d39937e3f27bb86bf67..7e578e9cc58bd5062d30776d431dabdcf724ac67 100644 (file)
@@ -497,6 +497,8 @@ static int p4_hw_config(struct perf_event *event)
                event->hw.config |= event->attr.config &
                        (p4_config_pack_escr(P4_ESCR_MASK_HT) |
                         p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
+
+               event->hw.config &= ~P4_CCCR_FORCE_OVF;
        }
 
        rc = x86_setup_perfctr(event);
index ce8e50239332470ba329dd3045692fee42f69a16..d632934cb6386947352650f262745eb3c93c68ce 100644 (file)
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
        local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+       if (!sched_clock_stable)
+               return;
+
+       cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with an invariant TSC, the TSC gets reset in some of
+ * the ACPI system sleep states. On some systems the BIOS also reinits the
+ * TSC to an arbitrary value (still sync'd across cpu's) during resume from
+ * such sleep states. To cope with this, recompute the cyc2ns_offset for
+ * each cpu so that sched_clock() continues from the point where it left
+ * off during suspend.
+ */
+void restore_sched_clock_state(void)
+{
+       unsigned long long offset;
+       unsigned long flags;
+       int cpu;
+
+       if (!sched_clock_stable)
+               return;
+
+       local_irq_save(flags);
+
+       get_cpu_var(cyc2ns_offset) = 0;
+       offset = cyc2ns_suspend - sched_clock();
+
+       for_each_possible_cpu(cpu)
+               per_cpu(cyc2ns_offset, cpu) = offset;
+
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
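
The comment above restore_sched_clock_state() explains the idea; the toy user-space program below (not kernel code, all names invented for illustration) walks through the same arithmetic: after the TSC is reset, the offset is recomputed so that the clock resumes at the value saved before suspend.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the cyc2ns bookkeeping: clock = tsc * scale + offset. */
static const uint64_t scale = 3;	/* pretend "ns per cycle" */
static int64_t offset;			/* stands in for per-cpu cyc2ns_offset */

static uint64_t toy_sched_clock(uint64_t tsc)
{
	return tsc * scale + offset;
}

int main(void)
{
	uint64_t saved = toy_sched_clock(1000);	/* save_sched_clock_state() */
	uint64_t tsc_after_resume = 40;		/* firmware reset the TSC */

	/* restore_sched_clock_state(): zero the offset, then pick a new one
	 * so the next reading continues from the saved value. */
	offset = 0;
	offset = saved - toy_sched_clock(tsc_after_resume);

	printf("%llu\n", (unsigned long long)toy_sched_clock(tsc_after_resume));
	return 0;
}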
index e7e8c5f549563a6b65a4b139d8202ee878c80c74..87bb35e34ef175d0a8b3beedd5c4d76010c9169d 100644 (file)
@@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 void save_processor_state(void)
 {
        __save_processor_state(&saved_context);
+       save_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(save_processor_state);
@@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 void restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
+       restore_sched_clock_state();
 }
 #ifdef CONFIG_X86_32
 EXPORT_SYMBOL(restore_processor_state);
index 554c002a1e1afcd13a71c90a25eff86057e56832..0f456386cce5dc3a0c08ed45bfaf0a268be25819 100644 (file)
@@ -72,13 +72,17 @@ void __init xen_unplug_emulated_devices(void)
 {
        int r;
 
+       /* user explicitly requested no unplug */
+       if (xen_emul_unplug & XEN_UNPLUG_NEVER)
+               return;
        /* check the version of the xen platform PCI device */
        r = check_platform_magic();
        /* If the version matches enable the Xen platform PCI driver.
-        * Also enable the Xen platform PCI driver if the version is really old
-        * and the user told us to ignore it. */
+        * Also enable the Xen platform PCI driver if the host does
+        * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
+        * but the user told us that unplugging is unnecessary. */
        if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
-                       (xen_emul_unplug & XEN_UNPLUG_IGNORE)))
+                       (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
                return;
        /* Set the default value of xen_emul_unplug depending on whether or
         * not the Xen PV frontends and the Xen platform PCI driver have
@@ -99,7 +103,7 @@ void __init xen_unplug_emulated_devices(void)
                }
        }
        /* Now unplug the emulated devices */
-       if (!(xen_emul_unplug & XEN_UNPLUG_IGNORE))
+       if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
                outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
        xen_platform_pci_unplug = xen_emul_unplug;
 }
@@ -125,8 +129,10 @@ static int __init parse_xen_emul_unplug(char *arg)
                        xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
                else if (!strncmp(p, "nics", l))
                        xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
-               else if (!strncmp(p, "ignore", l))
-                       xen_emul_unplug |= XEN_UNPLUG_IGNORE;
+               else if (!strncmp(p, "unnecessary", l))
+                       xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
+               else if (!strncmp(p, "never", l))
+                       xen_emul_unplug |= XEN_UNPLUG_NEVER;
                else
                        printk(KERN_WARNING "unrecognised option '%s' "
                                 "in parameter 'xen_emul_unplug'\n", p);
index 65e3e2708371962a3878061a8b71666230e80c2c..11ec911016c6ab3e81cefe7e202c4cc4c95762f0 100644 (file)
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
 config PATA_WINBOND_VLB
        tristate "Winbond W83759A VLB PATA support (Experimental)"
        depends on ISA && EXPERIMENTAL
+       select PATA_LEGACY
        help
          Support for the Winbond W83759A controller on Vesa Local Bus
          systems.
index 158eaa961b1e6f0d247f78691b8f4b309a9d6622..d5df04a395ca6e9eb057cfb7496b3101ffee69b7 100644 (file)
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI)                += pata_qdi.o
 obj-$(CONFIG_PATA_RB532)       += pata_rb532_cf.o
 obj-$(CONFIG_PATA_RZ1000)      += pata_rz1000.o
 obj-$(CONFIG_PATA_SAMSUNG_CF)  += pata_samsung_cf.o
-obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
 
 obj-$(CONFIG_PATA_PXA)         += pata_pxa.o
 
index fe75d8befc3a27aaa5d6c8048cbaa498416c7322..013727b20417226249ca942d28708164a497c2dc 100644 (file)
@@ -60,6 +60,7 @@ enum board_ids {
        board_ahci,
        board_ahci_ign_iferr,
        board_ahci_nosntf,
+       board_ahci_yes_fbs,
 
        /* board IDs for specific chipsets in alphabetical order */
        board_ahci_mcp65,
@@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_yes_fbs] =
+       {
+               AHCI_HFLAGS     (AHCI_HFLAG_YES_FBS),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        /* by chipsets */
        [board_ahci_mcp65] =
        {
@@ -362,6 +371,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        /* Marvell */
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },        /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },        /* 6121 */
+       { PCI_DEVICE(0x1b4b, 0x9123),
+         .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
index 7113c5724471c7df3a37bae273bef3c73fb45d1b..474427b6f99f47a8dd997042cc9f2d271a743228 100644 (file)
@@ -209,6 +209,7 @@ enum {
                                                        link offline */
        AHCI_HFLAG_NO_SNTF              = (1 << 12), /* no sntf */
        AHCI_HFLAG_NO_FPDMA_AA          = (1 << 13), /* no FPDMA AA */
+       AHCI_HFLAG_YES_FBS              = (1 << 14), /* force FBS cap on */
 
        /* ap->flags bits */
 
index 81e772a94d59857187fda27dbfe7702469fb590f..666850d31df2c304b9d1409e21e20355d52f2a65 100644 (file)
@@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_SNTF;
        }
 
+       if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+               dev_printk(KERN_INFO, dev,
+                          "controller can do FBS, turning on CAP_FBS\n");
+               cap |= HOST_CAP_FBS;
+       }
+
        if (force_port_map && port_map != force_port_map) {
                dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
                           port_map, force_port_map);
@@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap)
                u32 cmd = readl(port_mmio + PORT_CMD);
                if (cmd & PORT_CMD_FBSCP)
                        pp->fbs_supported = true;
-               else
+               else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+                       dev_printk(KERN_INFO, dev,
+                                  "port %d can do FBS, forcing FBSCP\n",
+                                  ap->port_no);
+                       pp->fbs_supported = true;
+               } else
                        dev_printk(KERN_WARNING, dev,
-                                  "The port is not capable of FBS\n");
+                                  "port %d is not capable of FBS\n",
+                                  ap->port_no);
        }
 
        if (pp->fbs_supported) {
index 7ef7c4f216fa58567e161c82118155411d070074..c035b3d041ee1572492d7a17f56323bde0fccacd 100644 (file)
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
        qc->flags |= ATA_QCFLAG_ACTIVE;
        ap->qc_active |= 1 << qc->tag;
 
-       /* We guarantee to LLDs that they will have at least one
+       /*
+        * We guarantee to LLDs that they will have at least one
         * non-zero sg if the command is a data command.
         */
-       BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+       if (WARN_ON_ONCE(ata_is_data(prot) &&
+                        (!qc->sg || !qc->n_elem || !qc->nbytes)))
+               goto sys_err;
 
        if (ata_is_dma(prot) || (ata_is_pio(prot) &&
                                 (ap->flags & ATA_FLAG_PIO_DMA)))
                if (ata_sg_setup(qc))
-                       goto sg_err;
+                       goto sys_err;
 
        /* if device is sleeping, schedule reset and abort the link */
        if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
                goto err;
        return;
 
-sg_err:
+sys_err:
        qc->err_mask |= AC_ERR_SYSTEM;
 err:
        ata_qc_complete(qc);
index 674c1436491f5e5e6df3a2334ee7f995775f7634..3b82d8ef76f0ffc81e6bb62e9d3ea19d1be57195 100644 (file)
@@ -2735,10 +2735,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
 
-       /* see ata_dma_blacklisted() */
-       BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
-              qc->tf.protocol == ATAPI_PROT_DMA);
-
        /* defer PIO handling to sff_qc_issue */
        if (!ata_is_dma(qc->tf.protocol))
                return ata_sff_qc_issue(qc);
index 9f5da1c7454be3e06f37586a77dc1377e7cd4a9d..905ff76d3cbbc0d18e828cd45922b53fd935f275 100644 (file)
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
 
                if (pair) {
                        struct ata_timing tp;
-
                        ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
                        ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
-                       if (pair->dma_mode) {
-                               ata_timing_compute(pair, pair->dma_mode,
-                                               &tp, T, 0);
-                               ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
-                       }
                }
        }
 
index 9df1ff7e1eaac84751766ec04c48b8f40da963c7..eaf194138f219f187e819fd56f4cce86b0cc90f7 100644 (file)
@@ -44,6 +44,9 @@
  *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
  *  opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
  *
+ *  Support for the Winbond 83759A when operating in advanced mode.
+ *  Multichip mode is not currently supported.
+ *
  *  Use the autospeed and pio_mask options with:
  *     Appian ADI/2 aka CLPD7220 or AIC25VL01.
  *  Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b;               /* HT 6560A on primary 1, second 2, both 3 */
 static int opti82c611a;                /* Opti82c611A on primary 1, sec 2, both 3 */
 static int opti82c46x;         /* Opti 82c465MV present(pri/sec autodetect) */
 static int qdi;                        /* Set to probe QDI controllers */
-static int winbond;            /* Set to probe Winbond controllers,
-                                       give I/O port if non standard */
 static int autospeed;          /* Chip present which snoops speed changes */
 static int pio_mask = ATA_PIO4;        /* PIO range for autospeed devices */
 static int iordy_mask = 0xFFFFFFFF;    /* Use iordy if available */
 
+#ifdef PATA_WINBOND_VLB_MODULE
+static int winbond = 1;                /* Set to probe Winbond controllers,
+                                       give I/O port if non standard */
+#else
+static int winbond;            /* Set to probe Winbond controllers,
+                                       give I/O port if non standard */
+#endif
+
 /**
  *     legacy_probe_add        -       Add interface to probe list
  *     @port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for legacy ATA");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
 
 module_param(probe_all, int, 0);
 module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
 module_param(opti82c611a, int, 0);
 module_param(opti82c46x, int, 0);
 module_param(qdi, int, 0);
+module_param(winbond, int, 0);
 module_param(pio_mask, int, 0);
 module_param(iordy_mask, int, 0);
 
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644 (file)
index 6d8619b..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *    pata_winbond.c - Winbond VLB ATA controllers
- *     (C) 2006 Red Hat
- *
- *    Support for the Winbond 83759A when operating in advanced mode.
- *    Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4      /* Two winbond controllers, two channels each */
-
-struct winbond_data {
-       unsigned long config;
-       struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&winbond_lock, flags);
-       outb(reg, port + 0x01);
-       outb(val, port + 0x02);
-       spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
-       u8 val;
-
-       unsigned long flags;
-       spin_lock_irqsave(&winbond_lock, flags);
-       outb(reg, port + 0x01);
-       val = inb(port + 0x02);
-       spin_unlock_irqrestore(&winbond_lock, flags);
-
-       return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
-       struct ata_timing t;
-       struct winbond_data *winbond = ap->host->private_data;
-       int active, recovery;
-       u8 reg;
-       int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
-       reg = winbond_readcfg(winbond->config, 0x81);
-
-       /* Get the timing data in cycles */
-       if (reg & 0x40)         /* Fast VLB bus, assume 50MHz */
-               ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
-       else
-               ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
-       active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
-       recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
-       timing = (active << 4) | recovery;
-       winbond_writecfg(winbond->config, timing, reg);
-
-       /* Load the setup timing */
-
-       reg = 0x35;
-       if (adev->class != ATA_DEV_ATA)
-               reg |= 0x08;    /* FIFO off */
-       if (!ata_pio_need_iordy(adev))
-               reg |= 0x02;    /* IORDY off */
-       reg |= (clamp_val(t.setup, 0, 3) << 6);
-       winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
-                       unsigned char *buf, unsigned int buflen, int rw)
-{
-       struct ata_port *ap = dev->link->ap;
-       int slop = buflen & 3;
-
-       if (ata_id_has_dword_io(dev->id)) {
-               if (rw == READ)
-                       ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-               else
-                       iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
-               if (unlikely(slop)) {
-                       __le32 pad;
-                       if (rw == READ) {
-                               pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
-                               memcpy(buf + buflen - slop, &pad, slop);
-                       } else {
-                               memcpy(&pad, buf + buflen - slop, slop);
-                               iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
-                       }
-                       buflen += 4 - slop;
-               }
-       } else
-               buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
-       return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
-       ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
-       .inherits       = &ata_sff_port_ops,
-       .sff_data_xfer  = winbond_data_xfer,
-       .cable_detect   = ata_cable_40wire,
-       .set_piomode    = winbond_set_piomode,
-};
-
-/**
- *     winbond_init_one                -       attach a winbond interface
- *     @type: Type to display
- *     @io: I/O port start
- *     @irq: interrupt line
- *     @fast: True if on a > 33Mhz VLB
- *
- *     Register a VLB bus IDE interface. Such interfaces are PIO and we
- *     assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
-       struct platform_device *pdev;
-       u8 reg;
-       int i, rc;
-
-       reg = winbond_readcfg(port, 0x81);
-       reg |= 0x80;    /* jumpered mode off */
-       winbond_writecfg(port, 0x81, reg);
-       reg = winbond_readcfg(port, 0x83);
-       reg |= 0xF0;    /* local control */
-       winbond_writecfg(port, 0x83, reg);
-       reg = winbond_readcfg(port, 0x85);
-       reg |= 0xF0;    /* programmable timing */
-       winbond_writecfg(port, 0x85, reg);
-
-       reg = winbond_readcfg(port, 0x81);
-
-       if (!(reg & 0x03))              /* Disabled */
-               return -ENODEV;
-
-       for (i = 0; i < 2 ; i ++) {
-               unsigned long cmd_port = 0x1F0 - (0x80 * i);
-               unsigned long ctl_port = cmd_port + 0x206;
-               struct ata_host *host;
-               struct ata_port *ap;
-               void __iomem *cmd_addr, *ctl_addr;
-
-               if (!(reg & (1 << i)))
-                       continue;
-
-               pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
-               if (IS_ERR(pdev))
-                       return PTR_ERR(pdev);
-
-               rc = -ENOMEM;
-               host = ata_host_alloc(&pdev->dev, 1);
-               if (!host)
-                       goto err_unregister;
-               ap = host->ports[0];
-
-               rc = -ENOMEM;
-               cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
-               ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
-               if (!cmd_addr || !ctl_addr)
-                       goto err_unregister;
-
-               ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
-               ap->ops = &winbond_port_ops;
-               ap->pio_mask = ATA_PIO4;
-               ap->flags |= ATA_FLAG_SLAVE_POSS;
-               ap->ioaddr.cmd_addr = cmd_addr;
-               ap->ioaddr.altstatus_addr = ctl_addr;
-               ap->ioaddr.ctl_addr = ctl_addr;
-               ata_sff_std_ports(&ap->ioaddr);
-
-               /* hook in a private data structure per channel */
-               host->private_data = &winbond_data[nr_winbond_host];
-               winbond_data[nr_winbond_host].config = port;
-               winbond_data[nr_winbond_host].platform_dev = pdev;
-
-               /* activate */
-               rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
-                                      &winbond_sht);
-               if (rc)
-                       goto err_unregister;
-
-               winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
-       }
-
-       return 0;
-
- err_unregister:
-       platform_device_unregister(pdev);
-       return rc;
-}
-
-/**
- *     winbond_init            -       attach winbond interfaces
- *
- *     Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
-       static const unsigned long config[2] = { 0x130, 0x1B0 };
-
-       int ct = 0;
-       int i;
-
-       if (probe_winbond == 0)
-               return -ENODEV;
-
-       /*
-        *      Check both base addresses
-        */
-
-       for (i = 0; i < 2; i++) {
-               if (probe_winbond & (1<<i)) {
-                       int ret = 0;
-                       unsigned long port = config[i];
-
-                       if (request_region(port, 2, "pata_winbond")) {
-                               ret = winbond_init_one(port);
-                               if (ret <= 0)
-                                       release_region(port, 2);
-                               else ct+= ret;
-                       }
-               }
-       }
-       if (ct != 0)
-               return 0;
-       return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
-       int i;
-
-       for (i = 0; i < nr_winbond_host; i++) {
-               ata_host_detach(winbond_host[i]);
-               release_region(winbond_data[i].config, 2);
-               platform_device_unregister(winbond_data[i].platform_dev);
-       }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
index 2673a3d1480654ceec39f2ab144b15cde72ba72e..6cf57c5c2b5f3d50d966e63bf1e1ad0d32dea13d 100644 (file)
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
        struct scatterlist *sg = qc->sg;
        struct ata_port *ap = qc->ap;
-       u32 dma_chan;
+       int dma_chan;
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        int err;
index 9463c71dd38ece66e4ad777208381573754e3f64..81982594a014b603aa2ebd8b5944ed13049c929b 100644 (file)
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  *     LOCKING:
  *     Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-       struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 cmd;
 
        /* clear start/stop bit */
        cmd = readl(port_mmio + BMDMA_CMD);
-       cmd &= ~ATA_DMA_START;
-       writelfl(cmd, port_mmio + BMDMA_CMD);
+       if (cmd & ATA_DMA_START) {
+               cmd &= ~ATA_DMA_START;
+               writelfl(cmd, port_mmio + BMDMA_CMD);
+
+               /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+               ata_sff_dma_pause(ap);
+       }
+}
 
-       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-       ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+       mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
        reg = readl(port_mmio + BMDMA_STATUS);
        if (reg & ATA_DMA_ACTIVE)
                status = ATA_DMA_ACTIVE;
-       else
+       else if (reg & ATA_DMA_ERR)
                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+       else {
+               /*
+                * Just because DMA_ACTIVE is 0 (DMA completed),
+                * this does _not_ mean the device is "done".
+                * So we should not yet be signalling ATA_DMA_INTR
+                * in some cases.  Eg. DSM/TRIM, and perhaps others.
+                */
+               mv_bmdma_stop_ap(ap);
+               if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+                       status = 0;
+               else
+                       status = ATA_DMA_INTR;
+       }
        return status;
 }
 
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
        switch (tf->protocol) {
        case ATA_PROT_DMA:
+               if (tf->command == ATA_CMD_DSM)
+                       return;
+               /* fall-thru */
        case ATA_PROT_NCQ:
                break;  /* continue below */
        case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
        if ((tf->protocol != ATA_PROT_DMA) &&
            (tf->protocol != ATA_PROT_NCQ))
                return;
+       if (tf->command == ATA_CMD_DSM)
+               return;  /* use bmdma for this */
 
        /* Fill in Gen IIE command request block */
        if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
        switch (qc->tf.protocol) {
        case ATA_PROT_DMA:
+               if (qc->tf.command == ATA_CMD_DSM) {
+                       if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
+                               return AC_ERR_OTHER;
+                       break;  /* use bmdma for this */
+               }
+               /* fall thru */
        case ATA_PROT_NCQ:
                mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
                pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
index c8a44f5e0584bec3d72e4bbada17ebb80efdd20b..40af43ebd92d355e5b96092d0f4b4ac0a3fd05ea 100644 (file)
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
 out:
        if (retval) {
                release_firmware(firmware);
-               firmware_p = NULL;
+               *firmware_p = NULL;
        }
 
        return retval;
index ac1b682edecb362831eb32e7fd09faaff69da428..ab735a605cf3f23e0f516b21ae07f0d6c5e7e914 100644 (file)
@@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev,
                char *type;
                int len;
                /* no unplug has been done: do not hook devices != xen vbds */
-               if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+               if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
                        int major;
 
                        if (!VDEV_IS_EXTENDED(vdevice))
index e0249722d25f1c8b9dd05fa6705187afc8e23256..f953c96efc86499165f6006d1c6b97ee1e430884 100644 (file)
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
                if (hangcheck_dump_tasks) {
                        printk(KERN_CRIT "Hangcheck: Task state:\n");
 #ifdef CONFIG_MAGIC_SYSRQ
-                       handle_sysrq('t', NULL);
+                       handle_sysrq('t');
 #endif  /* CONFIG_MAGIC_SYSRQ */
                }
                if (hangcheck_reboot) {
index fa27d1676ee5e0405cc5497bec6c703ade6e10b2..3afd62e856ebfea311eba94c496ac6628f586458 100644 (file)
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
                                        if (sysrq_pressed)
                                                continue;
                                } else if (sysrq_pressed) {
-                                       handle_sysrq(buf[i], tty);
+                                       handle_sysrq(buf[i]);
                                        sysrq_pressed = 0;
                                        continue;
                                }
index 1f4b6de65a2dcac06a565aa7777359e1e873b444..a2bc885ce60a8e2bab93a815ab6ffc7d86640220 100644 (file)
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
                        hp->sysrq = 1;
                        continue;
                } else if (hp->sysrq) {
-                       handle_sysrq(c, hp->tty);
+                       handle_sysrq(c);
                        hp->sysrq = 0;
                        continue;
                }
index 07f3ea38b5828eda0a16ceed8b23fdbdee891d79..d4b71e8d0d23a13648be2e55f60f606f55b99edd 100644 (file)
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
        /* disable DSS reporting */
        i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
                                CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
-       if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+       if (tty->termios->c_cflag & HUPCL) {
                i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
                pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
                i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
                                if ( pCh )
                                {
                                        rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+                                       if (rc)
+                                               rc = -EFAULT;
                                } else {
                                        rc = -ENODEV;
                                }
index 79c3bc69165afb919f8084cf274c4d2310def8c2..7c79d243acc9b3fac68d6b4062fc9a43bb93eb65 100644 (file)
@@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
                }
                info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
                configure_r_port(tty, info, NULL);
+               mutex_unlock(&info->port.mutex);
                return 0;
        }
 
index fef80cfcab5c8caf231b4435d423095dfb90815e..e63b830c86cc0778cb45827dc46a7e207488b586 100644 (file)
@@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
        if (info->port.count == 1) {
                /* 1st open on this device, init hardware */
                retval = startup(info);
-               if (retval < 0)
+               if (retval < 0) {
+                       mutex_unlock(&info->port.mutex);
                        goto cleanup;
+               }
        }
        mutex_unlock(&info->port.mutex);
        retval = block_til_ready(tty, filp, info);
index 878ac0c2cc6864a84c7df230c5c12efee1944607..ef31bb81e8438a83462a48d4fa6ee0bf5cff7faf 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
-#include <linux/tty.h>
 #include <linux/mount.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
 __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
 
 
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
 {
        int i;
 
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
 };
 
 #ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
 {
        struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
        schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
 #endif
 
 #ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
 {
        struct kbd_struct *kbd = &kbd_table[fg_console];
 
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
 #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
 #endif /* CONFIG_VT */
 
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
 {
        char *killer = NULL;
 
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
 {
        lockdep_off();
        local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
        .enable_mask    = SYSRQ_ENABLE_BOOT,
 };
 
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
 {
        emergency_sync();
 }
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
        .enable_mask    = SYSRQ_ENABLE_SYNC,
 };
 
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
 {
        sysrq_timer_list_show();
 }
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
        .action_msg     = "Show clockevent devices & pending hrtimers (no others)",
 };
 
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
 {
        emergency_remount();
 }
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
 };
 
 #ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
 {
        debug_show_all_locks();
 }
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
 
 static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
 {
        /*
         * Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
 };
 #endif
 
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
 {
        struct pt_regs *regs = get_irq_regs();
        if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
 {
        show_state();
 }
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
 {
        show_state_filter(TASK_UNINTERRUPTIBLE);
 }
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 #ifdef CONFIG_TRACING
 #include <linux/ftrace.h>
 
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
 {
        ftrace_dump(DUMP_ALL);
 }
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
 #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
 #endif
 
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
 {
        show_mem();
 }
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
        }
 }
 
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
 {
        send_sig_all(SIGTERM);
        console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
 
 static DECLARE_WORK(moom_work, moom_callback);
 
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
 {
        schedule_work(&moom_work);
 }
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
 };
 
 #ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
 {
        emergency_thaw_all();
 }
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
 };
 #endif
 
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
 {
        send_sig_all(SIGKILL);
        console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
 };
 
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
 {
        normalize_rt_tasks();
 }
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
                 sysrq_key_table[i] = op_p;
 }
 
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
 {
        struct sysrq_key_op *op_p;
        int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
                if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
                        printk("%s\n", op_p->action_msg);
                        console_loglevel = orig_log_level;
-                       op_p->handler(key, tty);
+                       op_p->handler(key);
                } else {
                        printk("This sysrq operation is disabled.\n");
                }
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
        spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
 }
 
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
 {
        if (sysrq_on())
-               __handle_sysrq(key, tty, 1);
+               __handle_sysrq(key, true);
 }
 EXPORT_SYMBOL(handle_sysrq);
 
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
 
        default:
                if (sysrq_down && value && value != 2)
-                       __handle_sysrq(sysrq_xlate[code], NULL, 1);
+                       __handle_sysrq(sysrq_xlate[code], true);
                break;
        }
 
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 
                if (get_user(c, buf))
                        return -EFAULT;
-               __handle_sysrq(c, NULL, 0);
+               __handle_sysrq(c, false);
        }
 
        return count;
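
With this conversion a sysrq handler receives only the key that triggered it. A minimal, hypothetical module using the new one-argument signature might look like the sketch below; the key 'x', the messages, and the module itself are invented for illustration, while register_sysrq_key()/unregister_sysrq_key() are the existing registration helpers.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysrq.h>

/* New-style handler: takes just the key, no tty argument. */
static void example_sysrq_handler(int key)
{
	pr_info("example sysrq handler fired for key %d\n", key);
}

static struct sysrq_key_op example_sysrq_op = {
	.handler	= example_sysrq_handler,
	.help_msg	= "example(x)",
	.action_msg	= "Example sysrq action",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};

static int __init example_sysrq_init(void)
{
	return register_sysrq_key('x', &example_sysrq_op);
}

static void __exit example_sysrq_exit(void)
{
	unregister_sysrq_key('x', &example_sysrq_op);
}

module_init(example_sysrq_init);
module_exit(example_sysrq_exit);
MODULE_LICENSE("GPL");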
index 670239ab7511aea7f902449c33e675efe3202a0a..e7d5d6b5dcf69683d5ac7c59d6608643c5ae4e53 100644 (file)
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                amd64_handle_ce(mci, info);
        else if (ecc_type == 1)
                amd64_handle_ue(mci, info);
-
-       /*
-        * If main error is CE then overflow must be CE.  If main error is UE
-        * then overflow is unknown.  We'll call the overflow a CE - if
-        * panic_on_ue is set then we're already panic'ed and won't arrive
-        * here. Else, then apparently someone doesn't think that UE's are
-        * catastrophic.
-        */
-       if (info->nbsh & K8_NBSH_OVERFLOW)
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
 }
 
 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
index bae9351e9473872214d6bd9870776a2c48680909..9014df6f605d4882bba31656cd09a91187afb169 100644 (file)
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
 
        pr_emerg("MC%d_STATUS: ", m->bank);
 
-       pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+       pr_cont("%sorrected error, other errors lost: %s, "
                 "CPU context corrupt: %s",
                 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
-                ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
-                ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
+                ((m->status & MCI_STATUS_OVER) ? "yes"  : "no"),
                 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
        /* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
 static int __init mce_amd_init(void)
 {
        /*
-        * We can decode MCEs for Opteron and later CPUs:
+        * We can decode MCEs for K8, F10h and F11h CPUs:
         */
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
-           (boot_cpu_data.x86 >= 0xf))
-               atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+               return 0;
+
+       if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+               return 0;
+
+       atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 
        return 0;
 }
index 90288ec7c28420133d0deea13e3ba2f1f98d413c..84da748555bc824379d1b49733db1444f33fc8d5 100644 (file)
@@ -55,6 +55,9 @@
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp,
        int retcode = -EINVAL;
        char stack_kdata[128];
        char *kdata = NULL;
+       unsigned int usize, asize;
 
        dev = file_priv->minor->dev;
        atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp,
            ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
                goto err_i1;
        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
-           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+           (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+               u32 drv_size;
                ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+               drv_size = _IOC_SIZE(ioctl->cmd_drv);
+               usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+       }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
                ioctl = &drm_ioctls[nr];
                cmd = ioctl->cmd;
+               usize = asize = _IOC_SIZE(cmd);
        } else
                goto err_i1;
 
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp,
                retcode = -EACCES;
        } else {
                if (cmd & (IOC_IN | IOC_OUT)) {
-                       if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+                       if (asize <= sizeof(stack_kdata)) {
                                kdata = stack_kdata;
                        } else {
-                               kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+                               kdata = kmalloc(asize, GFP_KERNEL);
                                if (!kdata) {
                                        retcode = -ENOMEM;
                                        goto err_i1;
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp,
 
                if (cmd & IOC_IN) {
                        if (copy_from_user(kdata, (void __user *)arg,
-                                          _IOC_SIZE(cmd)) != 0) {
+                                          usize) != 0) {
                                retcode = -EFAULT;
                                goto err_i1;
                        }
-               }
+               } else
+                       memset(kdata, 0, usize);
+
                if (ioctl->flags & DRM_UNLOCKED)
                        retcode = func(dev, kdata, file_priv);
                else {
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp,
 
                if (cmd & IOC_OUT) {
                        if (copy_to_user((void __user *)arg, kdata,
-                                        _IOC_SIZE(cmd)) != 0)
+                                        usize) != 0)
                                retcode = -EFAULT;
                }
        }
index de82e201d6826d8ac0e422b22a2d0a93f8312d82..6a5e403f9aa160b54caad1bca22ba93dd27415e6 100644 (file)
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
        int i;
        enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
        struct drm_fb_helper_cmdline_mode *cmdline_mode;
-       struct drm_connector *connector = fb_helper_conn->connector;
+       struct drm_connector *connector;
 
        if (!fb_helper_conn)
                return false;
+       connector = fb_helper_conn->connector;
 
        cmdline_mode = &fb_helper_conn->cmdline_mode;
        if (!mode_option)
@@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 }
 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
 
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
 {
        schedule_work(&drm_fb_helper_restore_work);
 }
index 3a652a65546f16781542b7613865a8c1e692b60c..b744dad5c237f5371d571d13dc35d46a2fc95e2c 100644 (file)
@@ -41,6 +41,7 @@
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
                           struct drm_device * dev);
index e2f70a516c34412112078131203309c3656622d7..9bf93bc9a32c27798791684ab73512a046e92a9c 100644 (file)
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                }
 
                /* Contention */
+               mutex_unlock(&drm_global_mutex);
                schedule();
+               mutex_lock(&drm_global_mutex);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
index da99edc50888f2168c510054305e1c847e1fbefb..a6bfc302ed909ecf136888953b329d5acbdf65a3 100644 (file)
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 
 EXPORT_SYMBOL(drm_mm_put_block);
 
-static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
-                             unsigned alignment)
+static int check_free_hole(unsigned long start, unsigned long end,
+                          unsigned long size, unsigned alignment)
 {
        unsigned wasted = 0;
 
-       if (entry->size < size)
+       if (end - start < size)
                return 0;
 
        if (alignment) {
-               register unsigned tmp = entry->start % alignment;
+               unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }
 
-       if (entry->size >= size + wasted) {
+       if (end >= start + size + wasted) {
                return 1;
        }
 
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
        best_size = ~0UL;
 
        list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               if (!check_free_mm_node(entry, size, alignment))
+               if (!check_free_hole(entry->start, entry->start + entry->size,
+                                    size, alignment))
                        continue;
 
                if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
        best_size = ~0UL;
 
        list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               if (entry->start > end || (entry->start+entry->size) < start)
-                       continue;
+               unsigned long adj_start = entry->start < start ?
+                       start : entry->start;
+               unsigned long adj_end = entry->start + entry->size > end ?
+                       end : entry->start + entry->size;
 
-               if (!check_free_mm_node(entry, size, alignment))
+               if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
                if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        node->free_stack.prev = prev_free;
        node->free_stack.next = next_free;
 
-       if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+       if (check_free_hole(node->start, node->start + node->size,
+                           mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = node->start;
                mm->scan_hit_size = node->size;
 
index f1f473ea97d3bc3c7af129fe7e364c33e6a02de8..949326d2a8e5b6b3609d002daa75795c7cff6566 100644
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
                drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
                /* Fill in HSync values */
                drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
-               drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+               drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+               /* Fill in VSync values */
+               drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+               drm_mode->vsync_end = drm_mode->vsync_start + vsync;
        }
        /* 15/13. Find pixel clock frequency (kHz for xf86) */
        drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
index 3778360eceea0cc44d2f2213090d5e1ed2e670f4..fda67468e603b6169393b92bc4922afef8b4d8ce 100644
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                                break;
                }
 
-               if (!agpmem)
+               if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;
 
                /*
index 0e6c131313d95b48e62a6dab63730049c7332355..61b4caf220fa83bd15815ea0f82b627f2d773727 100644
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i810_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
index 5168862c92271e5d7fddfb48d3470f4f70113d92..671aa18415ac52d17164e79b4c2a9f287b02da0d 100644
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i830_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
index 44af317731b67071c3fa9ec13280975ae7f7d639..a7ec93e62f811800003dcc655a7580473d748d33 100644
@@ -2367,46 +2367,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-       DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-       DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
-       DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index 23157e1de3bec24339f21ed95fac72bd341ab0fb..11a3394f5fe17bb772cb381de1708ad77ae14499 100644
@@ -992,7 +992,7 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 
        /* Wait for vblank interrupt bit to set */
        if (wait_for((I915_READ(pipestat_reg) &
-                     PIPE_VBLANK_INTERRUPT_STATUS) == 0,
+                     PIPE_VBLANK_INTERRUPT_STATUS),
                     50, 0))
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
index fff82045c427eec5372c6ca3747f64bd16dcabd0..9ce2827f8c00d53a8b7a793f785024c846084d84 100644
@@ -1085,19 +1085,19 @@ file_priv)
 }
 
 struct drm_ioctl_desc mga_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
index 0b69a9628c95dee9585584672a1ec9f820aa600e..974b0f8ae0483cc462b1be9a81795413e7a7a0c1 100644
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
        uint32_t val = 0;
 
        if (off < pci_resource_len(dev->pdev, 1)) {
-               uint32_t __iomem *p =
+               uint8_t __iomem *p =
                        io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
                val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
        uint32_t off, uint32_t val)
 {
        if (off < pci_resource_len(dev->pdev, 1)) {
-               uint32_t __iomem *p =
+               uint8_t __iomem *p =
                        io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
                iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
        }
 #ifdef __powerpc__
        /* Powerbook specific quirks */
-       if ((dev->pci_device & 0xffff) == 0x0179 ||
-           (dev->pci_device & 0xffff) == 0x0189 ||
-           (dev->pci_device & 0xffff) == 0x0329) {
-               if (script == LVDS_RESET) {
-                       nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
-               } else if (script == LVDS_PANEL_ON) {
-                       bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-                                 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-                                 | (1 << 31));
-                       bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-                                 bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
-               } else if (script == LVDS_PANEL_OFF) {
-                       bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-                                 bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-                                 & ~(1 << 31));
-                       bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-                                 bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
-               }
-       }
+       if (script == LVDS_RESET &&
+           (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+            dev->pci_device == 0x0329))
+               nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
        return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
         *
         * For the moment, a quirk will do :)
         */
-       if ((dev->pdev->device == 0x01d7) &&
-           (dev->pdev->subsystem_vendor == 0x1028) &&
-           (dev->pdev->subsystem_device == 0x01c2)) {
+       if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
                bios->fp.duallink_transition_clk = 80000;
-       }
 
        /* set dual_link flag for EDID case */
        if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -1) {
@@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk == -2) {
@@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk > 0) {
@@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        } else
        if (pxclk < 0) {
@@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
                        return 1;
                }
 
-               NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+               NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
                nouveau_bios_run_init_table(dev, script, dcbent);
        }
 
@@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
        }
 
        tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
-       if (tmdstableptr == 0x0) {
+       if (!tmdstableptr) {
                NV_ERROR(dev, "Pointer to TMDS table invalid\n");
                return -EINVAL;
        }
 
+       NV_INFO(dev, "TMDS table version %d.%d\n",
+               bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
        /* nv50+ has v2.0, but we don't parse it atm */
-       if (bios->data[tmdstableptr] != 0x11) {
-               NV_WARN(dev,
-                       "TMDS table revision %d.%d not currently supported\n",
-                       bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+       if (bios->data[tmdstableptr] != 0x11)
                return -ENOSYS;
-       }
 
        /*
         * These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
                        gpio->line = tvdac_gpio[1] >> 4;
                        gpio->invert = tvdac_gpio[0] & 2;
                }
+       } else {
+               /*
+                * No systematic way to store GPIO info on pre-v2.2
+                * DCBs, try to match the PCI device IDs.
+                */
+
+               /* Apple iMac G4 NV18 */
+               if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+                       struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+                       gpio->tag = DCB_GPIO_TVDAC0;
+                       gpio->line = 4;
+               }
+
        }
 
        if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
        struct drm_device *dev = bios->dev;
 
        /* Gigabyte NX85T */
-       if ((dev->pdev->device == 0x0421) &&
-           (dev->pdev->subsystem_vendor == 0x1458) &&
-           (dev->pdev->subsystem_device == 0x344c)) {
+       if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
                if (cte->type == DCB_CONNECTOR_HDMI_1)
                        cte->type = DCB_CONNECTOR_DVI_I;
        }
@@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
                        entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
 
                break;
-       case 0xe:
+       case OUTPUT_EOL:
                /* weird g80 mobile type that "nv" treats as a terminator */
                dcb->entries--;
                return false;
@@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
                entry->type = OUTPUT_TV;
                break;
        case 2:
-       case 3:
-               entry->type = OUTPUT_LVDS;
-               break;
        case 4:
-               switch ((conn & 0x000000f0) >> 4) {
-               case 0:
-                       entry->type = OUTPUT_TMDS;
-                       break;
-               case 1:
+               if (conn & 0x10)
                        entry->type = OUTPUT_LVDS;
-                       break;
-               default:
-                       NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
-                                (conn & 0x000000f0) >> 4);
-                       return false;
-               }
+               else
+                       entry->type = OUTPUT_TMDS;
+               break;
+       case 3:
+               entry->type = OUTPUT_LVDS;
                break;
        default:
                NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
         * nasty problems until this is sorted (assuming it's not a
         * VBIOS bug).
         */
-       if ((dev->pdev->device == 0x040d) &&
-           (dev->pdev->subsystem_vendor == 0x1028) &&
-           (dev->pdev->subsystem_device == 0x019b)) {
+       if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
                if (*conn == 0x02026312 && *conf == 0x00000020)
                        return false;
        }
index fd14dfd3d780f6b45a280d1cf4c4aed6eb41ed20..c1de2f3fcb0ea7e78ec193d88c74683d5090eaba 100644
@@ -95,6 +95,7 @@ enum dcb_type {
        OUTPUT_TMDS = 2,
        OUTPUT_LVDS = 3,
        OUTPUT_DP = 6,
+       OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
        OUTPUT_ANY = -1
 };
 
index 84f85183d041f38d9d848137abf24b236a32626f..f6f44779d82fb801ac738cc9e5f83e4179fd6de3 100644
 #include <linux/log2.h>
 #include <linux/slab.h>
 
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+       struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+       int ret;
+
+       if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+               return 0;
+
+       spin_lock(&nvbo->bo.lock);
+       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+       spin_unlock(&nvbo->bo.lock);
+       return ret;
+}
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
index 90fdcda332be360ab07cb862c9a6ad533724a55c..0480f064f2c14fd8c4bf41672fa039f9f2bc9029 100644
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
index b1b22baf14284a6d2105a320f7602f3e81b00529..a1473fff06ac2d61bd3f629dcc9527be5975f165 100644
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
        int i;
 
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct nouveau_i2c_chan *i2c;
+               struct nouveau_i2c_chan *i2c = NULL;
                struct nouveau_encoder *nv_encoder;
                struct drm_mode_object *obj;
                int id;
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                if (!obj)
                        continue;
                nv_encoder = nouveau_encoder(obj_to_encoder(obj));
-               i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+               if (nv_encoder->dcb->i2c_index < 0xf)
+                       i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
 
                if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
                        *pnv_encoder = nv_encoder;
index e424bf74d706307c237c7c3976498dafea0fd29c..b1be617373b63891dc05b09f2dc75807cd8b8472 100644
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
        return false;
 }
 
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+               unsigned sub_vendor, unsigned sub_device)
+{
+       return dev->pdev->device == device &&
+               dev->pdev->subsystem_vendor == sub_vendor &&
+               dev->pdev->subsystem_device == sub_device;
+}
+
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
index 0f417ac1b696b769a0982f762bfa696a50637de1..581c67cd7b24c24446097797f54ffc5d689f92b0 100644
@@ -337,7 +337,9 @@ retry:
                                return -EINVAL;
                        }
 
+                       mutex_unlock(&drm_global_mutex);
                        ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+                       mutex_lock(&drm_global_mutex);
                        if (ret) {
                                NV_ERROR(dev, "fail wait_cpu\n");
                                return ret;
@@ -361,16 +363,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-               struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-               if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-                       spin_lock(&nvbo->bo.lock);
-                       ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-                       spin_unlock(&nvbo->bo.lock);
-                       if (unlikely(ret)) {
-                               NV_ERROR(dev, "fail wait other chan\n");
-                               return ret;
-                       }
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail pre-validate sync\n");
+                       return ret;
                }
 
                ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +378,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               nvbo->channel = chan;
+               nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                      false, false, false);
                nvbo->channel = NULL;
@@ -390,6 +387,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
+               ret = nouveau_bo_sync_gpu(nvbo, chan);
+               if (unlikely(ret)) {
+                       NV_ERROR(dev, "fail post-validate sync\n");
+                       return ret;
+               }
+
                if (nvbo->bo.offset == b->presumed.offset &&
                    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -615,6 +618,21 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->struct_mutex);
 
+       /* Mark push buffers as being used on PFIFO, the validation code
+        * will then make sure that if the pushbuf bo moves, that they
+        * happen on the kernel channel, which will in turn cause a sync
+        * to happen before we try and submit the push buffer.
+        */
+       for (i = 0; i < req->nr_push; i++) {
+               if (push[i].bo_index >= req->nr_buffers) {
+                       NV_ERROR(dev, "push %d buffer not in list\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               bo[push[i].bo_index].read_domains |= (1 << 31);
+       }
+
        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
@@ -647,7 +665,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                                      push[i].length);
                }
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -722,7 +740,7 @@ out_next:
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
-       if (dev_priv->card_type >= NV_20) {
+       if (dev_priv->chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
index 0bd407ca3d429117ac3cdf0e42f1ccf5b5e853d7..84614858728ba0f7ab959b54989cf9a512d89d6a 100644
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
        if (entry->chan)
                return -EEXIST;
 
-       if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+       if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
                NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
                return -EINVAL;
        }
index 491767fe4fcfd3b705c3bdb82b4ebeded1bfe9bb..6b9187d7f67de4383502973ef5cdfc9259ec6f1a 100644
@@ -214,6 +214,7 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev->pdev;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev)
 
        dev_priv->gart_info.sg_dummy_page =
                alloc_page(GFP_KERNEL|__GFP_DMA32);
+       if (!dev_priv->gart_info.sg_dummy_page) {
+               nouveau_gpuobj_del(dev, &gpuobj);
+               return -ENOMEM;
+       }
+
        set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
        dev_priv->gart_info.sg_dummy_bus =
-               pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+               pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+               nouveau_gpuobj_del(dev, &gpuobj);
+               return -EFAULT;
+       }
 
        if (dev_priv->card_type < NV_50) {
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
index a5dcf7685800c4effb3d1c4938736b4c48c36eb0..0d3206a7046cb008814237250d5fa9fe2502ac2c 100644
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct dcb_entry *dcbe = nv_encoder->dcb;
        int head = nouveau_crtc(encoder->crtc)->index;
+       struct drm_encoder *slave_encoder;
 
        if (dcbe->type == OUTPUT_TMDS)
                run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
                NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
 
        /* Init external transmitters */
-       if (get_tmds_slave(encoder))
-               get_slave_funcs(get_tmds_slave(encoder))->mode_set(
-                       encoder, &nv_encoder->mode, &nv_encoder->mode);
+       slave_encoder = get_tmds_slave(encoder);
+       if (slave_encoder)
+               get_slave_funcs(slave_encoder)->mode_set(
+                       slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
 
        helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
                nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+       struct drm_device *dev = encoder->dev;
+
+       /* BIOS scripts usually take care of the backlight, thanks
+        * Apple for your consistency.
+        */
+       if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+           dev->pci_device == 0x0329) {
+               if (mode == DRM_MODE_DPMS_ON) {
+                       nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+                       nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+               } else {
+                       nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+                       nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+               }
+       }
+#endif
+}
+
 static inline bool is_powersaving_dpms(int mode)
 {
        return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
                                         LVDS_PANEL_OFF, 0);
        }
 
+       nv04_dfp_update_backlight(encoder, mode);
        nv04_dfp_update_fp_control(encoder, mode);
 
        if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
        NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
                     mode, nv_encoder->dcb->index);
 
+       nv04_dfp_update_backlight(encoder, mode);
        nv04_dfp_update_fp_control(encoder, mode);
 }
 
index 44fefb0c7083caf5c4e77011675927dc3d60aec6..13cdc05b7c2d0f4e7785cbe428ce4ab86205bd67 100644
@@ -121,10 +121,14 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
        /* Zotac FX5200 */
-       if (dev->pdev->device == 0x0322 &&
-           dev->pdev->subsystem_vendor == 0x19da &&
-           (dev->pdev->subsystem_device == 0x1035 ||
-            dev->pdev->subsystem_device == 0x2035)) {
+       if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+           nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+               *pin_mask = 0xc;
+               return false;
+       }
+
+       /* MSI nForce2 IGP */
+       if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
                *pin_mask = 0xc;
                return false;
        }
index 37c7b48ab24af8a37c3e8ac29656b989ec5935f5..c95bf9b681ddc2352b8a251805e05456100f70b2 100644
@@ -278,7 +278,7 @@ nv50_instmem_init(struct drm_device *dev)
        /*XXX: incorrect, but needed to make hash func "work" */
        dev_priv->ramht_offset = 0x10000;
        dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
 
index 3ab3cdc42173300490c7e667eced2f39b0d36690..6b451f864783e5c1ed3610c13c86adb5588a5783 100644
@@ -142,14 +142,16 @@ int
 nvc0_instmem_suspend(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 *buf;
        int i;
 
        dev_priv->susres.ramin_copy = vmalloc(65536);
        if (!dev_priv->susres.ramin_copy)
                return -ENOMEM;
+       buf = dev_priv->susres.ramin_copy;
 
-       for (i = 0x700000; i < 0x710000; i += 4)
-               dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+       for (i = 0; i < 65536; i += 4)
+               buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
        return 0;
 }
 
@@ -157,14 +159,15 @@ void
 nvc0_instmem_resume(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       u32 *buf = dev_priv->susres.ramin_copy;
        u64 chan;
        int i;
 
        chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
        nv_wr32(dev, 0x001700, chan >> 16);
 
-       for (i = 0x700000; i < 0x710000; i += 4)
-               nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+       for (i = 0; i < 65536; i += 4)
+               nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
        vfree(dev_priv->susres.ramin_copy);
        dev_priv->susres.ramin_copy = NULL;
 
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev)
        /*XXX: incorrect, but needed to make hash func "work" */
        dev_priv->ramht_offset = 0x10000;
        dev_priv->ramht_bits   = 9;
-       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
+       dev_priv->ramht_size   = (1 << dev_priv->ramht_bits) * 8;
        return 0;
 }
 
index 077af1f2f9b4faee1f3e840f8e4a52813cb05108..a9e33ce65918c98bed3c039dbc65fa8e02bac15e 100644
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
                        r128_do_cleanup_pageflip(dev);
        }
 }
-
 void r128_driver_lastclose(struct drm_device *dev)
 {
        r128_do_cleanup_cce(dev);
 }
 
 struct drm_ioctl_desc r128_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
 };
 
 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
index 12ad512bd3d33ad316655400212626585126c6de..577239a24fd5e37a29c3bbc72a509e7361248a68 100644
@@ -471,6 +471,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
        struct radeon_encoder *radeon_encoder = NULL;
        u32 adjusted_clock = mode->clock;
        int encoder_mode = 0;
+       u32 dp_clock = mode->clock;
+       int bpc = 8;
 
        /* reset the pll flags */
        pll->flags = 0;
@@ -513,6 +515,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (encoder->crtc == crtc) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        encoder_mode = atombios_get_encoder_mode(encoder);
+                       if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+                               struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+                               if (connector) {
+                                       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+                                       struct radeon_connector_atom_dig *dig_connector =
+                                               radeon_connector->con_priv;
+
+                                       dp_clock = dig_connector->dp_clock;
+                               }
+                       }
+
                        if (ASIC_IS_AVIVO(rdev)) {
                                /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
                                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -555,6 +568,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
                                args.v1.ucTransmitterID = radeon_encoder->encoder_id;
                                args.v1.ucEncodeMode = encoder_mode;
+                               if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                       /* may want to enable SS on DP eventually */
+                                       /* args.v1.ucConfig |=
+                                          ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+                               } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                                       args.v1.ucConfig |=
+                                               ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+                               }
 
                                atom_execute_table(rdev->mode_info.atom_context,
                                                   index, (uint32_t *)&args);
@@ -568,10 +589,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
-                                       if (encoder_mode == ATOM_ENCODER_MODE_DP)
+                                       if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                               /* may want to enable SS on DP/eDP eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
-                                       else {
+                                               /* 16200 or 27000 */
+                                               args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+                                       } else {
+                                               if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+                                                       /* deep color support */
+                                                       args.v3.sInput.usPixelClock =
+                                                               cpu_to_le16((mode->clock * bpc / 8) / 10);
+                                               }
                                                if (dig->coherent_mode)
                                                        args.v3.sInput.ucDispPllConfig |=
                                                                DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +611,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                                DISPPLL_CONFIG_DUAL_LINK;
                                        }
                                } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                                       /* may want to enable SS on DP/eDP eventually */
-                                       /*args.v3.sInput.ucDispPllConfig |=
-                                               DISPPLL_CONFIG_SS_ENABLE;*/
-                                       if (encoder_mode == ATOM_ENCODER_MODE_DP)
+                                       if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+                                               /* may want to enable SS on DP/eDP eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
-                                       else {
+                                               /* 16200 or 27000 */
+                                               args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+                                       } else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+                                               /* want to enable SS on LVDS eventually */
+                                               /*args.v3.sInput.ucDispPllConfig |=
+                                                 DISPPLL_CONFIG_SS_ENABLE;*/
+                                       } else {
                                                if (mode->clock > 165000)
                                                        args.v3.sInput.ucDispPllConfig |=
                                                                DISPPLL_CONFIG_DUAL_LINK;
index 36e0d4b545e60b674380746496525fed8b88ad39..4e7778d44b8d36fa4491c97de47aa5f4d400919f 100644
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
                enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
        else
                enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
-       if (dig_connector->linkb)
+       if (dig->linkb)
                enc_id |= ATOM_DP_CONFIG_LINK_B;
        else
                enc_id |= ATOM_DP_CONFIG_LINK_A;
index f40dfb77f9b12cfa75c90c83ec608714f15ae423..bd2f33e5c91a695c2110af95397a6fa33cad0c10 100644
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
        }
 
        mode.mode = info.mode;
-       agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+       /* chips with the agp to pcie bridge don't have the AGP_STATUS register
+        * Just use the whatever mode the host sets up.
+        */
+       if (rdev->family <= CHIP_RV350)
+               agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+       else
+               agp_status = mode.mode;
        is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
 
        if (is_v3) {
index 646f96f97c77d8538919325be4bde664a7f3df8b..a21bf88e8c2d530637972b3ddd60863334ba1c1c 100644
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .get_memory_clock = &radeon_atom_get_memory_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
+       .get_pcie_lanes = NULL,
        .set_pcie_lanes = NULL,
        .set_clock_gating = NULL,
        .set_surface_reg = r600_set_surface_reg,
index 6d30868744eeed8e1890a13532dd77a36161fa4b..61141981880d7d03908a9a582f0d5a0a9951d2b3 100644
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-                     uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+                       uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
                        uint32_t supported_device);
 
 /* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
                          uint32_t supported_device,
                          int connector_type,
                          struct radeon_i2c_bus_rec *i2c_bus,
-                         bool linkb, uint32_t igp_lane_info,
+                         uint32_t igp_lane_info,
                          uint16_t connector_object_id,
                          struct radeon_hpd *hpd,
                          struct radeon_router *router);
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
                          uint32_t supported_device);
 
 union atom_supported_devices {
@@ -226,6 +226,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
        struct radeon_hpd hpd;
        u32 reg;
 
+       memset(&hpd, 0, sizeof(struct radeon_hpd));
+
        if (ASIC_IS_DCE4(rdev))
                reg = EVERGREEN_DC_GPIO_HPD_A;
        else
@@ -477,7 +479,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
        int i, j, k, path_size, device_support;
        int connector_type;
        u16 igp_lane_info, conn_id, connector_object_id;
-       bool linkb;
        struct radeon_i2c_bus_rec ddc_bus;
        struct radeon_router router;
        struct radeon_gpio_rec gpio;
@@ -510,7 +511,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                addr += path_size;
                path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
                path_size += le16_to_cpu(path->usSize);
-               linkb = false;
+
                if (device_support & le16_to_cpu(path->usDeviceTag)) {
                        uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -601,13 +602,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
                                if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-                                       if (grph_obj_num == 2)
-                                               linkb = true;
-                                       else
-                                               linkb = false;
+                                       u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
 
                                        radeon_add_atom_encoder(dev,
-                                                               grph_obj_id,
+                                                               encoder_obj,
                                                                le16_to_cpu
                                                                (path->
                                                                 usDeviceTag));
@@ -744,7 +742,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                  le16_to_cpu(path->
                                                              usDeviceTag),
                                                  connector_type, &ddc_bus,
-                                                 linkb, igp_lane_info,
+                                                 igp_lane_info,
                                                  connector_object_id,
                                                  &hpd,
                                                  &router);
@@ -933,13 +931,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
                if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
                        radeon_add_atom_encoder(dev,
-                                               radeon_get_encoder_id(dev,
+                                               radeon_get_encoder_enum(dev,
                                                                      (1 << i),
                                                                      dac),
                                                (1 << i));
                else
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        (1 << i),
                                                                        dac),
                                                  (1 << i));
@@ -996,7 +994,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
                                                  bios_connectors[i].
                                                  connector_type,
                                                  &bios_connectors[i].ddc_bus,
-                                                 false, 0,
+                                                 0,
                                                  connector_object_id,
                                                  &bios_connectors[i].hpd,
                                                  &router);
@@ -1183,7 +1181,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
                                return true;
                        break;
                case 2:
-                       if (igp_info->info_2.ucMemoryType & 0x0f)
+                       if (igp_info->info_2.ulBootUpSidePortClock)
                                return true;
                        break;
                default:
@@ -1305,6 +1303,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
        union lvds_info *lvds_info;
        uint8_t frev, crev;
        struct radeon_encoder_atom_dig *lvds = NULL;
+       int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
        if (atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset)) {
@@ -1368,6 +1367,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                }
 
                encoder->native_mode = lvds->native_mode;
+
+               if (encoder_enum == 2)
+                       lvds->linkb = true;
+               else
+                       lvds->linkb = false;
+
        }
        return lvds;
 }
index 885dcfac1838e89367487760aa717d54d7d7f978..bd74e428bd147d0df444cc64858f28acbed0d8ed 100644 (file)
@@ -39,8 +39,8 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
-                     uint8_t dac);
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+                       uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 
 /* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
                          uint32_t supported_device);
 
 /* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_LCD1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                        hpd.hpd = RADEON_HPD_1;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_DFP1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_DFP1_SUPPORT);
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT2_SUPPORT,
                                                                        2),
                                                  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                        ddc_i2c.valid = false;
                        hpd.hpd = RADEON_HPD_NONE;
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_TV1_SUPPORT,
                                                                        2),
                                                  ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_2; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP2_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP2_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_LCD1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_2; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP2_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP2_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
                hpd.hpd = RADEON_HPD_1; /* ??? */
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_DFP1_SUPPORT,
                                                                0),
                                          ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c.valid = false;
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_TV1_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                1),
                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
                ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
                hpd.hpd = RADEON_HPD_NONE;
                radeon_add_legacy_encoder(dev,
-                                         radeon_get_encoder_id(dev,
+                                         radeon_get_encoder_enum(dev,
                                                                ATOM_DEVICE_CRT2_SUPPORT,
                                                                2),
                                          ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                else
                                        devices = ATOM_DEVICE_DFP1_SUPPORT;
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev, devices, 0),
                                                          devices);
                                radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if (tmp & 0x1) {
                                        devices = ATOM_DEVICE_CRT2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT2_SUPPORT,
                                                                   2),
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices = ATOM_DEVICE_CRT1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT1_SUPPORT,
                                                                   1),
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if (tmp & 0x1) {
                                        devices |= ATOM_DEVICE_CRT2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT2_SUPPORT,
                                                                   2),
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices |= ATOM_DEVICE_CRT1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_CRT1_SUPPORT,
                                                                   1),
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                if ((tmp >> 4) & 0x1) {
                                        devices |= ATOM_DEVICE_DFP2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_DFP2_SUPPORT,
                                                                   0),
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                } else {
                                        devices |= ATOM_DEVICE_DFP1_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_DFP1_SUPPORT,
                                                                   0),
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                        connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
                                }
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev, devices, 0),
                                                          devices);
                                radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        case CONNECTOR_CTV_LEGACY:
                        case CONNECTOR_STV_LEGACY:
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id
+                                                         radeon_get_encoder_enum
                                                          (dev,
                                                           ATOM_DEVICE_TV1_SUPPORT,
                                                           2),
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
 
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_CRT1_SUPPORT,
                                                                        1),
                                                  ATOM_DEVICE_CRT1_SUPPORT);
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_DFP1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                        DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
                        if (crt_info) {
                                radeon_add_legacy_encoder(dev,
-                                                         radeon_get_encoder_id(dev,
+                                                         radeon_get_encoder_enum(dev,
                                                                                ATOM_DEVICE_CRT1_SUPPORT,
                                                                                1),
                                                          ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                                     COMBIOS_LCD_DDC_INFO_TABLE);
 
                        radeon_add_legacy_encoder(dev,
-                                                 radeon_get_encoder_id(dev,
+                                                 radeon_get_encoder_enum(dev,
                                                                        ATOM_DEVICE_LCD1_SUPPORT,
                                                                        0),
                                                  ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                        hpd.hpd = RADEON_HPD_NONE;
                                        ddc_i2c.valid = false;
                                        radeon_add_legacy_encoder(dev,
-                                                                 radeon_get_encoder_id
+                                                                 radeon_get_encoder_enum
                                                                  (dev,
                                                                   ATOM_DEVICE_TV1_SUPPORT,
                                                                   2),
index 47c4b276d30c5fc4c0cd1fdbb9b7cdd5aa7bd634..1a5ee392e9c796ad7db3d8056b3fbf870dab4074 100644 (file)
@@ -977,27 +977,29 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        enum drm_connector_status ret = connector_status_disconnected;
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
-       u8 sink_type;
 
        if (radeon_connector->edid) {
                kfree(radeon_connector->edid);
                radeon_connector->edid = NULL;
        }
 
-       sink_type = radeon_dp_getsinktype(radeon_connector);
-       if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-           (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
-               if (radeon_dp_getdpcd(radeon_connector)) {
-                       radeon_dig_connector->dp_sink_type = sink_type;
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               /* eDP is always DP */
+               radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+               if (radeon_dp_getdpcd(radeon_connector))
                        ret = connector_status_connected;
-               }
        } else {
-               if (radeon_ddc_probe(radeon_connector)) {
-                       radeon_dig_connector->dp_sink_type = sink_type;
-                       ret = connector_status_connected;
+               radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+               if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+                       if (radeon_dp_getdpcd(radeon_connector))
+                               ret = connector_status_connected;
+               } else {
+                       if (radeon_ddc_probe(radeon_connector))
+                               ret = connector_status_connected;
                }
        }
 
+       radeon_connector_update_scratch_regs(connector, ret);
        return ret;
 }
 
@@ -1037,7 +1039,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                          uint32_t supported_device,
                          int connector_type,
                          struct radeon_i2c_bus_rec *i2c_bus,
-                         bool linkb,
                          uint32_t igp_lane_info,
                          uint16_t connector_object_id,
                          struct radeon_hpd *hpd,
@@ -1128,7 +1129,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1158,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1181,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1229,7 +1227,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
                if (!radeon_dig_connector)
                        goto failed;
-               radeon_dig_connector->linkb = linkb;
                radeon_dig_connector->igp_lane_info = igp_lane_info;
                radeon_connector->con_priv = radeon_dig_connector;
                drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
index 4f7a170d15663a3f3030f2e02e517718e6998968..69b3c2291e926730944107c76e1b1d52bb0270b9 100644 (file)
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+       if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
index 5764f4d3b4f1a0804caf56f39e6a75c6c9afaf57..6dd434ad2429b9d9689ed861d33a2b1c0b09f092 100644 (file)
@@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev)
        radeon_i2c_fini(rdev);
 }
 
+static bool is_hdtv_mode(struct drm_display_mode *mode)
+{
+       /* try to guess whether this is a TV or a monitor */
+       if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
+           (mode->vdisplay == 576) || /* 576p */
+           (mode->vdisplay == 720) || /* 720p */
+           (mode->vdisplay == 1080)) /* 1080p */
+               return true;
+       else
+               return false;
+}
+
 bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                                struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
@@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
                        if (ASIC_IS_AVIVO(rdev) &&
                            ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
                             ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
-                             drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+                             drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                             is_hdtv_mode(mode)))) {
                                radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
                                radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
                                radeon_crtc->rmx_type = RMX_FULL;
index 263c8098d7dd279447ebba20e408c25f791f0b7d..2c293e8304d657d047144708bff75c6474d560f0 100644 (file)
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
 }
 
 uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
        struct radeon_device *rdev = dev->dev_private;
        uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
                        if ((rdev->family == CHIP_RS300) ||
                            (rdev->family == CHIP_RS400) ||
                            (rdev->family == CHIP_RS480))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                               ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
                        else if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+                               ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
                        else
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
+                               ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
                        break;
                case 2: /* dac b */
                        if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+                               ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
                        else {
                                /*if (rdev->family == CHIP_R200)
-                                 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                                 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                                  else*/
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
+                               ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
                        }
                        break;
                case 3: /* external dac */
                        if (ASIC_IS_AVIVO(rdev))
-                               ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+                               ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
                        else
-                               ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                               ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                        break;
                }
                break;
        case ATOM_DEVICE_LCD1_SUPPORT:
                if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+                       ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
+                       ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
                break;
        case ATOM_DEVICE_DFP1_SUPPORT:
                if ((rdev->family == CHIP_RS300) ||
                    (rdev->family == CHIP_RS400) ||
                    (rdev->family == CHIP_RS480))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                       ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                else if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+                       ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+                       ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
                break;
        case ATOM_DEVICE_LCD2_SUPPORT:
        case ATOM_DEVICE_DFP2_SUPPORT:
                if ((rdev->family == CHIP_RS600) ||
                    (rdev->family == CHIP_RS690) ||
                    (rdev->family == CHIP_RS740))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
+                       ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
                else if (ASIC_IS_AVIVO(rdev))
-                       ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
+                       ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
                else
-                       ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
+                       ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
                break;
        case ATOM_DEVICE_DFP3_SUPPORT:
-               ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+               ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
                break;
        }
 
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
        return NULL;
 }
 
-static struct radeon_connector_atom_dig *
-radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector;
-       struct radeon_connector_atom_dig *dig_connector;
-
-       if (!rdev->is_atom_bios)
-               return NULL;
-
-       connector = radeon_get_connector_for_encoder(encoder);
-       if (!connector)
-               return NULL;
-
-       radeon_connector = to_radeon_connector(connector);
-
-       if (!radeon_connector->con_priv)
-               return NULL;
-
-       dig_connector = radeon_connector->con_priv;
-
-       return dig_connector;
-}
-
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
                             struct drm_display_mode *adjusted_mode)
 {
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
        union lvds_encoder_control args;
        int index = 0;
        int hdmi_detected = 0;
        uint8_t frev, crev;
 
-       if (!dig || !dig_connector)
+       if (!dig)
                return;
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                                if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
                                        args.v1.ucMisc |= (1 << 1);
                        } else {
-                               if (dig_connector->linkb)
+                               if (dig->linkb)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
                                if (radeon_encoder->pixel_clock > 165000)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                                                args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
                                }
                        } else {
-                               if (dig_connector->linkb)
+                               if (dig->linkb)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
                                if (radeon_encoder->pixel_clock > 165000)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
 int
 atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_connector *connector;
        struct radeon_connector *radeon_connector;
        struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else if (radeon_connector->use_digital)
+               if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else if (radeon_connector->use_digital)
                        return ATOM_ENCODER_MODE_DVI;
                else
                        return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_hdmi_monitor(radeon_connector->edid))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* fix me */
+                       if (ASIC_IS_DCE4(rdev))
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_HDMI;
+               } else
                        return ATOM_ENCODER_MODE_DVI;
                break;
        case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        union dig_encoder_control args;
        int index = 0;
        uint8_t frev, crev;
+       int dp_clock = 0;
+       int dp_lane_count = 0;
+
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
 
-       if (!dig || !dig_connector)
+               dp_clock = dig_connector->dp_clock;
+               dp_lane_count = dig_connector->dp_lane_count;
+       }
+
+       /* no dig encoder assigned */
+       if (dig->dig_encoder == -1)
                return;
 
        memset(&args, 0, sizeof(args));
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
        args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
        if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-               if (dig_connector->dp_clock == 270000)
+               if (dp_clock == 270000)
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-               args.v1.ucLaneNum = dig_connector->dp_lane_count;
+               args.v1.ucLaneNum = dp_lane_count;
        } else if (radeon_encoder->pixel_clock > 165000)
                args.v1.ucLaneNum = 8;
        else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
                        args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
                        break;
                }
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
                else
                        args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_get_atom_connector_priv_from_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        union dig_transmitter_control args;
        int index = 0;
        uint8_t frev, crev;
        bool is_dp = false;
        int pll_id = 0;
+       int dp_clock = 0;
+       int dp_lane_count = 0;
+       int connector_object_id = 0;
+       int igp_lane_info = 0;
 
-       if (!dig || !dig_connector)
-               return;
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector_atom_dig *dig_connector =
+                       radeon_connector->con_priv;
 
-       connector = radeon_get_connector_for_encoder(encoder);
-       radeon_connector = to_radeon_connector(connector);
+               dp_clock = dig_connector->dp_clock;
+               dp_lane_count = dig_connector->dp_lane_count;
+               connector_object_id =
+                       (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+               igp_lane_info = dig_connector->igp_lane_info;
+       }
+
+       /* no dig encoder assigned */
+       if (dig->dig_encoder == -1)
+               return;
 
        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
                is_dp = true;
 
        memset(&args, 0, sizeof(args));
 
-       if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
+       switch (radeon_encoder->encoder_id) {
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+       case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
                index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
-       else {
-               switch (radeon_encoder->encoder_id) {
-               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-                       index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
-                       break;
-               case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-                       index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
-                       break;
-               }
+               break;
+       case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+               index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+               break;
        }
 
        if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
        args.v1.ucAction = action;
        if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-               args.v1.usInitInfo = radeon_connector->connector_object_id;
+               args.v1.usInitInfo = connector_object_id;
        } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
                args.v1.asMode.ucLaneSel = lane_num;
                args.v1.asMode.ucLaneSet = lane_set;
        } else {
                if (is_dp)
                        args.v1.usPixelClock =
-                               cpu_to_le16(dig_connector->dp_clock / 10);
+                               cpu_to_le16(dp_clock / 10);
                else if (radeon_encoder->pixel_clock > 165000)
                        args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
        }
        if (ASIC_IS_DCE4(rdev)) {
                if (is_dp)
-                       args.v3.ucLaneNum = dig_connector->dp_lane_count;
+                       args.v3.ucLaneNum = dp_lane_count;
                else if (radeon_encoder->pixel_clock > 165000)
                        args.v3.ucLaneNum = 8;
                else
                        args.v3.ucLaneNum = 4;
 
-               if (dig_connector->linkb) {
+               if (dig->linkb) {
                        args.v3.acConfig.ucLinkSel = 1;
                        args.v3.acConfig.ucEncoderSel = 1;
                }
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                }
        } else if (ASIC_IS_DCE32(rdev)) {
                args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v2.acConfig.ucLinkSel = 1;
 
                switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                if ((rdev->flags & RADEON_IS_IGP) &&
                    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
                        if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
-                               if (dig_connector->igp_lane_info & 0x1)
+                               if (igp_lane_info & 0x1)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-                               else if (dig_connector->igp_lane_info & 0x2)
+                               else if (igp_lane_info & 0x2)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
-                               else if (dig_connector->igp_lane_info & 0x4)
+                               else if (igp_lane_info & 0x4)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
-                               else if (dig_connector->igp_lane_info & 0x8)
+                               else if (igp_lane_info & 0x8)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
                        } else {
-                               if (dig_connector->igp_lane_info & 0x3)
+                               if (igp_lane_info & 0x3)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
-                               else if (dig_connector->igp_lane_info & 0xc)
+                               else if (igp_lane_info & 0xc)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
                        }
                }
 
-               if (dig_connector->linkb)
+               if (dig->linkb)
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
                else
                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
        if (is_dig) {
                switch (mode) {
                case DRM_MODE_DPMS_ON:
-                       if (!ASIC_IS_DCE4(rdev))
-                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
                                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
                case DRM_MODE_DPMS_STANDBY:
                case DRM_MODE_DPMS_SUSPEND:
                case DRM_MODE_DPMS_OFF:
-                       if (!ASIC_IS_DCE4(rdev))
-                               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
                        if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
                                if (ASIC_IS_DCE4(rdev))
                                        atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
        uint32_t dig_enc_in_use = 0;
 
        if (ASIC_IS_DCE4(rdev)) {
-               struct radeon_connector_atom_dig *dig_connector =
-                       radeon_get_atom_connector_priv_from_encoder(encoder);
-
+               dig = radeon_encoder->enc_priv;
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 1;
                        else
                                return 0;
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 3;
                        else
                                return 2;
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-                       if (dig_connector->linkb)
+                       if (dig->linkb)
                                return 5;
                        else
                                return 4;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 struct radeon_encoder_atom_dig *
 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 {
+       int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
        struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
        if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
        dig->coherent_mode = true;
        dig->dig_encoder = -1;
 
+       if (encoder_enum == 2)
+               dig->linkb = true;
+       else
+               dig->linkb = false;
+
        return dig;
 }
 
 void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                radeon_encoder = to_radeon_encoder(encoder);
-               if (radeon_encoder->encoder_id == encoder_id) {
+               if (radeon_encoder->encoder_enum == encoder_enum) {
                        radeon_encoder->devices |= supported_device;
                        return;
                }
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
 
        radeon_encoder->enc_priv = NULL;
 
-       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->encoder_enum = encoder_enum;
+       radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        radeon_encoder->devices = supported_device;
        radeon_encoder->rmx_type = RMX_OFF;
        radeon_encoder->underscan_type = UNDERSCAN_OFF;
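/*
 * Annotation (not part of the patch): the hunks above stop keying encoders
 * by the bare object id, store the full encoder enum instead, and move the
 * linkb flag from the connector private into the encoder private.  A
 * minimal sketch of the decode, using the OBJECT_ID_ and ENUM_ID_ masks
 * already referenced in the diff; the helper names are invented here for
 * illustration only:
 */
static inline uint32_t radeon_encoder_obj_id(uint32_t encoder_enum)
{
	/* low field: which encoder object this is */
	return (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}

static inline bool radeon_encoder_is_linkb(uint32_t encoder_enum)
{
	/* enum id 2 is taken above to mean the second link of the pair */
	return ((encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT) == 2;
}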
index dbf86962bdd16a5741c3de1369c4ebe7d6c36d29..c74a8b20d9413e921bc6a03cd92578146a9ee8ef 100644 (file)
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = radeon_gem_object_create(rdev, aligned_size, 0,
                                       RADEON_GEM_DOMAIN_VRAM,
-                                      false, ttm_bo_type_kernel,
+                                      false, true,
                                       &gobj);
        if (ret) {
                printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
index bfd2ce5f53728dd322767ec186cda77993215f24..0416804d8f3010f7d0451e17e5c2428eecba3d51 100644 (file)
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
                }
        }
 
+       /* switch the pads to ddc mode */
+       if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
+               temp = RREG32(rec->mask_clk_reg);
+               temp &= ~(1 << 16);
+               WREG32(rec->mask_clk_reg, temp);
+       }
+
        /* clear the output pin values */
        temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
        WREG32(rec->a_clk_reg, temp);
index 059bfa4098d7a5990034cf00c0d3bd84b8b09270..a108c7ed14f5941a4954839a583bc2f48f7cf44e 100644 (file)
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
         * chips.  Disable MSI on them for now.
         */
        if ((rdev->family >= CHIP_RV380) &&
-           (!(rdev->flags & RADEON_IS_IGP))) {
+           (!(rdev->flags & RADEON_IS_IGP)) &&
+           (!(rdev->flags & RADEON_IS_AGP))) {
                int ret = pci_enable_msi(rdev->pdev);
                if (!ret) {
                        rdev->msi_enabled = 1;
-                       DRM_INFO("radeon: using MSI.\n");
+                       dev_info(rdev->dev, "radeon: using MSI.\n");
                }
        }
        rdev->irq.installed = true;
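/*
 * Annotation (sketch, not in the patch): the widened check above amounts
 * to the predicate below -- MSI is only attempted on RV380+ parts that are
 * neither IGP nor AGP.  The helper name is invented for illustration:
 */
static bool radeon_msi_ok(struct radeon_device *rdev)
{
	return (rdev->family >= CHIP_RV380) &&
	       !(rdev->flags & RADEON_IS_IGP) &&
	       !(rdev->flags & RADEON_IS_AGP);
}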
index b1c8ace5f0802c908ce653022bbe2921470c8e90..5eee3c41d124bf49fbd5dfbc7264fb062699e961 100644 (file)
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
                        return -EINVAL;
                }
+               break;
        case RADEON_INFO_WANT_HYPERZ:
                /* The "value" here is both an input and output parameter.
                 * If the input value is 1, filp requests hyper-z access.
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
 
 
 struct drm_ioctl_desc radeon_ioctls_kms[] = {
-       DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
        /* KMS */
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
 };
 int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
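/*
 * Annotation (not part of the patch): the table above is converted from
 * DRM_IOCTL_DEF, which took the full DRM_RADEON_x command, to
 * DRM_IOCTL_DEF_DRV, which takes the bare name and token-pastes both the
 * driver command and the full ioctl number from it.  By analogy with the
 * VMW_IOCTL_DEF macro further down in this diff, an entry such as
 *
 *	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED)
 *
 * presumably expands along the lines of
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_INFO) - DRM_COMMAND_BASE] =
 *		{DRM_RADEON_INFO, DRM_AUTH|DRM_UNLOCKED,
 *		 radeon_info_ioctl, DRM_IOCTL_RADEON_INFO},
 *
 * so each slot carries the driver command number, the flags, the handler
 * and the full ioctl number.  The exact macro definition lives in drmP.h
 * and is not shown in this diff.
 */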
index 989df519a1e453b618e2599f0eeced23e637dc91..305049afde15235e558dc22f3992543508f362e6 100644 (file)
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
        if (!ref_div)
                return 1;
 
-       vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div;
+       vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
 
        /*
         * This is horribly crude: the VCO frequency range is divided into
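/*
 * Annotation (not part of the patch): the one-character fix above replaces
 * a stray bitwise AND with the multiplication the PLL gain calculation
 * needs, i.e. vcoFreq = (ref_freq * fb_div) / ref_div.
 */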
index b8149cbc0c70ca3511f748d13e05cb85c16073e9..0b8397000f4c68ae98e80df717a652cb2ddf6253 100644 (file)
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
 }
 
 void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
 {
        struct radeon_device *rdev = dev->dev_private;
        struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                radeon_encoder = to_radeon_encoder(encoder);
-               if (radeon_encoder->encoder_id == encoder_id) {
+               if (radeon_encoder->encoder_enum == encoder_enum) {
                        radeon_encoder->devices |= supported_device;
                        return;
                }
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
 
        radeon_encoder->enc_priv = NULL;
 
-       radeon_encoder->encoder_id = encoder_id;
+       radeon_encoder->encoder_enum = encoder_enum;
+       radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        radeon_encoder->devices = supported_device;
        radeon_encoder->rmx_type = RMX_OFF;
 
index 5bbc086b9267bb16e6d324de5af25977cdf76dae..8f93e2b4b0c821153ca9969255e22e1f95c95513 100644 (file)
@@ -342,6 +342,7 @@ struct radeon_atom_ss {
 };
 
 struct radeon_encoder_atom_dig {
+       bool linkb;
        /* atom dig */
        bool coherent_mode;
        int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac {
 
 struct radeon_encoder {
        struct drm_encoder base;
+       uint32_t encoder_enum;
        uint32_t encoder_id;
        uint32_t devices;
        uint32_t active_device;
@@ -378,7 +380,6 @@ struct radeon_encoder {
 
 struct radeon_connector_atom_dig {
        uint32_t igp_lane_info;
-       bool linkb;
        /* displayport */
        struct radeon_i2c_chan *dp_i2c_bus;
        u8 dpcd[8];
index 58038f5cab38d4b0c31ac7c2abfbad9a18c533b2..f87efec76236966ac790ff0709f5cf2ae3c57563 100644 (file)
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
        int i;
 
+       /* no need to take locks, etc. if nothing's going to change */
+       if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
+           (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
+               return;
+
        mutex_lock(&rdev->ddev->struct_mutex);
        mutex_lock(&rdev->vram_mutex);
        mutex_lock(&rdev->cp.mutex);
@@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
        }
 
        radeon_hwmon_fini(rdev);
-       if (rdev->pm.i2c_bus)
-               radeon_i2c_destroy(rdev->pm.i2c_bus);
 }
 
 void radeon_pm_compute_clocks(struct radeon_device *rdev)
index b3ba44c0a81801a9f528edf833be726a05a55c40..4ae5a3d1074e19c9a203f7433cfc12e1c7724042 100644 (file)
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc radeon_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
+       DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
 };
 
 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
index 976dc8d25280c749798f1c1f124219190c10ca84..bf5f83ea14fe19819874311b66618957859286f2 100644 (file)
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc savage_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
index 07d0f2979cac1158f4db86a90e28bf4184d2b1fa..7fe2b63412ce96aa2c0829127c25261a7bb7aa6d 100644 (file)
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
 }
 
 struct drm_ioctl_desc sis_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
 };
 
 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
index 68dda74a50ae54c8273eb0e076d7ff151f9f7231..cc0ffa9abd00da2ff0504679bad6f32b04f0d561 100644 (file)
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 }
 
 struct drm_ioctl_desc via_ioctls[] = {
-       DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
-       DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+       DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
 };
 
 int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
index 9dd395b90216b17c54598c9309502785a2684220..72ec2e2b6e9787196ca1de65f28e4c6a0f090051 100644 (file)
  */
 
 #define VMW_IOCTL_DEF(ioctl, func, flags) \
-       [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
 
 /**
  * Ioctl definitions.
  */
 
 static struct drm_ioctl_desc vmw_ioctls[] = {
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
+       VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
+       VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
+       VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
+       VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
+       VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
+       VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
+       VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
+       VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
+       VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
                      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+       VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
-       VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
+       VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
 };
 
index b300a2048af153f9aa09cbd546a06fcadb3a4588..52319340e182da8f189eae518d80bd3674d26425 100644 (file)
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
 
 static int __devinit ads7871_probe(struct spi_device *spi)
 {
-       int status, ret, err = 0;
+       int ret, err;
        uint8_t val;
        struct ads7871_data *pdata;
 
        dev_dbg(&spi->dev, "probe\n");
 
-       pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
-       if (!pdata) {
-               err = -ENOMEM;
-               goto exit;
-       }
-
-       status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
-       if (status < 0)
-               goto error_free;
-
-       pdata->hwmon_dev = hwmon_device_register(&spi->dev);
-       if (IS_ERR(pdata->hwmon_dev)) {
-               err = PTR_ERR(pdata->hwmon_dev);
-               goto error_remove;
-       }
-
-       spi_set_drvdata(spi, pdata);
-
        /* Configure the SPI bus */
        spi->mode = (SPI_MODE_0);
        spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
        we need to make sure we really have a chip*/
        if (val != ret) {
                err = -ENODEV;
+               goto exit;
+       }
+
+       pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
+       if (!pdata) {
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
+       if (err < 0)
+               goto error_free;
+
+       spi_set_drvdata(spi, pdata);
+
+       pdata->hwmon_dev = hwmon_device_register(&spi->dev);
+       if (IS_ERR(pdata->hwmon_dev)) {
+               err = PTR_ERR(pdata->hwmon_dev);
                goto error_remove;
        }
 
index c070c9714cbe2fa84646b41626655b4c51a7157e..de8111114f469ec21567a5781349bb0d7f7cbc98 100644 (file)
@@ -518,7 +518,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
 static int __init coretemp_init(void)
 {
        int i, err = -ENODEV;
-       struct pdev_entry *p, *n;
 
        /* quick check if we run Intel */
        if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
index b9bb3e0ca53083bd152332be698c58dbd0ff66bd..39ead2a4d3c50bfa34acb37651739920c22c76ee 100644 (file)
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, k8temp_ids);
 
+static int __devinit is_rev_g_desktop(u8 model)
+{
+       u32 brandidx;
+
+       if (model < 0x69)
+               return 0;
+
+       if (model == 0xc1 || model == 0x6c || model == 0x7c)
+               return 0;
+
+       /*
+        * Differentiate between AM2 and ASB1.
+        * See "Constructing the processor Name String" in "Revision
+        * Guide for AMD NPT Family 0Fh Processors" (33610).
+        */
+       brandidx = cpuid_ebx(0x80000001);
+       brandidx = (brandidx >> 9) & 0x1f;
+
+       /* Single core */
+       if ((model == 0x6f || model == 0x7f) &&
+           (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+               return 0;
+
+       /* Dual core */
+       if (model == 0x6b &&
+           (brandidx == 0xb || brandidx == 0xc))
+               return 0;
+
+       return 1;
+}
+
 static int __devinit k8temp_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
                                 "wrong - check erratum #141\n");
                }
 
-               if ((model >= 0x69) &&
-                   !(model == 0xc1 || model == 0x6c || model == 0x7c ||
-                     model == 0x6b || model == 0x6f || model == 0x7f)) {
+               if (is_rev_g_desktop(model)) {
                        /*
                         * RevG desktop CPUs (i.e. no socket S1G1 or
                         * ASB1 parts) need additional offset,
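/*
 * Annotation (not part of the patch): the new is_rev_g_desktop() helper
 * keeps the old model checks but additionally reads the BrandId index
 * (bits 13:9 of EBX from CPUID leaf 0x80000001), so single- and dual-core
 * AM2 parts that share a model number with ASB1 parts still get the RevG
 * desktop temperature offset, while the ASB1 brand indices are excluded.
 */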
index dcc86b97a1535c6a3ba098543d691ee249d56cf5..19fa94af207a6fa37fbadc7a3d39a65ea2010482 100644 (file)
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
                if (absdev) {
                        val = lo + (hi << 8);
 #ifdef TABLET_AUTOADJUST
-                       if (val < input_abs_min(dev, ABS_X + i))
+                       if (val < input_abs_get_min(dev, ABS_X + i))
                                input_abs_set_min(dev, ABS_X + i, val);
-                       if (val > input_abs_max(dev, ABS_X + i))
+                       if (val > input_abs_get_max(dev, ABS_X + i))
                                input_abs_set_max(dev, ABS_X + i, val);
 #endif
                        if (i % 3)
-                               val = input_abs_max(dev, ABS_X + i) - val;
+                               val = input_abs_get_max(dev, ABS_X + i) - val;
                        input_report_abs(dev, ABS_X + i, val);
                } else {
                        val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
 
 #ifdef TABLET_AUTOADJUST
                for (i = 0; i < ABS_MAX; i++) {
-                       int diff = input_abs_max(input_dev, ABS_X + i) / 10;
+                       int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
                        input_abs_set_min(input_dev, ABS_X + i,
-                               input_abs_min(input_dev, ABS_X + i) + diff)
+                               input_abs_get_min(input_dev, ABS_X + i) + diff);
                        input_abs_set_max(input_dev, ABS_X + i,
-                               input_abs_max(input_dev, ABS_X + i) - diff)
+                               input_abs_get_max(input_dev, ABS_X + i) - diff);
                }
 #endif
 
index 0e53b3bc39afe4d7925d4aec0cea66174dfc1e5e..f32404f991893ef4584ab5540d213945cbb0c738 100644 (file)
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
        clk_put(keypad->clk);
 
        input_unregister_device(keypad->input_dev);
-       input_free_device(keypad->input_dev);
-
        iounmap(keypad->mmio_base);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
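/*
 * Annotation (not part of the patch): input_unregister_device() already
 * releases the driver's reference to the device, so the extra
 * input_free_device() removed above amounted to a double put;
 * input_free_device() is only meant for devices that were never
 * successfully registered.
 */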
index bb53fd33cd1cb64e5d3c108b84eea4e1f11ccff4..0d4266a533a524564adcc85cc546736248753628 100644 (file)
@@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = {
        .minor          = UINPUT_MINOR,
        .name           = UINPUT_NAME,
 };
+MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
+MODULE_ALIAS("devname:" UINPUT_NAME);
 
 static int __init uinput_init(void)
 {
index 83c24cca234a5849545adeda916b0e4a136769fb..d528a2dba06418eb5e80097552463e9abec5db05 100644 (file)
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
 
                fx(0) = value;
                if (mousedev->touch && mousedev->pkt_count >= 2) {
-                       size = input_abs_get_min(dev, ABS_X) -
-                                       input_abs_get_max(dev, ABS_X);
+                       size = input_abs_get_max(dev, ABS_X) -
+                                       input_abs_get_min(dev, ABS_X);
                        if (size == 0)
                                size = 256 * 2;
 
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
                fy(0) = value;
                if (mousedev->touch && mousedev->pkt_count >= 2) {
                        /* use X size for ABS_Y to keep the same scale */
-                       size = input_abs_get_min(dev, ABS_X) -
-                                       input_abs_get_max(dev, ABS_X);
+                       size = input_abs_get_max(dev, ABS_X) -
+                                       input_abs_get_min(dev, ABS_X);
                        if (size == 0)
                                size = 256 * 2;
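/*
 * Annotation (not part of the patch): both hunks fix the sign of the
 * touchpad size computation -- max minus min rather than min minus max --
 * so 'size' is positive and the relative X/Y scaling behaves as intended;
 * the 256 * 2 value remains the fallback for devices reporting a zero
 * range.
 */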
 
index 5dbcbe3a54a610d81d61b6e0b4d227af7751636d..b99b906ea9b1bfef709bec43e61237a74b177949 100644 (file)
@@ -36,12 +36,13 @@ config ISDN_DRV_AVMB1_T1ISA
 
 config ISDN_DRV_AVMB1_B1PCMCIA
        tristate "AVM B1/M1/M2 PCMCIA support"
+       depends on PCMCIA
        help
          Enable support for the PCMCIA version of the AVM B1 card.
 
 config ISDN_DRV_AVMB1_AVM_CS
        tristate "AVM B1/M1/M2 PCMCIA cs module"
-       depends on ISDN_DRV_AVMB1_B1PCMCIA && PCMCIA
+       depends on ISDN_DRV_AVMB1_B1PCMCIA
        help
          Enable the PCMCIA client driver for the AVM B1/M1/M2
          PCMCIA cards.
index 35bc2737412fddaa3323e99ea34a3dd3de762355..2d17e76066bd0b640890d8a0e0ddc5ac7170ec85 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/syscalls.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
+#include <linux/compat.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+#define PMU_IOC_GET_BACKLIGHT32        _IOR('B', 1, compat_size_t)
+#define PMU_IOC_SET_BACKLIGHT32        _IOW('B', 2, compat_size_t)
+#define PMU_IOC_GET_MODEL32    _IOR('B', 3, compat_size_t)
+#define PMU_IOC_HAS_ADB32      _IOR('B', 4, compat_size_t)
+#define PMU_IOC_CAN_SLEEP32    _IOR('B', 5, compat_size_t)
+#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
+
+static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
+{
+       switch (cmd) {
+       case PMU_IOC_SLEEP:
+               break;
+       case PMU_IOC_GET_BACKLIGHT32:
+               cmd = PMU_IOC_GET_BACKLIGHT;
+               break;
+       case PMU_IOC_SET_BACKLIGHT32:
+               cmd = PMU_IOC_SET_BACKLIGHT;
+               break;
+       case PMU_IOC_GET_MODEL32:
+               cmd = PMU_IOC_GET_MODEL;
+               break;
+       case PMU_IOC_HAS_ADB32:
+               cmd = PMU_IOC_HAS_ADB;
+               break;
+       case PMU_IOC_CAN_SLEEP32:
+               cmd = PMU_IOC_CAN_SLEEP;
+               break;
+       case PMU_IOC_GRAB_BACKLIGHT32:
+               cmd = PMU_IOC_GRAB_BACKLIGHT;
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 static const struct file_operations pmu_device_fops = {
        .read           = pmu_read,
        .write          = pmu_write,
        .poll           = pmu_fpoll,
        .unlocked_ioctl = pmu_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = compat_pmu_ioctl,
+#endif
        .open           = pmu_open,
        .release        = pmu_release,
 };
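/*
 * Annotation (not part of the patch): the PMU_IOC_xxx32 numbers above
 * differ from the native ones only in the size field that _IOR/_IOW
 * encode, since the native definitions are assumed to use size_t (8 bytes
 * on a 64-bit kernel) while 32-bit userspace encodes 4.  compat_pmu_ioctl()
 * therefore just rewrites the command to its native counterpart and
 * converts the argument with compat_ptr() before handing off to
 * pmu_unlocked_ioctl(); PMU_IOC_SLEEP needs no translation because it
 * carries no argument.
 */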
index decdeda840d01158f070a2dcf8778c1c749d41a8..fd0830ed10d81cccb53b574336daa0942353b96c 100644 (file)
@@ -1,6 +1,6 @@
 config MANTIS_CORE
        tristate "Mantis/Hopper PCI bridge based devices"
-       depends on PCI && I2C && INPUT
+       depends on PCI && I2C && INPUT && IR_CORE
 
        help
          Support for PCI cards based on the Mantis and Hopper PCi bridge.
index 5a6895320b48a8d7774acfb4e6b239713cc6bd9d..2cc81a54cbf322a49ccbf474f5d41f654faf109d 100644 (file)
@@ -928,6 +928,16 @@ config SMC91X
          The module will be called smc91x.  If you want to compile it as a
          module, say M here and read <file:Documentation/kbuild/modules.txt>.
 
+config PXA168_ETH
+       tristate "Marvell pxa168 ethernet support"
+       depends on CPU_PXA168
+       select PHYLIB
+       help
+         This driver supports the pxa168 Ethernet ports.
+
+         To compile this driver as a module, choose M here. The module
+         will be called pxa168_eth.
+
 config NET_NETX
        tristate "NetX Ethernet support"
        select MII
index 56e8c27f77cebe9ef3b9ac3c1284f7e2975f8448..3e8f150c4b14b0034edb3632b9de33b1338b9635 100644 (file)
@@ -244,6 +244,7 @@ obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_SMSC911X) += smsc911x.o
+obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
index 53af9c93e75c3bca661abfb8dce7aabe96589f52..0c2d96ed561c46ebd63f3c03c119c0751b5204e1 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.52.53-3"
-#define DRV_MODULE_RELDATE      "2010/18/04"
+#define DRV_MODULE_VERSION      "1.52.53-4"
+#define DRV_MODULE_RELDATE      "2010/16/08"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
index b4ec2b02a465cf7d822f23700c119d6e93a2121b..f8c3f08e4ce73fb4d5d37739fa7dd18cfc557da7 100644 (file)
@@ -4328,10 +4328,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
                val |= aeu_gpio_mask;
                REG_WR(bp, offset, val);
                }
+               bp->port.need_hw_lock = 1;
                break;
 
-       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+               bp->port.need_hw_lock = 1;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                /* add SPIO 5 to group 0 */
                {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4341,7 +4343,10 @@ static int bnx2x_init_port(struct bnx2x *bp)
                REG_WR(bp, reg_addr, val);
                }
                break;
-
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+               bp->port.need_hw_lock = 1;
+               break;
        default:
                break;
        }
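/*
 * Annotation (not part of the patch): in the hunk above the missing break
 * after the BCM8727 case appears deliberate -- it sets need_hw_lock and
 * then falls through into the SFX7101 case so both PHY types add SPIO 5
 * to group 0, while BCM8072/BCM8073 get need_hw_lock only.
 */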
index a4a0d2b6eb1c60e116811b31378a426ad00a44e2..d3d4a57e24505f9c36af9ef139bfc9133788a4bd 100644 (file)
@@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
        ew32(IMC, 0xffffffff);
        icr = er32(ICR);
 
-       /* Install any alternate MAC address into RAR0 */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               return ret_val;
+       if (hw->mac.type == e1000_82571) {
+               /* Install any alternate MAC address into RAR0 */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       return ret_val;
 
-       e1000e_set_laa_state_82571(hw, true);
+               e1000e_set_laa_state_82571(hw, true);
+       }
 
        /* Reinitialize the 82571 serdes link state machine */
        if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
 {
        s32 ret_val = 0;
 
-       /*
-        * If there's an alternate MAC address place it in RAR0
-        * so that it will override the Si installed default perm
-        * address.
-        */
-       ret_val = e1000_check_alt_mac_addr_generic(hw);
-       if (ret_val)
-               goto out;
+       if (hw->mac.type == e1000_82571) {
+               /*
+                * If there's an alternate MAC address place it in RAR0
+                * so that it will override the Si installed default perm
+                * address.
+                */
+               ret_val = e1000_check_alt_mac_addr_generic(hw);
+               if (ret_val)
+                       goto out;
+       }
 
        ret_val = e1000_read_mac_addr_generic(hw);
 
@@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = {
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_SWSM_ON_LOAD,
+       .flags2                 = FLAG2_DISABLE_ASPM_L1,
        .pba                    = 20,
        .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
index 307a72f483ee644fb1199776e4b0521739badb46..93b3bedae8d2b2457a88f52b4664fe1b4b6b7dd8 100644 (file)
 #define E1000_FLASH_UPDATES  2000
 
 /* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
 #define NVM_ID_LED_SETTINGS        0x0004
 #define NVM_INIT_CONTROL2_REG      0x000F
 #define NVM_INIT_CONTROL3_PORT_B   0x0014
 /* Mask bits for fields in Word 0x1a of the NVM */
 #define NVM_WORD1A_ASPM_MASK  0x000C
 
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM                    0xBABA
 
index df4a2792293123eba7b1b6f3a589220ce1185ea1..0fd4eb5ac5fb9241f57061ee5eb76dff06e8afbf 100644 (file)
@@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
        u16 offset, nvm_alt_mac_addr_offset, nvm_data;
        u8 alt_mac_addr[ETH_ALEN];
 
+       ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+       if (ret_val)
+               goto out;
+
+       /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
+       if (!((nvm_data & NVM_COMPAT_LOM) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+             (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+               goto out;
+
        ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
                                 &nvm_alt_mac_addr_offset);
        if (ret_val) {
index 99a929964e3cebde15667e59d913debd6d205b20..1846623c6ae65d18b6223ada9be4531809f64a99 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0105"
+#define DRV_VERSION    "EHEA_0106"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -400,6 +400,7 @@ struct ehea_port_res {
        u32 poll_counter;
        struct net_lro_mgr lro_mgr;
        struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
+       int sq_restart_flag;
 };
 
 
index 897719b49f96859b11a1f01f4f45e46c03c4cd71..a333b42111b8c2ba20b92eca94bf5648704c4f9a 100644 (file)
@@ -776,6 +776,53 @@ static int ehea_proc_rwqes(struct net_device *dev,
        return processed;
 }
 
+#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
+
+static void reset_sq_restart_flag(struct ehea_port *port)
+{
+       int i;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               pr->sq_restart_flag = 0;
+       }
+}
+
+static void check_sqs(struct ehea_port *port)
+{
+       struct ehea_swqe *swqe;
+       int swqe_index;
+       int i, k;
+
+       for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               k = 0;
+               swqe = ehea_get_swqe(pr->qp, &swqe_index);
+               memset(swqe, 0, SWQE_HEADER_SIZE);
+               atomic_dec(&pr->swqe_avail);
+
+               swqe->tx_control |= EHEA_SWQE_PURGE;
+               swqe->wr_id = SWQE_RESTART_CHECK;
+               swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
+               swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
+               swqe->immediate_data_length = 80;
+
+               ehea_post_swqe(pr->qp, swqe);
+
+               while (pr->sq_restart_flag == 0) {
+                       msleep(5);
+                       if (++k == 100) {
+                               ehea_error("HW/SW queues out of sync");
+                               ehea_schedule_port_reset(pr->port);
+                               return;
+                       }
+               }
+       }
+
+       return;
+}
+
+
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
        struct sk_buff *skb;
@@ -793,6 +840,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
                cqe_counter++;
                rmb();
+
+               if (cqe->wr_id == SWQE_RESTART_CHECK) {
+                       pr->sq_restart_flag = 1;
+                       swqe_av++;
+                       break;
+               }
+
                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Bad send completion status=0x%04X",
                                   cqe->status);
@@ -2675,8 +2729,10 @@ static void ehea_flush_sq(struct ehea_port *port)
                int k = 0;
                while (atomic_read(&pr->swqe_avail) < swqe_max) {
                        msleep(5);
-                       if (++k == 20)
+                       if (++k == 20) {
+                               ehea_error("WARNING: sq not flushed completely");
                                break;
+                       }
                }
        }
 }
@@ -2917,6 +2973,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        port_napi_disable(port);
                                        mutex_unlock(&port->port_lock);
                                }
+                               reset_sq_restart_flag(port);
                        }
 
                        /* Unregister old memory region */
@@ -2951,6 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                                mutex_lock(&port->port_lock);
                                                port_napi_enable(port);
                                                ret = ehea_restart_qps(dev);
+                                               check_sqs(port);
                                                if (!ret)
                                                        netif_wake_queue(dev);
                                                mutex_unlock(&port->port_lock);
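/*
 * Annotation (not part of the patch): the new code implements a small
 * handshake to detect send queues that lost sync with the hardware after
 * memory re-registration.  check_sqs() posts a purge WQE tagged with the
 * SWQE_RESTART_CHECK cookie on every send queue and polls sq_restart_flag;
 * ehea_proc_cqes() sets that flag when the matching completion
 * (wr_id == SWQE_RESTART_CHECK) comes back.  If the flag is not set within
 * about half a second (100 iterations of msleep(5)), the port is scheduled
 * for reset.  reset_sq_restart_flag() clears the flags before the
 * re-registration cycle so a stale value cannot satisfy the wait.
 */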
index 3995fafc1e08bb3adb8658bd4d0098c4aff09f60..8c6c1e2a87503e2f647fedd44c77dbb2ffcfb3c9 100644 (file)
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
 }
 
 #if defined(CONFIG_MAGIC_SYSRQ)
-static void emac_sysrq_handler(int key, struct tty_struct *tty)
+static void emac_sysrq_handler(int key)
 {
        emac_dbg_dump_all();
 }
index 2602852cc55a6037c5160575f3620632397fcd2c..4734c939ad03574a63e6dd0bc9ccf6d8cf29c06a 100644 (file)
@@ -1113,7 +1113,8 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        struct vio_dev *viodev = adapter->vdev;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
-       int i;
+       int i, rc;
+       int need_restart = 0;
 
        if (new_mtu < IBMVETH_MAX_MTU)
                return -EINVAL;
@@ -1127,35 +1128,32 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 
        /* Deactivate all the buffer pools so that the next loop can activate
           only the buffer pools necessary to hold the new MTU */
-       for (i = 0; i < IbmVethNumBufferPools; i++)
-               if (adapter->rx_buff_pool[i].active) {
-                       ibmveth_free_buffer_pool(adapter,
-                                                &adapter->rx_buff_pool[i]);
-                       adapter->rx_buff_pool[i].active = 0;
-               }
+       if (netif_running(adapter->netdev)) {
+               need_restart = 1;
+               adapter->pool_config = 1;
+               ibmveth_close(adapter->netdev);
+               adapter->pool_config = 0;
+       }
 
        /* Look for an active buffer pool that can hold the new MTU */
        for(i = 0; i<IbmVethNumBufferPools; i++) {
                adapter->rx_buff_pool[i].active = 1;
 
                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
-                       if (netif_running(adapter->netdev)) {
-                               adapter->pool_config = 1;
-                               ibmveth_close(adapter->netdev);
-                               adapter->pool_config = 0;
-                               dev->mtu = new_mtu;
-                               vio_cmo_set_dev_desired(viodev,
-                                               ibmveth_get_desired_dma
-                                               (viodev));
-                               return ibmveth_open(adapter->netdev);
-                       }
                        dev->mtu = new_mtu;
                        vio_cmo_set_dev_desired(viodev,
                                                ibmveth_get_desired_dma
                                                (viodev));
+                       if (need_restart) {
+                               return ibmveth_open(adapter->netdev);
+                       }
                        return 0;
                }
        }
+
+       if (need_restart && (rc = ibmveth_open(adapter->netdev)))
+               return rc;
+
        return -EINVAL;
 }
 
index c7b624711f5ecfc51981845857bda27ef264fa3e..bdf2149e529689b1135603904da6343fcbbbebc2 100644 (file)
@@ -902,8 +902,8 @@ temac_poll_controller(struct net_device *ndev)
        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);
 
-       ll_temac_rx_irq(lp->tx_irq, lp);
-       ll_temac_tx_irq(lp->rx_irq, lp);
+       ll_temac_rx_irq(lp->tx_irq, ndev);
+       ll_temac_tx_irq(lp->rx_irq, ndev);
 
        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
index ffa1b9ce1cc5a8f4c474136940df4de36d853c55..6dca3574e35507a94ca7e5b1b518a06199e99e03 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 73
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
+#define _NETXEN_NIC_LINUX_SUBVERSION 74
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.74"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
index c865dda2adf15f8b59a579640d416099e9fabc0c..cabae7bb1fc6777d3366c5a8728feadcd53d0aa3 100644 (file)
@@ -1805,8 +1805,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
        netxen_ctx_msg msg = 0;
        struct list_head *head;
 
-       spin_lock(&rds_ring->lock);
-
        producer = rds_ring->producer;
 
        head = &rds_ring->free_list;
@@ -1853,8 +1851,6 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
                                        NETXEN_RCV_PRODUCER_OFFSET), msg);
                }
        }
-
-       spin_unlock(&rds_ring->lock);
 }
 
 static void
index fd86e18604e636a5b1ede55a077d56dd2ba06713..cb30df106a2c3d7981d74c085a49194e790c86b9 100644 (file)
@@ -2032,8 +2032,6 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
        struct netxen_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
new file mode 100644 (file)
index 0000000..ecc64d7
--- /dev/null
@@ -0,0 +1,1666 @@
+/*
+ * PXA168 ethernet driver.
+ * Most of the code is derived from mv643xx ethernet driver.
+ *
+ * Copyright (C) 2010 Marvell International Ltd.
+ *             Sachin Sanap <ssanap@marvell.com>
+ *             Philip Rakity <prakity@marvell.com>
+ *             Mark Brown <markb@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/etherdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <linux/pxa168_eth.h>
+
+#define DRIVER_NAME    "pxa168-eth"
+#define DRIVER_VERSION "0.3"
+
+/*
+ * Registers
+ */
+
+#define PHY_ADDRESS            0x0000
+#define SMI                    0x0010
+#define PORT_CONFIG            0x0400
+#define PORT_CONFIG_EXT                0x0408
+#define PORT_COMMAND           0x0410
+#define PORT_STATUS            0x0418
+#define HTPR                   0x0428
+#define SDMA_CONFIG            0x0440
+#define SDMA_CMD               0x0448
+#define INT_CAUSE              0x0450
+#define INT_W_CLEAR            0x0454
+#define INT_MASK               0x0458
+#define ETH_F_RX_DESC_0                0x0480
+#define ETH_C_RX_DESC_0                0x04A0
+#define ETH_C_TX_DESC_1                0x04E4
+
+/* smi register */
+#define SMI_BUSY               (1 << 28)       /* 0 - Write, 1 - Read  */
+#define SMI_R_VALID            (1 << 27)       /* 0 - Write, 1 - Read  */
+#define SMI_OP_W               (0 << 26)       /* Write operation      */
+#define SMI_OP_R               (1 << 26)       /* Read operation */
+
+#define PHY_WAIT_ITERATIONS    10
+
+#define PXA168_ETH_PHY_ADDR_DEFAULT    0
+/* RX & TX descriptor command */
+#define BUF_OWNED_BY_DMA       (1 << 31)
+
+/* RX descriptor status */
+#define RX_EN_INT              (1 << 23)
+#define RX_FIRST_DESC          (1 << 17)
+#define RX_LAST_DESC           (1 << 16)
+#define RX_ERROR               (1 << 15)
+
+/* TX descriptor command */
+#define TX_EN_INT              (1 << 23)
+#define TX_GEN_CRC             (1 << 22)
+#define TX_ZERO_PADDING                (1 << 18)
+#define TX_FIRST_DESC          (1 << 17)
+#define TX_LAST_DESC           (1 << 16)
+#define TX_ERROR               (1 << 15)
+
+/* SDMA_CMD */
+#define SDMA_CMD_AT            (1 << 31)
+#define SDMA_CMD_TXDL          (1 << 24)
+#define SDMA_CMD_TXDH          (1 << 23)
+#define SDMA_CMD_AR            (1 << 15)
+#define SDMA_CMD_ERD           (1 << 7)
+
+/* Bit definitions of the Port Config Reg */
+#define PCR_HS                 (1 << 12)
+#define PCR_EN                 (1 << 7)
+#define PCR_PM                 (1 << 0)
+
+/* Bit definitions of the Port Config Extend Reg */
+#define PCXR_2BSM              (1 << 28)
+#define PCXR_DSCP_EN           (1 << 21)
+#define PCXR_MFL_1518          (0 << 14)
+#define PCXR_MFL_1536          (1 << 14)
+#define PCXR_MFL_2048          (2 << 14)
+#define PCXR_MFL_64K           (3 << 14)
+#define PCXR_FLP               (1 << 11)
+#define PCXR_PRIO_TX_OFF       3
+#define PCXR_TX_HIGH_PRI       (7 << PCXR_PRIO_TX_OFF)
+
+/* Bit definitions of the SDMA Config Reg */
+#define SDCR_BSZ_OFF           12
+#define SDCR_BSZ8              (3 << SDCR_BSZ_OFF)
+#define SDCR_BSZ4              (2 << SDCR_BSZ_OFF)
+#define SDCR_BSZ2              (1 << SDCR_BSZ_OFF)
+#define SDCR_BSZ1              (0 << SDCR_BSZ_OFF)
+#define SDCR_BLMR              (1 << 6)
+#define SDCR_BLMT              (1 << 7)
+#define SDCR_RIFB              (1 << 9)
+#define SDCR_RC_OFF            2
+#define SDCR_RC_MAX_RETRANS    (0xf << SDCR_RC_OFF)
+
+/*
+ * Bit definitions are the same for the Interrupt Cause Reg
+ * and the Interrupt Mask Reg
+ */
+#define ICR_RXBUF              (1 << 0)
+#define ICR_TXBUF_H            (1 << 2)
+#define ICR_TXBUF_L            (1 << 3)
+#define ICR_TXEND_H            (1 << 6)
+#define ICR_TXEND_L            (1 << 7)
+#define ICR_RXERR              (1 << 8)
+#define ICR_TXERR_H            (1 << 10)
+#define ICR_TXERR_L            (1 << 11)
+#define ICR_TX_UDR             (1 << 13)
+#define ICR_MII_CH             (1 << 28)
+
+#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
+                               ICR_TXERR_H  | ICR_TXERR_L |\
+                               ICR_TXEND_H  | ICR_TXEND_L |\
+                               ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)
+
+#define ETH_HW_IP_ALIGN                2       /* hw aligns IP header */
+
+#define NUM_RX_DESCS           64
+#define NUM_TX_DESCS           64
+
+#define HASH_ADD               0
+#define HASH_DELETE            1
+#define HASH_ADDR_TABLE_SIZE   0x4000  /* 16K (1/2K address - PCR_HS == 1) */
+#define HOP_NUMBER             12
+
+/* Bit definitions for Port status */
+#define PORT_SPEED_100         (1 << 0)
+#define FULL_DUPLEX            (1 << 1)
+#define FLOW_CONTROL_ENABLED   (1 << 2)
+#define LINK_UP                        (1 << 3)
+
+/* Bit definitions for work to be done */
+#define WORK_LINK              (1 << 0)
+#define WORK_TX_DONE           (1 << 1)
+
+/*
+ * Misc definitions.
+ */
+#define SKB_DMA_REALIGN                ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+
+struct rx_desc {
+       u32 cmd_sts;            /* Descriptor command status            */
+       u16 byte_cnt;           /* Descriptor buffer byte count         */
+       u16 buf_size;           /* Buffer size                          */
+       u32 buf_ptr;            /* Descriptor buffer pointer            */
+       u32 next_desc_ptr;      /* Next descriptor pointer              */
+};
+
+struct tx_desc {
+       u32 cmd_sts;            /* Command/status field                 */
+       u16 reserved;
+       u16 byte_cnt;           /* buffer byte count                    */
+       u32 buf_ptr;            /* pointer to buffer for this descriptor */
+       u32 next_desc_ptr;      /* Pointer to next descriptor           */
+};
+
+struct pxa168_eth_private {
+       int port_num;           /* User Ethernet port number    */
+
+       int rx_resource_err;    /* Rx ring resource error flag */
+
+       /* Next available and first returning Rx resource */
+       int rx_curr_desc_q, rx_used_desc_q;
+
+       /* Next available and first returning Tx resource */
+       int tx_curr_desc_q, tx_used_desc_q;
+
+       struct rx_desc *p_rx_desc_area;
+       dma_addr_t rx_desc_dma;
+       int rx_desc_area_size;
+       struct sk_buff **rx_skb;
+
+       struct tx_desc *p_tx_desc_area;
+       dma_addr_t tx_desc_dma;
+       int tx_desc_area_size;
+       struct sk_buff **tx_skb;
+
+       struct work_struct tx_timeout_task;
+
+       struct net_device *dev;
+       struct napi_struct napi;
+       u8 work_todo;
+       int skb_size;
+
+       struct net_device_stats stats;
+       /* Size of Tx Ring per queue */
+       int tx_ring_size;
+       /* Number of tx descriptors in use */
+       int tx_desc_count;
+       /* Size of Rx Ring per queue */
+       int rx_ring_size;
+       /* Number of rx descriptors in use */
+       int rx_desc_count;
+
+       /*
+        * Used in case the RX ring is empty, which can occur when the
+        * system runs out of resources (skbs)
+        */
+       struct timer_list timeout;
+       struct mii_bus *smi_bus;
+       struct phy_device *phy;
+
+       /* clock */
+       struct clk *clk;
+       struct pxa168_eth_platform_data *pd;
+       /*
+        * Ethernet controller base address.
+        */
+       void __iomem *base;
+
+       /* Pointer to the hardware address filter table */
+       void *htpr;
+       dma_addr_t htpr_dma;
+};
+
+struct addr_table_entry {
+       __le32 lo;
+       __le32 hi;
+};
+
+/* Bit fields of a Hash Table Entry */
+enum hash_table_entry {
+       HASH_ENTRY_VALID = 1,
+       SKIP = 2,
+       HASH_ENTRY_RECEIVE_DISCARD = 4,
+       HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
+};
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_init_hw(struct pxa168_eth_private *pep);
+static void eth_port_reset(struct net_device *dev);
+static void eth_port_start(struct net_device *dev);
+static int pxa168_eth_open(struct net_device *dev);
+static int pxa168_eth_stop(struct net_device *dev);
+static int ethernet_phy_setup(struct net_device *dev);
+
+static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
+{
+       return readl(pep->base + offset);
+}
+
+static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
+{
+       writel(data, pep->base + offset);
+}
+
+static void abort_dma(struct pxa168_eth_private *pep)
+{
+       int delay;
+       int max_retries = 40;
+
+       do {
+               wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
+               udelay(100);
+
+               delay = 10;
+               while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
+                      && delay-- > 0) {
+                       udelay(10);
+               }
+       } while (max_retries-- > 0 && delay <= 0);
+
+       if (max_retries <= 0)
+               printk(KERN_ERR "%s : DMA Stuck\n", __func__);
+}
+
+static int ethernet_phy_get(struct pxa168_eth_private *pep)
+{
+       unsigned int reg_data;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+
+       return (reg_data >> (5 * pep->port_num)) & 0x1f;
+}
+
+static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr)
+{
+       u32 reg_data;
+       int addr_shift = 5 * pep->port_num;
+
+       reg_data = rdl(pep, PHY_ADDRESS);
+       reg_data &= ~(0x1f << addr_shift);
+       reg_data |= (phy_addr & 0x1f) << addr_shift;
+       wrl(pep, PHY_ADDRESS, reg_data);
+}
+
+static void ethernet_phy_reset(struct pxa168_eth_private *pep)
+{
+       int data;
+
+       data = phy_read(pep->phy, MII_BMCR);
+       if (data < 0)
+               return;
+
+       data |= BMCR_RESET;
+       if (phy_write(pep->phy, MII_BMCR, data) < 0)
+               return;
+
+       do {
+               data = phy_read(pep->phy, MII_BMCR);
+       } while (data >= 0 && data & BMCR_RESET);
+}
+
+static void rxq_refill(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct sk_buff *skb;
+       struct rx_desc *p_used_rx_desc;
+       int used_rx_desc;
+
+       while (pep->rx_desc_count < pep->rx_ring_size) {
+               int size;
+
+               skb = dev_alloc_skb(pep->skb_size);
+               if (!skb)
+                       break;
+               if (SKB_DMA_REALIGN)
+                       skb_reserve(skb, SKB_DMA_REALIGN);
+               pep->rx_desc_count++;
+               /* Get 'used' Rx descriptor */
+               used_rx_desc = pep->rx_used_desc_q;
+               p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
+               size = skb->end - skb->data;
+               p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+                                                        skb->data,
+                                                        size,
+                                                        DMA_FROM_DEVICE);
+               p_used_rx_desc->buf_size = size;
+               pep->rx_skb[used_rx_desc] = skb;
+
+               /* Return the descriptor to DMA ownership */
+               wmb();
+               p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
+               wmb();
+
+               /* Move the used descriptor pointer to the next descriptor */
+               pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
+
+               /* Any Rx return cancels the Rx resource error status */
+               pep->rx_resource_err = 0;
+
+               skb_reserve(skb, ETH_HW_IP_ALIGN);
+       }
+
+       /*
+        * If the RX ring is still empty (no skbs could be allocated),
+        * set a timer to try allocating again at a later time.
+        */
+       if (pep->rx_desc_count == 0) {
+               pep->timeout.expires = jiffies + (HZ / 10);
+               add_timer(&pep->timeout);
+       }
+}
+
+static inline void rxq_refill_timer_wrapper(unsigned long data)
+{
+       struct pxa168_eth_private *pep = (void *)data;
+       napi_schedule(&pep->napi);
+}
+
+static inline u8 flip_8_bits(u8 x)
+{
+       return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
+           | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
+           | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
+           | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
+}
+
+static void nibble_swap_every_byte(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++) {
+               mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
+                               ((mac_addr[i] & 0xf0) >> 4);
+       }
+}
+
+static void inverse_every_nibble(unsigned char *mac_addr)
+{
+       int i;
+       for (i = 0; i < ETH_ALEN; i++)
+               mac_addr[i] = flip_8_bits(mac_addr[i]);
+}
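
A quick standalone check of what these two helpers do to a single byte; the 0x12 sample value is arbitrary and the program only restates the transforms above (editorial sketch, not part of the patch):

#include <stdio.h>

/* same transform as flip_8_bits() above: reverse the bit order
 * within each nibble of the byte */
static unsigned char flip_8_bits(unsigned char x)
{
        return ((x & 0x01) << 3) | ((x & 0x02) << 1)
             | ((x & 0x04) >> 1) | ((x & 0x08) >> 3)
             | ((x & 0x10) << 3) | ((x & 0x20) << 1)
             | ((x & 0x40) >> 1) | ((x & 0x80) >> 3);
}

int main(void)
{
        unsigned char b = 0x12;
        unsigned char swapped = ((b & 0x0f) << 4) | ((b & 0xf0) >> 4);

        printf("nibble swap : 0x%02x -> 0x%02x\n", (unsigned)b, (unsigned)swapped);      /* 0x21 */
        printf("nibble flip : 0x%02x -> 0x%02x\n", (unsigned)b, (unsigned)flip_8_bits(b)); /* 0x84 */
        return 0;
}
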
+
+/*
+ * ----------------------------------------------------------------------------
+ * Calculate the hash table index for the given address.
+ * Inputs
+ * mac_addr_orig    - MAC address.
+ * Outputs
+ * return the calculated hash table index.
+ */
+static u32 hash_function(unsigned char *mac_addr_orig)
+{
+       u32 hash_result;
+       u32 addr0;
+       u32 addr1;
+       u32 addr2;
+       u32 addr3;
+       unsigned char mac_addr[ETH_ALEN];
+
+       /* Make a copy of the MAC address since we are going to perform bit
+        * operations on it
+        */
+       memcpy(mac_addr, mac_addr_orig, ETH_ALEN);
+
+       nibble_swap_every_byte(mac_addr);
+       inverse_every_nibble(mac_addr);
+
+       addr0 = (mac_addr[5] >> 2) & 0x3f;
+       addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
+       addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
+       addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);
+
+       hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
+       hash_result = hash_result & 0x07ff;
+       return hash_result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Add or delete an entry in the address table.
+ * Inputs
+ * pep - ETHERNET port private data.
+ * mac_addr - MAC address.
+ * skip - if 1, skip this address. Used when deleting an entry that is
+ *       part of a chain in the hash table: we can't simply delete the
+ *       entry since that would break the chain, so the tables need to
+ *       be defragmented from time to time instead.
+ * rd   - 0 Discard packet upon match.
+ *      - 1 Receive packet upon match.
+ * Outputs
+ * The address table entry is added/deleted.
+ * 0 if success.
+ * -ENOSPC if the table section is full.
+ */
+static int add_del_hash_entry(struct pxa168_eth_private *pep,
+                             unsigned char *mac_addr,
+                             u32 rd, u32 skip, int del)
+{
+       struct addr_table_entry *entry, *start;
+       u32 new_high;
+       u32 new_low;
+       u32 i;
+
+       new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
+           | (((mac_addr[1] >> 0) & 0xf) << 11)
+           | (((mac_addr[0] >> 4) & 0xf) << 7)
+           | (((mac_addr[0] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 4) & 0x1) << 31)
+           | (((mac_addr[3] >> 0) & 0xf) << 27)
+           | (((mac_addr[2] >> 4) & 0xf) << 23)
+           | (((mac_addr[2] >> 0) & 0xf) << 19)
+           | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
+           | HASH_ENTRY_VALID;
+
+       new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
+           | (((mac_addr[5] >> 0) & 0xf) << 11)
+           | (((mac_addr[4] >> 4) & 0xf) << 7)
+           | (((mac_addr[4] >> 0) & 0xf) << 3)
+           | (((mac_addr[3] >> 5) & 0x7) << 0);
+
+       /*
+        * Pick the appropriate table, start scanning for free/reusable
+        * entries at the index obtained by hashing the specified MAC address
+        */
+       start = (struct addr_table_entry *)(pep->htpr);
+       entry = start + hash_function(mac_addr);
+       for (i = 0; i < HOP_NUMBER; i++) {
+               if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
+                       break;
+               } else {
+                       /* if same address put in same position */
+                       if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
+                               (new_low & 0xfffffff8)) &&
+                               (le32_to_cpu(entry->hi) == new_high)) {
+                               break;
+                       }
+               }
+               if (entry == start + 0x7ff)
+                       entry = start;
+               else
+                       entry++;
+       }
+
+       if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
+           (le32_to_cpu(entry->hi) != new_high) && del)
+               return 0;
+
+       if (i == HOP_NUMBER) {
+               if (!del) {
+                       printk(KERN_INFO "%s: table section is full, need to "
+                                       "move to 16kB implementation?\n",
+                                        __FILE__);
+                       return -ENOSPC;
+               } else
+                       return 0;
+       }
+
+       /*
+        * Update the selected entry
+        */
+       if (del) {
+               entry->hi = 0;
+               entry->lo = 0;
+       } else {
+               entry->hi = cpu_to_le32(new_high);
+               entry->lo = cpu_to_le32(new_low);
+       }
+
+       return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ *  Create an address table entry from the MAC address info
+ *  found in the specified net_device struct
+ *
+ *  Input : pointer to ethernet interface network device structure
+ *  Output : N/A
+ */
+static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
+                                         unsigned char *oaddr,
+                                         unsigned char *addr)
+{
+       /* Delete old entry */
+       if (oaddr)
+               add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
+       /* Add new entry */
+       add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
+}
+
+static int init_hash_table(struct pxa168_eth_private *pep)
+{
+       /*
+        * The hardware expects the CPU to build a hash table based on a
+        * predefined hash function and to populate it based on the hardware
+        * address. The location of the hash table is identified by a 32-bit
+        * pointer stored in the HTPR internal register. Two possible sizes
+        * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
+        * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
+        * currently only support 1/2kB.
+        */
+       /* TODO: Add support for the 8kB hash table and the alternative hash
+        * function. The driver can dynamically switch to them if the 1/2kB
+        * hash table is full.
+        */
+       if (pep->htpr == NULL) {
+               pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+                                             HASH_ADDR_TABLE_SIZE,
+                                             &pep->htpr_dma, GFP_KERNEL);
+               if (pep->htpr == NULL)
+                       return -ENOMEM;
+       }
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       wrl(pep, HTPR, pep->htpr_dma);
+       return 0;
+}
+
+static void pxa168_eth_set_rx_mode(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       u32 val;
+
+       val = rdl(pep, PORT_CONFIG);
+       if (dev->flags & IFF_PROMISC)
+               val |= PCR_PM;
+       else
+               val &= ~PCR_PM;
+       wrl(pep, PORT_CONFIG, val);
+
+       /*
+        * Remove the old list of MAC addresses and add dev->addr
+        * and the multicast addresses.
+        */
+       memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       netdev_for_each_mc_addr(ha, dev)
+               update_hash_table_mac_address(pep, NULL, ha->addr);
+}
+
+static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct sockaddr *sa = addr;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned char oldMac[ETH_ALEN];
+
+       if (!is_valid_ether_addr(sa->sa_data))
+               return -EINVAL;
+       memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+       netif_addr_lock_bh(dev);
+       update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
+       netif_addr_unlock_bh(dev);
+       return 0;
+}
+
+static void eth_port_start(struct net_device *dev)
+{
+       unsigned int val = 0;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int tx_curr_desc, rx_curr_desc;
+
+       /* Perform PHY reset, if there is a PHY. */
+       if (pep->phy != NULL) {
+               struct ethtool_cmd cmd;
+
+               pxa168_get_settings(pep->dev, &cmd);
+               ethernet_phy_reset(pep);
+               pxa168_set_settings(pep->dev, &cmd);
+       }
+
+       /* Assignment of Tx CTRP of given queue */
+       tx_curr_desc = pep->tx_curr_desc_q;
+       wrl(pep, ETH_C_TX_DESC_1,
+           (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));
+
+       /* Assignment of Rx CRDP of given queue */
+       rx_curr_desc = pep->rx_curr_desc_q;
+       wrl(pep, ETH_C_RX_DESC_0,
+           (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+
+       wrl(pep, ETH_F_RX_DESC_0,
+           (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Enable all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, ALL_INTS);
+
+       val = rdl(pep, PORT_CONFIG);
+       val |= PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+
+       /* Start RX DMA engine */
+       val = rdl(pep, SDMA_CMD);
+       val |= SDMA_CMD_ERD;
+       wrl(pep, SDMA_CMD, val);
+}
+
+static void eth_port_reset(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       unsigned int val = 0;
+
+       /* Stop all interrupts for receive, transmit and error. */
+       wrl(pep, INT_MASK, 0);
+
+       /* Clear all interrupts */
+       wrl(pep, INT_CAUSE, 0);
+
+       /* Stop RX DMA */
+       val = rdl(pep, SDMA_CMD);
+       val &= ~SDMA_CMD_ERD;   /* abort dma command */
+
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+
+       /* Disable port */
+       val = rdl(pep, PORT_CONFIG);
+       val &= ~PCR_EN;
+       wrl(pep, PORT_CONFIG, val);
+}
+
+/*
+ * txq_reclaim - Free the tx desc data for completed descriptors
+ * If force is non-zero, frees uncompleted descriptors as well
+ */
+static int txq_reclaim(struct net_device *dev, int force)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *desc;
+       u32 cmd_sts;
+       struct sk_buff *skb;
+       int tx_index;
+       dma_addr_t addr;
+       int count;
+       int released = 0;
+
+       netif_tx_lock(dev);
+
+       pep->work_todo &= ~WORK_TX_DONE;
+       while (pep->tx_desc_count > 0) {
+               tx_index = pep->tx_used_desc_q;
+               desc = &pep->p_tx_desc_area[tx_index];
+               cmd_sts = desc->cmd_sts;
+               if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
+                       if (released > 0) {
+                               goto txq_reclaim_end;
+                       } else {
+                               released = -1;
+                               goto txq_reclaim_end;
+                       }
+               }
+               pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
+               pep->tx_desc_count--;
+               addr = desc->buf_ptr;
+               count = desc->byte_cnt;
+               skb = pep->tx_skb[tx_index];
+               if (skb)
+                       pep->tx_skb[tx_index] = NULL;
+
+               if (cmd_sts & TX_ERROR) {
+                       if (net_ratelimit())
+                               printk(KERN_ERR "%s: Error in TX\n", dev->name);
+                       dev->stats.tx_errors++;
+               }
+               dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+               if (skb)
+                       dev_kfree_skb_irq(skb);
+               released++;
+       }
+txq_reclaim_end:
+       netif_tx_unlock(dev);
+       return released;
+}
+
+static void pxa168_eth_tx_timeout(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       printk(KERN_INFO "%s: TX timeout  desc_count %d\n",
+              dev->name, pep->tx_desc_count);
+
+       schedule_work(&pep->tx_timeout_task);
+}
+
+static void pxa168_eth_tx_timeout_task(struct work_struct *work)
+{
+       struct pxa168_eth_private *pep = container_of(work,
+                                                struct pxa168_eth_private,
+                                                tx_timeout_task);
+       struct net_device *dev = pep->dev;
+       pxa168_eth_stop(dev);
+       pxa168_eth_open(dev);
+}
+
+static int rxq_process(struct net_device *dev, int budget)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       unsigned int received_packets = 0;
+       struct sk_buff *skb;
+
+       while (budget-- > 0) {
+               int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
+               struct rx_desc *rx_desc;
+               unsigned int cmd_sts;
+
+               /* Do not process Rx ring in case of Rx ring resource error */
+               if (pep->rx_resource_err)
+                       break;
+               rx_curr_desc = pep->rx_curr_desc_q;
+               rx_used_desc = pep->rx_used_desc_q;
+               rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
+               cmd_sts = rx_desc->cmd_sts;
+               rmb();
+               if (cmd_sts & (BUF_OWNED_BY_DMA))
+                       break;
+               skb = pep->rx_skb[rx_curr_desc];
+               pep->rx_skb[rx_curr_desc] = NULL;
+
+               rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
+               pep->rx_curr_desc_q = rx_next_curr_desc;
+
+               /* Rx descriptors exhausted: set the Rx ring resource error flag */
+               if (rx_next_curr_desc == rx_used_desc)
+                       pep->rx_resource_err = 1;
+               pep->rx_desc_count--;
+               dma_unmap_single(NULL, rx_desc->buf_ptr,
+                                rx_desc->buf_size,
+                                DMA_FROM_DEVICE);
+               received_packets++;
+               /*
+                * Update statistics.
+                * Note byte count includes 4 byte CRC count
+                */
+               stats->rx_packets++;
+               stats->rx_bytes += rx_desc->byte_cnt;
+               /*
+                * If a packet was received without the first/last bits set, or
+                * with the error summary bit set, the packet needs to be dropped.
+                */
+               if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                    (RX_FIRST_DESC | RX_LAST_DESC))
+                   || (cmd_sts & RX_ERROR)) {
+
+                       stats->rx_dropped++;
+                       if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
+                           (RX_FIRST_DESC | RX_LAST_DESC)) {
+                               if (net_ratelimit())
+                                       printk(KERN_ERR
+                                              "%s: Rx pkt on multiple desc\n",
+                                              dev->name);
+                       }
+                       if (cmd_sts & RX_ERROR)
+                               stats->rx_errors++;
+                       dev_kfree_skb_irq(skb);
+               } else {
+                       /*
+                        * The -4 is for the CRC in the trailer of the
+                        * received packet
+                        */
+                       skb_put(skb, rx_desc->byte_cnt - 4);
+                       skb->protocol = eth_type_trans(skb, dev);
+                       netif_receive_skb(skb);
+               }
+               dev->last_rx = jiffies;
+       }
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       return received_packets;
+}
+
+static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
+                                    struct net_device *dev)
+{
+       u32 icr;
+       int ret = 0;
+
+       icr = rdl(pep, INT_CAUSE);
+       if (icr == 0)
+               return IRQ_NONE;
+
+       wrl(pep, INT_CAUSE, ~icr);
+       if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
+               pep->work_todo |= WORK_TX_DONE;
+               ret = 1;
+       }
+       if (icr & ICR_RXBUF)
+               ret = 1;
+       if (icr & ICR_MII_CH) {
+               pep->work_todo |= WORK_LINK;
+               ret = 1;
+       }
+       return ret;
+}
+
+static void handle_link_event(struct pxa168_eth_private *pep)
+{
+       struct net_device *dev = pep->dev;
+       u32 port_status;
+       int speed;
+       int duplex;
+       int fc;
+
+       port_status = rdl(pep, PORT_STATUS);
+       if (!(port_status & LINK_UP)) {
+               if (netif_carrier_ok(dev)) {
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netif_carrier_off(dev);
+                       txq_reclaim(dev, 1);
+               }
+               return;
+       }
+       if (port_status & PORT_SPEED_100)
+               speed = 100;
+       else
+               speed = 10;
+
+       duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
+       fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
+       printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+              "flow control %sabled\n", dev->name,
+              speed, duplex ? "full" : "half", fc ? "en" : "dis");
+       if (!netif_carrier_ok(dev))
+               netif_carrier_on(dev);
+}
+
+static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
+{
+       struct net_device *dev = (struct net_device *)dev_id;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (unlikely(!pxa168_eth_collect_events(pep, dev)))
+               return IRQ_NONE;
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       napi_schedule(&pep->napi);
+       return IRQ_HANDLED;
+}
+
+static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       /*
+        * Reserve 2+14 bytes for an ethernet header (the hardware
+        * automatically prepends 2 bytes of dummy data to each
+        * received packet), 16 bytes for up to four VLAN tags, and
+        * 4 bytes for the trailing FCS -- 36 bytes total.
+        */
+       skb_size = pep->dev->mtu + 36;
+
+       /*
+        * Make sure that the skb size is a multiple of 8 bytes, as
+        * the lower three bits of the receive descriptor's buffer
+        * size field are ignored by the hardware.
+        */
+       pep->skb_size = (skb_size + 7) & ~7;
+
+       /*
+        * If NET_SKB_PAD is smaller than a cache line,
+        * netdev_alloc_skb() will cause skb->data to be misaligned
+        * to a cache line boundary.  If this is the case, include
+        * some extra space to allow re-aligning the data area.
+        */
+       pep->skb_size += SKB_DMA_REALIGN;
+
+}
+
+static int set_port_config_ext(struct pxa168_eth_private *pep)
+{
+       int skb_size;
+
+       pxa168_eth_recalc_skb_size(pep);
+       if  (pep->skb_size <= 1518)
+               skb_size = PCXR_MFL_1518;
+       else if (pep->skb_size <= 1536)
+               skb_size = PCXR_MFL_1536;
+       else if (pep->skb_size <= 2048)
+               skb_size = PCXR_MFL_2048;
+       else
+               skb_size = PCXR_MFL_64K;
+
+       /* Extended Port Configuration */
+       wrl(pep,
+           PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */
+           PCXR_DSCP_EN |               /* Enable DSCP in IP */
+           skb_size | PCXR_FLP |        /* do not force link pass */
+           PCXR_TX_HIGH_PRI);           /* Transmit - high priority queue */
+
+       return 0;
+}
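
A worked example of the sizing above (editorial, assuming SKB_DMA_REALIGN evaluates to 0 on the platform): for the default MTU of 1500 the overhead is 2 + 14 + 16 + 4 = 36 bytes, so skb_size rounds to 1536, which falls in the PCXR_MFL_1536 bucket selected by set_port_config_ext():

#include <stdio.h>

int main(void)
{
        /* editorial example: MTU 1500, assuming SKB_DMA_REALIGN == 0 */
        int mtu = 1500;
        int skb_size = ((mtu + 36) + 7) & ~7;

        printf("skb_size = %d\n", skb_size);    /* 1536 -> PCXR_MFL_1536 */
        return 0;
}
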
+
+static int pxa168_init_hw(struct pxa168_eth_private *pep)
+{
+       int err = 0;
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       /* Abort any transmit and receive operations and put DMA
+        * in idle state.
+        */
+       abort_dma(pep);
+       /* Initialize address hash table */
+       err = init_hash_table(pep);
+       if (err)
+               return err;
+       /* SDMA configuration */
+       wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |       /* Burst size = 32 bytes */
+           SDCR_RIFB |                         /* Rx interrupt on frame */
+           SDCR_BLMT |                         /* Little endian transmit */
+           SDCR_BLMR |                         /* Little endian receive */
+           SDCR_RC_MAX_RETRANS);               /* Max retransmit count */
+       /* Port Configuration */
+       wrl(pep, PORT_CONFIG, PCR_HS);          /* Hash size is 1/2kb */
+       set_port_config_ext(pep);
+
+       return err;
+}
+
+static int rxq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct rx_desc *p_rx_desc;
+       int size = 0, i = 0;
+       int rx_desc_num = pep->rx_ring_size;
+
+       /* Allocate RX skb rings */
+       pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->rx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate RX ring */
+       pep->rx_desc_count = 0;
+       size = pep->rx_ring_size * sizeof(struct rx_desc);
+       pep->rx_desc_area_size = size;
+       pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->rx_desc_dma, GFP_KERNEL);
+       if (!pep->p_rx_desc_area) {
+               printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_rx_desc_area, 0, size);
+       /* initialize the next_desc_ptr links in the Rx descriptors ring */
+       p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+       for (i = 0; i < rx_desc_num; i++) {
+               p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
+                   ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
+       }
+       /* Save Rx desc pointer to driver struct. */
+       pep->rx_curr_desc_q = 0;
+       pep->rx_used_desc_q = 0;
+       pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
+       return 0;
+out:
+       kfree(pep->rx_skb);
+       return -ENOMEM;
+}
+
+static void rxq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int curr;
+
+       /* Free preallocated skb's on RX rings */
+       for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
+               if (pep->rx_skb[curr]) {
+                       dev_kfree_skb(pep->rx_skb[curr]);
+                       pep->rx_desc_count--;
+               }
+       }
+       if (pep->rx_desc_count)
+               printk(KERN_ERR
+                      "Error in freeing Rx ring: %d skbs still allocated\n",
+                      pep->rx_desc_count);
+       /* Free RX ring */
+       if (pep->p_rx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
+                                 pep->p_rx_desc_area, pep->rx_desc_dma);
+       kfree(pep->rx_skb);
+}
+
+static int txq_init(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct tx_desc *p_tx_desc;
+       int size = 0, i = 0;
+       int tx_desc_num = pep->tx_ring_size;
+
+       pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+                            GFP_KERNEL);
+       if (!pep->tx_skb) {
+               printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+               return -ENOMEM;
+       }
+       /* Allocate TX ring */
+       pep->tx_desc_count = 0;
+       size = pep->tx_ring_size * sizeof(struct tx_desc);
+       pep->tx_desc_area_size = size;
+       pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+                                               &pep->tx_desc_dma, GFP_KERNEL);
+       if (!pep->p_tx_desc_area) {
+               printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
+                      dev->name, size);
+               goto out;
+       }
+       memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
+       /* Initialize the next_desc_ptr links in the Tx descriptors ring */
+       p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+       for (i = 0; i < tx_desc_num; i++) {
+               p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
+                   ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
+       }
+       pep->tx_curr_desc_q = 0;
+       pep->tx_used_desc_q = 0;
+       pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
+       return 0;
+out:
+       kfree(pep->tx_skb);
+       return -ENOMEM;
+}
+
+static void txq_deinit(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       /* Free outstanding skb's on TX ring */
+       txq_reclaim(dev, 1);
+       BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
+       /* Free TX ring */
+       if (pep->p_tx_desc_area)
+               dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
+                                 pep->p_tx_desc_area, pep->tx_desc_dma);
+       kfree(pep->tx_skb);
+}
+
+static int pxa168_eth_open(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       err = request_irq(dev->irq, pxa168_eth_int_handler,
+                         IRQF_DISABLED, dev->name, dev);
+       if (err) {
+               dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
+               return -EAGAIN;
+       }
+       pep->rx_resource_err = 0;
+       err = rxq_init(dev);
+       if (err != 0)
+               goto out_free_irq;
+       err = txq_init(dev);
+       if (err != 0)
+               goto out_free_rx_skb;
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+
+       /* Fill RX ring with skb's */
+       rxq_refill(dev);
+       pep->rx_used_desc_q = 0;
+       pep->rx_curr_desc_q = 0;
+       netif_carrier_off(dev);
+       eth_port_start(dev);
+       napi_enable(&pep->napi);
+       return 0;
+out_free_rx_skb:
+       rxq_deinit(dev);
+out_free_irq:
+       free_irq(dev->irq, dev);
+       return err;
+}
+
+static int pxa168_eth_stop(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       eth_port_reset(dev);
+
+       /* Disable interrupts */
+       wrl(pep, INT_MASK, 0);
+       wrl(pep, INT_CAUSE, 0);
+       /* Write to ICR to clear interrupts. */
+       wrl(pep, INT_W_CLEAR, 0);
+       napi_disable(&pep->napi);
+       del_timer_sync(&pep->timeout);
+       netif_carrier_off(dev);
+       free_irq(dev->irq, dev);
+       rxq_deinit(dev);
+       txq_deinit(dev);
+
+       return 0;
+}
+
+static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
+{
+       int retval;
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if ((mtu > 9500) || (mtu < 68))
+               return -EINVAL;
+
+       dev->mtu = mtu;
+       retval = set_port_config_ext(pep);
+
+       if (!netif_running(dev))
+               return 0;
+
+       /*
+        * Stop and then re-open the interface. This will allocate RX
+        * skbs sized for the new MTU.
+        * There is a risk that the open will not succeed if memory
+        * is exhausted.
+        */
+       pxa168_eth_stop(dev);
+       if (pxa168_eth_open(dev)) {
+               dev_printk(KERN_ERR, &dev->dev,
+                          "fatal error on re-opening device after "
+                          "MTU change\n");
+       }
+
+       return 0;
+}
+
+static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
+{
+       int tx_desc_curr;
+
+       tx_desc_curr = pep->tx_curr_desc_q;
+       pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
+       BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
+       pep->tx_desc_count++;
+
+       return tx_desc_curr;
+}
+
+static int pxa168_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct pxa168_eth_private *pep =
+           container_of(napi, struct pxa168_eth_private, napi);
+       struct net_device *dev = pep->dev;
+       int work_done = 0;
+
+       if (unlikely(pep->work_todo & WORK_LINK)) {
+               pep->work_todo &= ~(WORK_LINK);
+               handle_link_event(pep);
+       }
+       /*
+        * We call txq_reclaim every time, since interrupts are disabled while
+        * in NAPI and we would otherwise miss the TX_DONE interrupt, which is
+        * not updated in the interrupt status register.
+        */
+       txq_reclaim(dev, 0);
+       if (netif_queue_stopped(dev)
+           && pep->tx_ring_size - pep->tx_desc_count > 1) {
+               netif_wake_queue(dev);
+       }
+       work_done = rxq_process(dev, budget);
+       if (work_done < budget) {
+               napi_complete(napi);
+               wrl(pep, INT_MASK, ALL_INTS);
+       }
+
+       return work_done;
+}
+
+static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
+       struct tx_desc *desc;
+       int tx_index;
+       int length;
+
+       tx_index = eth_alloc_tx_desc_index(pep);
+       desc = &pep->p_tx_desc_area[tx_index];
+       length = skb->len;
+       pep->tx_skb[tx_index] = skb;
+       desc->byte_cnt = length;
+       desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+       wmb();
+       desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
+                       TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
+       wmb();
+       wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
+
+       stats->tx_bytes += skb->len;
+       stats->tx_packets++;
+       dev->trans_start = jiffies;
+       if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
+               /* We handled the current skb, but now we are out of space.*/
+               netif_stop_queue(dev);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static int smi_wait_ready(struct pxa168_eth_private *pep)
+{
+       int i = 0;
+
+       /* wait for the SMI register to become available */
+       for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
+               if (i == PHY_WAIT_ITERATIONS)
+                       return -ETIMEDOUT;
+               msleep(10);
+       }
+
+       return 0;
+}
+
+static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+       int i = 0;
+       int val;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
+       /* now wait for the data to be valid */
+       for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
+               if (i == PHY_WAIT_ITERATIONS) {
+                       printk(KERN_WARNING
+                               "pxa168_eth: SMI bus read not valid\n");
+                       return -ENODEV;
+               }
+               msleep(10);
+       }
+
+       return val & 0xffff;
+}
+
+static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
+                           u16 value)
+{
+       struct pxa168_eth_private *pep = bus->priv;
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
+           SMI_OP_W | (value & 0xffff));
+
+       if (smi_wait_ready(pep)) {
+               printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+                              int cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       if (pep->phy != NULL)
+               return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd);
+
+       return -EOPNOTSUPP;
+}
+
+static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr)
+{
+       struct mii_bus *bus = pep->smi_bus;
+       struct phy_device *phydev;
+       int start;
+       int num;
+       int i;
+
+       if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) {
+               /* Scan entire range */
+               start = ethernet_phy_get(pep);
+               num = 32;
+       } else {
+               /* Use phy addr specific to platform */
+               start = phy_addr & 0x1f;
+               num = 1;
+       }
+       phydev = NULL;
+       for (i = 0; i < num; i++) {
+               int addr = (start + i) & 0x1f;
+               if (bus->phy_map[addr] == NULL)
+                       mdiobus_scan(bus, addr);
+
+               if (phydev == NULL) {
+                       phydev = bus->phy_map[addr];
+                       if (phydev != NULL)
+                               ethernet_phy_set_addr(pep, addr);
+               }
+       }
+
+       return phydev;
+}
+
+static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
+{
+       struct phy_device *phy = pep->phy;
+       ethernet_phy_reset(pep);
+
+       phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+
+       if (speed == 0) {
+               phy->autoneg = AUTONEG_ENABLE;
+               phy->speed = 0;
+               phy->duplex = 0;
+               phy->supported &= PHY_BASIC_FEATURES;
+               phy->advertising = phy->supported | ADVERTISED_Autoneg;
+       } else {
+               phy->autoneg = AUTONEG_DISABLE;
+               phy->advertising = 0;
+               phy->speed = speed;
+               phy->duplex = duplex;
+       }
+       phy_start_aneg(phy);
+}
+
+static int ethernet_phy_setup(struct net_device *dev)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (pep->pd != NULL) {
+               if (pep->pd->init)
+                       pep->pd->init();
+       }
+       pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
+       if (pep->phy != NULL)
+               phy_init(pep, pep->pd->speed, pep->pd->duplex);
+       update_hash_table_mac_address(pep, NULL, dev->dev_addr);
+
+       return 0;
+}
+
+static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+       int err;
+
+       err = phy_read_status(pep->phy);
+       if (err == 0)
+               err = phy_ethtool_gset(pep->phy, cmd);
+
+       return err;
+}
+
+static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       return phy_ethtool_sset(pep->phy, cmd);
+}
+
+static void pxa168_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *info)
+{
+       strncpy(info->driver, DRIVER_NAME, 32);
+       strncpy(info->version, DRIVER_VERSION, 32);
+       strncpy(info->fw_version, "N/A", 32);
+       strncpy(info->bus_info, "N/A", 32);
+}
+
+static u32 pxa168_get_link(struct net_device *dev)
+{
+       return !!netif_carrier_ok(dev);
+}
+
+static const struct ethtool_ops pxa168_ethtool_ops = {
+       .get_settings = pxa168_get_settings,
+       .set_settings = pxa168_set_settings,
+       .get_drvinfo = pxa168_get_drvinfo,
+       .get_link = pxa168_get_link,
+};
+
+static const struct net_device_ops pxa168_eth_netdev_ops = {
+       .ndo_open = pxa168_eth_open,
+       .ndo_stop = pxa168_eth_stop,
+       .ndo_start_xmit = pxa168_eth_start_xmit,
+       .ndo_set_rx_mode = pxa168_eth_set_rx_mode,
+       .ndo_set_mac_address = pxa168_eth_set_mac_address,
+       .ndo_validate_addr = eth_validate_addr,
+       .ndo_do_ioctl = pxa168_eth_do_ioctl,
+       .ndo_change_mtu = pxa168_eth_change_mtu,
+       .ndo_tx_timeout = pxa168_eth_tx_timeout,
+};
+
+static int pxa168_eth_probe(struct platform_device *pdev)
+{
+       struct pxa168_eth_private *pep = NULL;
+       struct net_device *dev = NULL;
+       struct resource *res;
+       struct clk *clk;
+       int err;
+
+       printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");
+
+       clk = clk_get(&pdev->dev, "MFUCLK");
+       if (IS_ERR(clk)) {
+               printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
+                       DRIVER_NAME);
+               return -ENODEV;
+       }
+       clk_enable(clk);
+
+       dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
+       if (!dev) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       platform_set_drvdata(pdev, dev);
+       pep = netdev_priv(dev);
+       pep->dev = dev;
+       pep->clk = clk;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res == NULL) {
+               err = -ENODEV;
+               goto out;
+       }
+       pep->base = ioremap(res->start, res->end - res->start + 1);
+       if (pep->base == NULL) {
+               err = -ENOMEM;
+               goto out;
+       }
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       BUG_ON(!res);
+       dev->irq = res->start;
+       dev->netdev_ops = &pxa168_eth_netdev_ops;
+       dev->watchdog_timeo = 2 * HZ;
+       dev->base_addr = 0;
+       SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+
+       INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
+
+       printk(KERN_INFO "%s: Using a random MAC address\n", DRIVER_NAME);
+       random_ether_addr(dev->dev_addr);
+
+       pep->pd = pdev->dev.platform_data;
+       pep->rx_ring_size = NUM_RX_DESCS;
+       if (pep->pd->rx_queue_size)
+               pep->rx_ring_size = pep->pd->rx_queue_size;
+
+       pep->tx_ring_size = NUM_TX_DESCS;
+       if (pep->pd->tx_queue_size)
+               pep->tx_ring_size = pep->pd->tx_queue_size;
+
+       pep->port_num = pep->pd->port_number;
+       /* Hardware supports only 3 ports */
+       BUG_ON(pep->port_num > 2);
+       netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+
+       memset(&pep->timeout, 0, sizeof(struct timer_list));
+       init_timer(&pep->timeout);
+       pep->timeout.function = rxq_refill_timer_wrapper;
+       pep->timeout.data = (unsigned long)pep;
+
+       pep->smi_bus = mdiobus_alloc();
+       if (pep->smi_bus == NULL) {
+               err = -ENOMEM;
+               goto out;
+       }
+       pep->smi_bus->priv = pep;
+       pep->smi_bus->name = "pxa168_eth smi";
+       pep->smi_bus->read = pxa168_smi_read;
+       pep->smi_bus->write = pxa168_smi_write;
+       snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+       pep->smi_bus->parent = &pdev->dev;
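+       /* Mask all PHY addresses so mdiobus_register() does not auto-probe;
+        * the PHY is set up later via ethernet_phy_setup(). */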
+       pep->smi_bus->phy_mask = 0xffffffff;
+       if (mdiobus_register(pep->smi_bus) < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+       pxa168_init_hw(pep);
+       err = ethernet_phy_setup(dev);
+       if (err)
+               goto out;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       err = register_netdev(dev);
+       if (err)
+               goto out;
+       return 0;
+out:
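+       /* Error path: release whatever was acquired before the failure. */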
+       if (pep->clk) {
+               clk_disable(pep->clk);
+               clk_put(pep->clk);
+               pep->clk = NULL;
+       }
+       if (pep->base) {
+               iounmap(pep->base);
+               pep->base = NULL;
+       }
+       if (dev)
+               free_netdev(dev);
+       return err;
+}
+
+static int pxa168_eth_remove(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct pxa168_eth_private *pep = netdev_priv(dev);
+
+       if (pep->htpr) {
+               dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
+                                 pep->htpr, pep->htpr_dma);
+               pep->htpr = NULL;
+       }
+       if (pep->clk) {
+               clk_disable(pep->clk);
+               clk_put(pep->clk);
+               pep->clk = NULL;
+       }
+       if (pep->phy != NULL)
+               phy_detach(pep->phy);
+
+       iounmap(pep->base);
+       pep->base = NULL;
+       unregister_netdev(dev);
+       flush_scheduled_work();
+       free_netdev(dev);
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static void pxa168_eth_shutdown(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       eth_port_reset(dev);
+}
+
+#ifdef CONFIG_PM
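+/* Suspend/resume are not implemented yet; both handlers report -ENOSYS. */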
+static int pxa168_eth_resume(struct platform_device *pdev)
+{
+       return -ENOSYS;
+}
+
+static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       return -ENOSYS;
+}
+
+#else
+#define pxa168_eth_resume NULL
+#define pxa168_eth_suspend NULL
+#endif
+
+static struct platform_driver pxa168_eth_driver = {
+       .probe = pxa168_eth_probe,
+       .remove = pxa168_eth_remove,
+       .shutdown = pxa168_eth_shutdown,
+       .resume = pxa168_eth_resume,
+       .suspend = pxa168_eth_suspend,
+       .driver = {
+                  .name = DRIVER_NAME,
+                  },
+};
+
+static int __init pxa168_init_module(void)
+{
+       return platform_driver_register(&pxa168_eth_driver);
+}
+
+static void __exit pxa168_cleanup_module(void)
+{
+       platform_driver_unregister(&pxa168_eth_driver);
+}
+
+module_init(pxa168_init_module);
+module_exit(pxa168_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
+MODULE_ALIAS("platform:pxa168_eth");
index bf6d87adda4fff7a5aa1d1e20728383d08e621f1..213e3656d953be316732ec22e82dbaf8552b0555 100644 (file)
@@ -1983,8 +1983,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device_stats *stats = &netdev->stats;
 
-       memset(stats, 0, sizeof(*stats));
-
        stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
        stats->tx_packets = adapter->stats.xmitfinished;
        stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
index f5a9eb1df59332f0d105d7ee83380471a0ff48f1..79fd02bc69fd0854f14e0fdac452980cd7fed74b 100644 (file)
@@ -1437,7 +1437,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
 
 static int sh_eth_drv_probe(struct platform_device *pdev)
 {
-       int ret, i, devno = 0;
+       int ret, devno = 0;
        struct resource *res;
        struct net_device *ndev = NULL;
        struct sh_eth_private *mdp;
index 08e7b6abacdd29f3fc78678834ed54feeabe6c6d..8ed30fa35d0a5d789121eb3042c4b87a5e53a7f2 100644 (file)
@@ -58,6 +58,7 @@
 #define USB_PRODUCT_IPHONE      0x1290
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
+#define USB_PRODUCT_IPHONE_4   0x1297
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
                USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
                IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
                IPHETH_USBINTF_PROTO) },
+       { USB_DEVICE_AND_INTERFACE_INFO(
+               USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+               IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+               IPHETH_USBINTF_PROTO) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
index a105087af9639884c1ca773ba96cf096fdde7954..f9aa1bc0a94756465bbd4d8ac7660cb3b95718c7 100644 (file)
@@ -732,7 +732,7 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
 
        /* Nothing to do for ADMtek BBP */
        } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
-               wiphy_debug(dev->wiphy, "unsupported bbp type %d\n",
+               wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
                            priv->bbp_type);
 
        ADM8211_RESTORE();
@@ -1032,7 +1032,7 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
                        break;
                }
        } else
-               wiphy_debug(dev->wiphy, "unsupported bbp %d\n", priv->bbp_type);
+               wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
 
        ADM8211_CSR_WRITE(SYNRF, 0);
 
@@ -1525,7 +1525,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
        retval = request_irq(priv->pdev->irq, adm8211_interrupt,
                             IRQF_SHARED, "adm8211", dev);
        if (retval) {
-               wiphy_err(dev->wiphy, "failed to register irq handler\n");
+               wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
                goto fail;
        }
 
@@ -1902,7 +1902,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
                goto err_free_eeprom;
        }
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, rev 0x%02x\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
                   dev->wiphy->perm_addr, pdev->revision);
 
        return 0;
index d5140a87f0737d92adde6ce9bb725d679be16538..1128fa8c9ed5ef3d0b9be5191edc4bd7320baf90 100644 (file)
@@ -655,7 +655,7 @@ static int at76_get_hw_config(struct at76_priv *priv)
 exit:
        kfree(hwcfg);
        if (ret < 0)
-               wiphy_err(priv->hw->wiphy, "cannot get hw config (error %d)\n",
+               wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
                          ret);
 
        return ret;
@@ -960,7 +960,7 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
                           sizeof(struct mib_mac_addr));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_addr) failed: %d\n", ret);
+                         "at76_get_mib (MAC_ADDR) failed: %d\n", ret);
                goto exit;
        }
 
@@ -989,7 +989,7 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
                           sizeof(struct mib_mac_wep));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_wep) failed: %d\n", ret);
+                         "at76_get_mib (MAC_WEP) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1026,7 +1026,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
                           sizeof(struct mib_mac_mgmt));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac_mgmt) failed: %d\n", ret);
+                         "at76_get_mib (MAC_MGMT) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1062,7 +1062,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mac) failed: %d\n", ret);
+                         "at76_get_mib (MAC) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1099,7 +1099,7 @@ static void at76_dump_mib_phy(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (phy) failed: %d\n", ret);
+                         "at76_get_mib (PHY) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1132,7 +1132,7 @@ static void at76_dump_mib_local(struct at76_priv *priv)
        ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(struct mib_local));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (local) failed: %d\n", ret);
+                         "at76_get_mib (LOCAL) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1158,7 +1158,7 @@ static void at76_dump_mib_mdomain(struct at76_priv *priv)
                           sizeof(struct mib_mdomain));
        if (ret < 0) {
                wiphy_err(priv->hw->wiphy,
-                         "at76_get_mib (mdomain) failed: %d\n", ret);
+                         "at76_get_mib (MDOMAIN) failed: %d\n", ret);
                goto exit;
        }
 
@@ -1229,7 +1229,7 @@ static int at76_submit_rx_urb(struct at76_priv *priv)
        struct sk_buff *skb = priv->rx_skb;
 
        if (!priv->rx_urb) {
-               wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is null\n",
+               wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
                          __func__);
                return -EFAULT;
        }
@@ -1792,7 +1792,7 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
                wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
                if (ret == -EINVAL)
                        wiphy_err(priv->hw->wiphy,
-                                 "-einval: tx urb %p hcpriv %p complete %p\n",
+                                 "-EINVAL: tx urb %p hcpriv %p complete %p\n",
                                  priv->tx_urb,
                                  priv->tx_urb->hcpriv, priv->tx_urb->complete);
        }
@@ -2310,7 +2310,7 @@ static int at76_init_new_device(struct at76_priv *priv,
 
        priv->mac80211_registered = 1;
 
-       wiphy_info(priv->hw->wiphy, "usb %s, mac %pm, firmware %d.%d.%d-%d\n",
+       wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
                   dev_name(&interface->dev), priv->mac_addr,
                   priv->fw_version.major, priv->fw_version.minor,
                   priv->fw_version.patch, priv->fw_version.build);
index c67b05f3bcbdcace54e806ccdf69de5793766d0c..debfb0fbc7c5e7316931b2e0ac0c4913ff7428d6 100644 (file)
@@ -245,7 +245,7 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
 {
        int i;
 
-       wiphy_debug(ar->hw->wiphy, "qos queue stats\n");
+       wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
 
        for (i = 0; i < __AR9170_NUM_TXQ; i++)
                wiphy_debug(ar->hw->wiphy,
@@ -387,7 +387,7 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
                if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
 #ifdef AR9170_QUEUE_DEBUG
                        wiphy_debug(ar->hw->wiphy,
-                                   "skip frame => da %pm != %pm\n",
+                                   "skip frame => DA %pM != %pM\n",
                                    mac, ieee80211_get_DA(hdr));
                        ar9170_print_txheader(ar, skb);
 #endif /* AR9170_QUEUE_DEBUG */
index 1189dbb6e2a62abcdc090ca5eea61a9eb5824aa7..996e9d7d7586b240847526bec92e721ef8a20d9c 100644 (file)
@@ -2723,14 +2723,6 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
 
                packet = &priv->rx_buffers[i];
 
-               /* Sync the DMA for the STATUS buffer so CPU is sure to get
-                * the correct values */
-               pci_dma_sync_single_for_cpu(priv->pci_dev,
-                                           sq->nic +
-                                           sizeof(struct ipw2100_status) * i,
-                                           sizeof(struct ipw2100_status),
-                                           PCI_DMA_FROMDEVICE);
-
                /* Sync the DMA for the RX buffer so CPU is sure to get
                 * the correct values */
                pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
index fec026212326dce857220196dd312c3b2d5b6c69..0b779a41a1426b9153a55f30a59a052a9e66f618 100644 (file)
@@ -265,7 +265,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 128,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -297,7 +297,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 128,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index 6950a783913b83561839f20a08657857ee242a86..8ccfcd08218d894895524c3995fca28f2695aa6c 100644 (file)
@@ -2731,7 +2731,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
        .led_compensation = 64,
        .broken_powersave = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
 };
@@ -2752,7 +2752,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
        .led_compensation = 64,
        .broken_powersave = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
 };
index d6da356608fa46efe34a317bdda60c6680b65d7e..d92b729092336523a5c0a2a541bfbd7cb1b4b640 100644 (file)
@@ -2322,7 +2322,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
        .led_compensation = 61,
        .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .temperature_kelvin = true,
        .max_event_log_size = 512,
        .tx_power_by_driver = true,
index aacf3770f075b4abd1de4a474f79cc60eb42c9cc..48bdcd8d2e94c3355c877a55d8e7815249350fdc 100644 (file)
@@ -510,7 +510,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -541,7 +541,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -601,7 +601,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -632,7 +632,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -663,7 +663,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -693,7 +693,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index af4fd50f3405db334ce2e44e1ce648c7c510ed7c..cee06b968de807a642adb4db4e08e62cfe2237ab 100644 (file)
@@ -388,7 +388,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -424,7 +424,7 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -459,7 +459,7 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -496,7 +496,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -532,7 +532,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -570,7 +570,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -606,7 +606,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -644,7 +644,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -680,7 +680,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
        .max_event_log_size = 512,
        .sensitivity_calib_by_driver = true,
        .chain_noise_calib_by_driver = true,
@@ -721,7 +721,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -756,7 +756,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -791,7 +791,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -828,7 +828,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -866,7 +866,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -902,7 +902,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1500,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
@@ -940,7 +940,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
        .chain_noise_scale = 1000,
-       .monitor_recover_period = IWL_MONITORING_PERIOD,
+       .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
        .max_event_log_size = 1024,
        .ucode_tracing = true,
        .sensitivity_calib_by_driver = true,
index c1882fd8345d43dd630718c1f9475533511e5f7c..10d7b9b7f064f529af9d0cdc6aa2b0a144d9438d 100644 (file)
@@ -3667,6 +3667,49 @@ out_exit:
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+                                   unsigned int changed_flags,
+                                   unsigned int *total_flags,
+                                   u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       priv->staging_rxon.filter_flags &= ~filter_nand;
+       priv->staging_rxon.filter_flags |= filter_or;
+
+       iwlcore_commit_rxon(priv);
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
 static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct iwl_priv *priv = hw->priv;
@@ -3867,7 +3910,7 @@ static struct ieee80211_ops iwl_hw_ops = {
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
        .config = iwl_mac_config,
-       .configure_filter = iwl_configure_filter,
+       .configure_filter = iwlagn_configure_filter,
        .set_key = iwl_mac_set_key,
        .update_tkip_key = iwl_mac_update_tkip_key,
        .conf_tx = iwl_mac_conf_tx,
index 2c03c6e20a72d73ded70f8c96d37bfce6aa8e263..07dbc27964480eebedb67760029dcadab9584f84 100644 (file)
@@ -1328,51 +1328,6 @@ out:
 EXPORT_SYMBOL(iwl_apm_init);
 
 
-
-void iwl_configure_filter(struct ieee80211_hw *hw,
-                         unsigned int changed_flags,
-                         unsigned int *total_flags,
-                         u64 multicast)
-{
-       struct iwl_priv *priv = hw->priv;
-       __le32 filter_or = 0, filter_nand = 0;
-
-#define CHK(test, flag)        do { \
-       if (*total_flags & (test))              \
-               filter_or |= (flag);            \
-       else                                    \
-               filter_nand |= (flag);          \
-       } while (0)
-
-       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
-                       changed_flags, *total_flags);
-
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
-       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
-       mutex_lock(&priv->mutex);
-
-       priv->staging_rxon.filter_flags &= ~filter_nand;
-       priv->staging_rxon.filter_flags |= filter_or;
-
-       iwlcore_commit_rxon(priv);
-
-       mutex_unlock(&priv->mutex);
-
-       /*
-        * Receiving all multicast frames is always enabled by the
-        * default flags setup in iwl_connection_init_rx_config()
-        * since we currently do not support programming multicast
-        * filters into the device.
-        */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
-                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-EXPORT_SYMBOL(iwl_configure_filter);
-
 int iwl_set_hw_params(struct iwl_priv *priv)
 {
        priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
index 4a71dfb10a15d0f6ebf4007944d011d249333902..5e6ee3da6bbf740846033e4a57e44956e326a73c 100644 (file)
@@ -372,9 +372,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
                           u32 decrypt_res,
                           struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
-void iwl_configure_filter(struct ieee80211_hw *hw,
-                         unsigned int changed_flags,
-                         unsigned int *total_flags, u64 multicast);
 int iwl_set_hw_params(struct iwl_priv *priv);
 void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_bss_info_changed(struct ieee80211_hw *hw,
index f35bcad56e36bd68779edb3acf930d6b7595359a..2e97cd2fa98a6e0c7d9cdc6d98582ac54fe28459 100644 (file)
@@ -1049,7 +1049,8 @@ struct iwl_event_log {
 #define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
 
 /* timer constants use to monitor and recover stuck tx queues in mSecs */
-#define IWL_MONITORING_PERIOD  (1000)
+#define IWL_DEF_MONITORING_PERIOD      (1000)
+#define IWL_LONG_MONITORING_PERIOD     (5000)
 #define IWL_ONE_HUNDRED_MSECS   (100)
 #define IWL_SIXTY_SECS          (60000)
 
index 70c4b8fba0ee89093c56c056e7152b927da5a37c..59a308b02f95fdc077a84a7d12d71db7451b1816 100644 (file)
@@ -3391,6 +3391,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
 
        return 0;
 }
+
+static void iwl3945_configure_filter(struct ieee80211_hw *hw,
+                                    unsigned int changed_flags,
+                                    unsigned int *total_flags,
+                                    u64 multicast)
+{
+       struct iwl_priv *priv = hw->priv;
+       __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)        do { \
+       if (*total_flags & (test))              \
+               filter_or |= (flag);            \
+       else                                    \
+               filter_nand |= (flag);          \
+       } while (0)
+
+       IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+                       changed_flags, *total_flags);
+
+       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+       CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+       mutex_lock(&priv->mutex);
+
+       priv->staging_rxon.filter_flags &= ~filter_nand;
+       priv->staging_rxon.filter_flags |= filter_or;
+
+       /*
+        * Committing directly here breaks for some reason,
+        * but we'll eventually commit the filter flags
+        * change anyway.
+        */
+
+       mutex_unlock(&priv->mutex);
+
+       /*
+        * Receiving all multicast frames is always enabled by the
+        * default flags setup in iwl_connection_init_rx_config()
+        * since we currently do not support programming multicast
+        * filters into the device.
+        */
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+                       FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+
 /*****************************************************************************
  *
  * sysfs attributes
@@ -3796,7 +3845,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
        .config = iwl_mac_config,
-       .configure_filter = iwl_configure_filter,
+       .configure_filter = iwl3945_configure_filter,
        .set_key = iwl3945_mac_set_key,
        .conf_tx = iwl_mac_conf_tx,
        .reset_tsf = iwl_mac_reset_tsf,
index 01ad7f77383a8f18a58a69dc6648ba907618e069..86fa8abdd66fda1cba26ef0e2bfe90a249d14128 100644 (file)
@@ -486,7 +486,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
        struct ieee80211_rx_status rx_status;
 
        if (data->idle) {
-               wiphy_debug(hw->wiphy, "trying to tx when idle - reject\n");
+               wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
                return false;
        }
 
index d761ed2d8af46b8d8be19eab01101dc511321b00..f152a25be59f7020998d35ceb72ba55100ebff2b 100644 (file)
@@ -910,14 +910,14 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
 
        rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
        if (rxq->rxd == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc rx descriptors\n");
+               wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
                return -ENOMEM;
        }
        memset(rxq->rxd, 0, size);
 
        rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
        if (rxq->buf == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc rx skbuff list\n");
+               wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
                pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
                return -ENOMEM;
        }
@@ -1145,14 +1145,14 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
 
        txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
        if (txq->txd == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc tx descriptors\n");
+               wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
                return -ENOMEM;
        }
        memset(txq->txd, 0, size);
 
        txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
        if (txq->skb == NULL) {
-               wiphy_err(hw->wiphy, "failed to alloc tx skbuff list\n");
+               wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
                pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
                return -ENOMEM;
        }
@@ -1573,7 +1573,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
                                        PCI_DMA_BIDIRECTIONAL);
 
        if (!timeout) {
-               wiphy_err(hw->wiphy, "command %s timeout after %u ms\n",
+               wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
                          mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
                          MWL8K_CMD_TIMEOUT_MS);
                rc = -ETIMEDOUT;
@@ -1584,11 +1584,11 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
 
                rc = cmd->result ? -EINVAL : 0;
                if (rc)
-                       wiphy_err(hw->wiphy, "command %s error 0x%x\n",
+                       wiphy_err(hw->wiphy, "Command %s error 0x%x\n",
                                  mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
                                  le16_to_cpu(cmd->result));
                else if (ms > 2000)
-                       wiphy_notice(hw->wiphy, "command %s took %d ms\n",
+                       wiphy_notice(hw->wiphy, "Command %s took %d ms\n",
                                     mwl8k_cmd_name(cmd->code,
                                                    buf, sizeof(buf)),
                                     ms);
@@ -3210,7 +3210,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
        rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
                         IRQF_SHARED, MWL8K_NAME, hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "failed to register irq handler\n");
+               wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
                return -EIO;
        }
 
@@ -3926,7 +3926,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
        priv->sram = pci_iomap(pdev, 0, 0x10000);
        if (priv->sram == NULL) {
-               wiphy_err(hw->wiphy, "cannot map device sram\n");
+               wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
                goto err_iounmap;
        }
 
@@ -3938,7 +3938,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        if (priv->regs == NULL) {
                priv->regs = pci_iomap(pdev, 2, 0x10000);
                if (priv->regs == NULL) {
-                       wiphy_err(hw->wiphy, "cannot map device registers\n");
+                       wiphy_err(hw->wiphy, "Cannot map device registers\n");
                        goto err_iounmap;
                }
        }
@@ -3950,14 +3950,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        /* Ask userland hotplug daemon for the device firmware */
        rc = mwl8k_request_firmware(priv);
        if (rc) {
-               wiphy_err(hw->wiphy, "firmware files not found\n");
+               wiphy_err(hw->wiphy, "Firmware files not found\n");
                goto err_stop_firmware;
        }
 
        /* Load firmware into hardware */
        rc = mwl8k_load_firmware(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot start firmware\n");
+               wiphy_err(hw->wiphy, "Cannot start firmware\n");
                goto err_stop_firmware;
        }
 
@@ -4047,7 +4047,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
                         IRQF_SHARED, MWL8K_NAME, hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "failed to register irq handler\n");
+               wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
                goto err_free_queues;
        }
 
@@ -4067,7 +4067,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
                rc = mwl8k_cmd_get_hw_spec_sta(hw);
        }
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot initialise firmware\n");
+               wiphy_err(hw->wiphy, "Cannot initialise firmware\n");
                goto err_free_irq;
        }
 
@@ -4081,14 +4081,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
        /* Turn radio off */
        rc = mwl8k_cmd_radio_disable(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot disable\n");
+               wiphy_err(hw->wiphy, "Cannot disable\n");
                goto err_free_irq;
        }
 
        /* Clear MAC address */
        rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot clear mac address\n");
+               wiphy_err(hw->wiphy, "Cannot clear MAC address\n");
                goto err_free_irq;
        }
 
@@ -4098,7 +4098,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
 
        rc = ieee80211_register_hw(hw);
        if (rc) {
-               wiphy_err(hw->wiphy, "cannot register device\n");
+               wiphy_err(hw->wiphy, "Cannot register device\n");
                goto err_free_queues;
        }
 
index d687cb7f2a599c28d04d6c875f5b4b2796604cdf..78347041ec40a3c51d094328495c8d86cb5c5a3b 100644 (file)
@@ -167,7 +167,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
        }
 
        if (j == 0) {
-               wiphy_err(dev->wiphy, "disabling totally damaged %d GHz band\n",
+               wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
                          (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
 
                ret = -ENODATA;
@@ -695,12 +695,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
                u8 perm_addr[ETH_ALEN];
 
                wiphy_warn(dev->wiphy,
-                          "invalid hwaddr! using randomly generated mac addr\n");
+                          "Invalid hwaddr! Using randomly generated MAC addr\n");
                random_ether_addr(perm_addr);
                SET_IEEE80211_PERM_ADDR(dev, perm_addr);
        }
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, mac:isl38%02x rf:%s\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n",
                   dev->wiphy->perm_addr, priv->version,
                   p54_rf_chips[priv->rxhw]);
 
index 47006bca485216609afb0f51a201c4d2ef1ef04a..15b20c29a6042ae6e3dc5c99dae07e8c317127ca 100644 (file)
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
 
        if (fw_version)
                wiphy_info(priv->hw->wiphy,
-                          "fw rev %s - softmac protocol %x.%x\n",
+                          "FW rev %s - Softmac protocol %x.%x\n",
                           fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
 
        if (priv->fw_var < 0x500)
index ea91f5cce6b3f62d31f8c5d45609e650a2cc40a7..3837e1eec5f4a56fdbf80a2b8f13984f3d15742b 100644 (file)
@@ -58,7 +58,7 @@ static void p54_update_leds(struct work_struct *work)
        err = p54_set_leds(priv);
        if (err && net_ratelimit())
                wiphy_err(priv->hw->wiphy,
-                         "failed to update leds (%d).\n", err);
+                         "failed to update LEDs (%d).\n", err);
 
        if (rerun)
                ieee80211_queue_delayed_work(priv->hw, &priv->led_work,
@@ -103,7 +103,7 @@ static int p54_register_led(struct p54_common *priv,
        err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev);
        if (err)
                wiphy_err(priv->hw->wiphy,
-                         "failed to register %s led.\n", name);
+                         "Failed to register %s LED.\n", name);
        else
                led->registered = 1;
 
index 822f8dc26e9c051d3b9ee34e79ef715b0f204067..1eacba4daa5bb1d35edc265ac8862c464eac4387 100644 (file)
@@ -466,7 +466,7 @@ static int p54p_open(struct ieee80211_hw *dev)
        P54P_READ(dev_int);
 
        if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
-               wiphy_err(dev->wiphy, "cannot boot firmware!\n");
+               wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
                p54p_stop(dev);
                return -ETIMEDOUT;
        }
index 427b46f558ed4f57b0066a2ae312be0d0e82c7ae..173aec3d6e7eba64896da75ceaf42436489f3fe8 100644 (file)
@@ -540,7 +540,7 @@ static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
        case P54_TRAP_BEACON_TX:
                break;
        case P54_TRAP_RADAR:
-               wiphy_info(priv->hw->wiphy, "radar (freq:%d mhz)\n", freq);
+               wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq);
                break;
        case P54_TRAP_NO_BEACON:
                if (priv->vif)
index b50c39aaec0522e63c89b30ad6cc6433747b6853..30107ce78dfb6cc8613c83c5ca525a5e8bf42314 100644 (file)
@@ -445,7 +445,7 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
                                             &priv->rx_ring_dma);
 
        if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
-               wiphy_err(dev->wiphy, "cannot allocate rx ring\n");
+               wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
                return -ENOMEM;
        }
 
@@ -502,7 +502,7 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
 
        ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
        if (!ring || (unsigned long)ring & 0xFF) {
-               wiphy_err(dev->wiphy, "cannot allocate tx ring (prio = %d)\n",
+               wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
                          prio);
                return -ENOMEM;
        }
@@ -568,7 +568,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
        ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
                          IRQF_SHARED, KBUILD_MODNAME, dev);
        if (ret) {
-               wiphy_err(dev->wiphy, "failed to register irq handler\n");
+               wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
                goto err_free_rings;
        }
 
index 5738a55c1b06b4c9bd7a8c30323266b0af2151b4..98e0351c1dd6923214b17919161793c873d89967 100644 (file)
@@ -573,7 +573,7 @@ static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
        } while (--i);
 
        if (!i) {
-               wiphy_err(dev->wiphy, "reset timeout!\n");
+               wiphy_err(dev->wiphy, "Reset timeout!\n");
                return -ETIMEDOUT;
        }
 
@@ -1526,7 +1526,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
        mutex_init(&priv->conf_mutex);
        skb_queue_head_init(&priv->b_tx_status.queue);
 
-       wiphy_info(dev->wiphy, "hwaddr %pm, %s v%d + %s, rfkill mask %d\n",
+       wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
                   mac_addr, chip_name, priv->asic_rev, priv->rf->name,
                   priv->rfkill_mask);
 
index fd96f9112322439f7bf3f72716f943459a9c347c..97eebdcf7eb9f432729c18a3ad090fe8934e0171 100644 (file)
@@ -366,7 +366,7 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
                rtl8225_write(dev, 0x02, 0x044d);
                msleep(100);
                if (!(rtl8225_read(dev, 6) & (1 << 7)))
-                       wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+                       wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
                                   rtl8225_read(dev, 6));
        }
 
@@ -735,7 +735,7 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
                rtl8225_write(dev, 0x02, 0x044D);
                msleep(100);
                if (!(rtl8225_read(dev, 6) & (1 << 7)))
-                       wiphy_warn(dev->wiphy, "rf calibration failed! %x\n",
+                       wiphy_warn(dev->wiphy, "RF Calibration Failed! %x\n",
                                   rtl8225_read(dev, 6));
        }
 
index 044f430f3b4324eb031f32b3e9aeaf1307309a7d..cff7cc2c1f025295746416e86e88a07859e29154 100644 (file)
@@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP
 config ACPI_TOSHIBA
        tristate "Toshiba Laptop Extras"
        depends on ACPI
+       depends on LEDS_CLASS
+       depends on NEW_LEDS
+       depends on BACKLIGHT_CLASS_DEVICE
        depends on INPUT
        depends on RFKILL || RFKILL = n
        select INPUT_POLLDEV
-       select BACKLIGHT_CLASS_DEVICE
        ---help---
          This driver adds support for access to certain system settings
          on "legacy free" Toshiba laptops.  These laptops can be recognized by
index f15516374987cef2cf0bae21383259759c4095a0..c1741142a4cb64c3cf1c705bbd1385c03d89778c 100644 (file)
@@ -79,12 +79,13 @@ struct bios_args {
        u32 command;
        u32 commandtype;
        u32 datasize;
-       char *data;
+       u32 data;
 };
 
 struct bios_return {
        u32 sigpass;
        u32 return_code;
+       u32 value;
 };
 
 struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
  *       buffer = kzalloc(128, GFP_KERNEL);
  *       ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
  */
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
                                int buffersize)
 {
        struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
                .command = write ? 0x2 : 0x1,
                .commandtype = query,
                .datasize = buffersize,
-               .data = buffer,
+               .data = *buffer,
        };
        struct acpi_buffer input = { sizeof(struct bios_args), &args };
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
 
        bios_return = *((struct bios_return *)obj->buffer.pointer);
 
-       if (bios_return.return_code) {
-               printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
-                      bios_return.return_code);
-               kfree(obj);
-               return bios_return.return_code;
-       }
-       if (obj->buffer.length - sizeof(bios_return) > buffersize) {
-               kfree(obj);
-               return -EINVAL;
-       }
-
-       memset(buffer, 0, buffersize);
-       memcpy(buffer,
-              ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
-              obj->buffer.length - sizeof(bios_return));
-       kfree(obj);
+       memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
        return 0;
 }
 
 static int hp_wmi_display_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
 
 static int hp_wmi_hddtemp_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
 
 static int hp_wmi_als_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
 
 static int hp_wmi_dock_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
                                       sizeof(state));
 
        if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
 
 static int hp_wmi_tablet_state(void)
 {
-       int state;
-       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state,
+       int state = 0;
+       int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
                                       sizeof(state));
        if (ret)
                return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
        int ret;
 
        ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
-                                  (char *)&query, sizeof(query));
+                                  &query, sizeof(query));
        if (ret)
                return -EINVAL;
        return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
 
 static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
 {
-       int wireless;
+       int wireless = 0;
        int mask;
        hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
-                            (char *)&wireless, sizeof(wireless));
+                            &wireless, sizeof(wireless));
        /* TBD: Pass error */
 
        mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
 
 static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
 {
-       int wireless;
+       int wireless = 0;
        int mask;
        hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
-                            (char *)&wireless, sizeof(wireless));
+                            &wireless, sizeof(wireless));
        /* TBD: Pass error */
 
        mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
                       const char *buf, size_t count)
 {
        u32 tmp = simple_strtoul(buf, NULL, 10);
-       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp,
+       int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
                                       sizeof(tmp));
        if (ret)
                return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
        static struct key_entry *key;
        union acpi_object *obj;
        u32 event_id, event_data;
-       int key_code, ret;
+       int key_code = 0, ret;
        u32 *location;
        acpi_status status;
 
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_BEZEL_BUTTON:
                ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
-                                          (char *)&key_code,
+                                          &key_code,
                                           sizeof(key_code));
                if (ret)
                        break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
 static int __devinit hp_wmi_bios_setup(struct platform_device *device)
 {
        int err;
-       int wireless;
+       int wireless = 0;
 
-       err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless,
+       err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
                                   sizeof(wireless));
        if (err)
                return err;
index 73f8e6d726694ad918ba89fa6da7e5507885a2a7..2b11a33325e6a93bf872e67e7d1ab549b98ccc9c 100644 (file)
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
  */
 static struct rar_device *_rar_to_device(int rar, int *off)
 {
-       if (rar >= 0 && rar <= 3) {
+       if (rar >= 0 && rar < MRST_NUM_RAR) {
                *off = rar;
                return &my_rar_device;
        }
index 943f9084dcb11734e73064332f903320478c3c1e..6abe18e638e976f325ba92c9892bd2cc1501f4a8 100644 (file)
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
                mdelay(1);
                *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
        } else if (cmd == IPC_I2C_WRITE) {
-               writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR);
+               writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
                mdelay(1);
                writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
        } else {
index c6cbcb3f925e094445d1935a6e44df791ea61f10..0e9a309b96691d6ec1c2209bb01a6f351a3a04d6 100644 (file)
 
 #ifdef CONFIG_MAGIC_SYSRQ
 static int ctrlchar_sysrq_key;
-static struct tty_struct *sysrq_tty;
 
 static void
 ctrlchar_handle_sysrq(struct work_struct *work)
 {
-       handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
+       handle_sysrq(ctrlchar_sysrq_key);
 }
 
 static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
        /* racy */
        if (len == 3 && buf[1] == '-') {
                ctrlchar_sysrq_key = buf[2];
-               sysrq_tty = tty;
                schedule_work(&ctrlchar_work);
                return CTRLCHAR_SYSRQ;
        }
index 18d9a497863bd5c2cdc3b4ba2160bdd450a19e4e..8cd58e412b5eae2cfe3188a03f616190146f8db0 100644 (file)
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
                if (kbd->sysrq) {
                        if (kbd->sysrq == K(KT_LATIN, '-')) {
                                kbd->sysrq = 0;
-                               handle_sysrq(value, kbd->tty);
+                               handle_sysrq(value);
                                return;
                        }
                        if (value == '-') {
index 7356a56ac458f697c54bdd8d1335b763a47f000b..be0ebce36e54906fce49dd9c1993d184ebaddbb9 100644 (file)
@@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
        tmp.close_delay = info->close_delay;
        tmp.closing_wait = info->closing_wait;
        tmp.custom_divisor = info->custom_divisor;
-       copy_to_user(retinfo,&tmp,sizeof(*retinfo));
+       if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+               return -EFAULT;
+
        return 0;
 }
 
@@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
 
        if (!new_info)
                return -EFAULT;
-       copy_from_user(&new_serial,new_info,sizeof(new_serial));
+       if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+               return -EFAULT;
        old_info = *info;
 
        if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
        status = 0;
 #endif
        local_irq_restore(flags);
-       put_user(status,value);
-       return 0;
+       return put_user(status, value);
 }
 
 /*
@@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
                        send_break(info, arg ? arg*(100) : 250);
                        return 0;
                case TIOCGSERIAL:
-                       if (access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(struct serial_struct)))
-                               return get_serial_info(info,
-                                              (struct serial_struct *) arg);
-                       return -EFAULT;
+                       return get_serial_info(info,
+                                      (struct serial_struct *) arg);
                case TIOCSSERIAL:
                        return set_serial_info(info,
                                               (struct serial_struct *) arg);
                case TIOCSERGETLSR: /* Get line status register */
-                       if (access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(unsigned int)))
-                               return get_lsr_info(info, (unsigned int *) arg);
-                       return -EFAULT;
+                       return get_lsr_info(info, (unsigned int *) arg);
                case TIOCSERGSTRUCT:
-                       if (!access_ok(VERIFY_WRITE, (void *) arg,
-                                               sizeof(struct m68k_serial)))
+                       if (copy_to_user((struct m68k_serial *) arg,
+                                   info, sizeof(struct m68k_serial)))
                                return -EFAULT;
-                       copy_to_user((struct m68k_serial *) arg,
-                                   info, sizeof(struct m68k_serial));
                        return 0;
-                       
                default:
                        return -ENOIOCTLCMD;
                }
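
The 68328serial changes above stop ignoring the return values of the user-copy helpers. A minimal sketch of the pattern, assuming a hypothetical demo_info structure: copy_to_user()/copy_from_user() return the number of bytes left uncopied, so any non-zero result becomes -EFAULT, while put_user() already returns 0 or -EFAULT and can be returned directly.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_info {                      /* hypothetical structure for illustration */
        int line;
        int flags;
};

/* Copy driver state out to user space, propagating -EFAULT on failure. */
static int demo_get_info(struct demo_info *src, struct demo_info __user *dst)
{
        struct demo_info tmp = *src;

        if (copy_to_user(dst, &tmp, sizeof(tmp)))
                return -EFAULT;          /* non-zero means bytes were NOT copied */
        return 0;
}

/* put_user() already encodes success/failure in its return value. */
static int demo_get_status(unsigned int status, unsigned int __user *value)
{
        return put_user(status, value);
}
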
index b745792ec25a08e9981d22bbdd2897c99b6f738e..eaafb98debed89e2f1b2e5e83ac33cd79301792e 100644 (file)
@@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device,
 
        if (mmio || mmio32)
                printk(KERN_INFO
-                      "Early serial console at MMIO%s 0x%llu (options '%s')\n",
+                      "Early serial console at MMIO%s 0x%llx (options '%s')\n",
                        mmio32 ? "32" : "",
                        (unsigned long long)port->mapbase,
                        device->options);
        else
                printk(KERN_INFO
-                     "Early serial console at I/O port 0x%lu (options '%s')\n",
+                     "Early serial console at I/O port 0x%lx (options '%s')\n",
                        port->iobase,
                        device->options);
 
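The 8250_early fix above switches the format specifiers from decimal to hexadecimal for addresses. A short sketch of the convention, assuming the MMIO base is a resource_size_t and the port is an unsigned long: widen to unsigned long long for %llx, and use %lx for unsigned long.

#include <linux/kernel.h>
#include <linux/types.h>

/* Print an MMIO base and an I/O port in hexadecimal, as the fix above does. */
static void demo_report_console(resource_size_t mapbase, unsigned long iobase)
{
        printk(KERN_INFO "console at MMIO 0x%llx\n",
               (unsigned long long)mapbase);   /* cast keeps %llx correct on 32- and 64-bit */
        printk(KERN_INFO "console at I/O port 0x%lx\n", iobase);
}
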
index 7e5e5efea4e27659ef564f28652e07377fcf19db..cff9a306660faede7827d705b5cf5d98fdef9067 100644 (file)
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
                         sysrq_requested = 0;
                         if (ch && time_before(jiffies, sysrq_timeout)) {
                                 spin_unlock_irqrestore(&port->sc_port.lock, flags);
-                                handle_sysrq(ch, NULL);
+                                handle_sysrq(ch);
                                 spin_lock_irqsave(&port->sc_port.lock, flags);
                                 /* ignore actual sysrq command char */
                                 continue;
index 4a7a7a7f11b67b6efd6b559023824dadeefdff07..335311a98fdcb0d4451c8fa2bfd604f937bae637 100644 (file)
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
 
 source "drivers/staging/memrar/Kconfig"
 
-source "drivers/staging/sep/Kconfig"
-
 source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/zram/Kconfig"
index ca5c03eb3ce36e233280f7f4bb00ceae5697e4e9..e3f1e1b6095e5e790db3b9af1de06e7e29d4f9b7 100644 (file)
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL)          += udlfb/
 obj-$(CONFIG_HYPERV)           += hv/
 obj-$(CONFIG_VME_BUS)          += vme/
 obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP)           += sep/
 obj-$(CONFIG_IIO)              += iio/
 obj-$(CONFIG_ZRAM)             += zram/
 obj-$(CONFIG_WLAGS49_H2)       += wlags49_h2/
index b4a8d5eb64fabc4879bd11e27577c2ed12f5b00a..05ca15a6c9f8b7f919f587e35e856d6de1310c2a 100644 (file)
@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
        if (atomic_read(&bat_priv->log_level) == log_level_tmp)
                return count;
 
+       bat_info(net_dev, "Changing log level from: %i to: %li\n",
+                atomic_read(&bat_priv->log_level),
+                log_level_tmp);
+
        atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
        return count;
 }
index 92c216a568856b4592e6a38427163bcec386aab3..baa8b05b9e8d70a49ba2dcb63e5e8c3cea7ed341 100644 (file)
@@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
 
 static void update_mac_addresses(struct batman_if *batman_if)
 {
+       if (!batman_if || !batman_if->packet_buff)
+               return;
+
        addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
 
        memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
        if (batman_if->if_status != IF_INACTIVE)
                return;
 
-       dev_hold(batman_if->net_dev);
-
        update_mac_addresses(batman_if);
        batman_if->if_status = IF_TO_BE_ACTIVATED;
 
@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
           (batman_if->if_status != IF_TO_BE_ACTIVATED))
                return;
 
-       dev_put(batman_if->net_dev);
-
        batman_if->if_status = IF_INACTIVE;
 
        bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
        if (ret != 1)
                goto out;
 
+       dev_hold(net_dev);
+
        batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
        if (!batman_if) {
                pr_err("Can't add interface (%s): out of memory\n",
                       net_dev->name);
-               goto out;
+               goto release_dev;
        }
 
        batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
        batman_if->if_num = -1;
        batman_if->net_dev = net_dev;
        batman_if->if_status = IF_NOT_IN_USE;
+       batman_if->packet_buff = NULL;
        INIT_LIST_HEAD(&batman_if->list);
 
        check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +348,8 @@ free_dev:
        kfree(batman_if->dev);
 free_if:
        kfree(batman_if);
+release_dev:
+       dev_put(net_dev);
 out:
        return NULL;
 }
@@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
        batman_if->if_status = IF_TO_BE_REMOVED;
        list_del_rcu(&batman_if->list);
        sysfs_del_hardif(&batman_if->hardif_obj);
+       dev_put(batman_if->net_dev);
        call_rcu(&batman_if->rcu, hardif_free_interface);
 }
 
@@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this,
        /* FIXME: each batman_if will be attached to a softif */
        struct bat_priv *bat_priv = netdev_priv(soft_device);
 
-       if (!batman_if)
-               batman_if = hardif_add_interface(net_dev);
+       if (!batman_if && event == NETDEV_REGISTER)
+                       batman_if = hardif_add_interface(net_dev);
 
        if (!batman_if)
                goto out;
 
        switch (event) {
-       case NETDEV_REGISTER:
-               break;
        case NETDEV_UP:
                hardif_activate_interface(soft_device, bat_priv, batman_if);
                break;
@@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct batman_packet *batman_packet;
        struct batman_if *batman_if;
-       struct net_device_stats *stats;
-       struct rtnl_link_stats64 temp;
        int ret;
 
        skb = skb_share_check(skb, GFP_ATOMIC);
@@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
        if (batman_if->if_status != IF_ACTIVE)
                goto err_free;
 
-       stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
-       if (stats) {
-               stats->rx_packets++;
-               stats->rx_bytes += skb->len;
-       }
-
        batman_packet = (struct batman_packet *)skb->data;
 
        if (batman_packet->version != COMPAT_VERSION) {
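
The hard-interface changes above move dev_hold() to the point where the batman_if object is created and add a matching dev_put() on the error path and in hardif_remove_interface(). A minimal sketch of that reference-counting shape, with hypothetical demo_* names:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_if {                         /* hypothetical wrapper for illustration */
        struct net_device *net_dev;
};

/* Take the device reference for the lifetime of the wrapper object. */
static struct demo_if *demo_if_add(struct net_device *net_dev)
{
        struct demo_if *dif;

        dev_hold(net_dev);               /* pin the device while we use it */

        dif = kmalloc(sizeof(*dif), GFP_ATOMIC);
        if (!dif)
                goto release_dev;        /* every error path must drop the ref */

        dif->net_dev = net_dev;
        return dif;

release_dev:
        dev_put(net_dev);
        return NULL;
}

static void demo_if_remove(struct demo_if *dif)
{
        dev_put(dif->net_dev);           /* drop the ref taken in demo_if_add() */
        kfree(dif);
}
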
index fc3d32c127292ab5dc717b59e169d6e19f52f10b..3ae7dd2d2d4d9fbea848b7b57a41a4e1d78b2b18 100644 (file)
@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
        INIT_LIST_HEAD(&socket_client->queue_list);
        socket_client->queue_len = 0;
        socket_client->index = i;
+       socket_client->bat_priv = inode->i_private;
        spin_lock_init(&socket_client->lock);
        init_waitqueue_head(&socket_client->queue_wait);
 
@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
 static ssize_t bat_socket_write(struct file *file, const char __user *buff,
                                size_t len, loff_t *off)
 {
-       /* FIXME: each orig_node->batman_if will be attached to a softif */
-       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct socket_client *socket_client = file->private_data;
+       struct bat_priv *bat_priv = socket_client->bat_priv;
        struct icmp_packet_rr icmp_packet;
        struct orig_node *orig_node;
        struct batman_if *batman_if;
@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
                return -EINVAL;
        }
 
+       if (!bat_priv->primary_if)
+               return -EFAULT;
+
        if (len >= sizeof(struct icmp_packet_rr))
                packet_len = sizeof(struct icmp_packet_rr);
 
@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
        if (batman_if->if_status != IF_ACTIVE)
                goto dst_unreach;
 
-       memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(icmp_packet.orig,
+              bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 
        if (packet_len == sizeof(struct icmp_packet_rr))
                memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
                goto err;
 
        d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
-                               bat_priv->debug_dir, NULL, &fops);
+                               bat_priv->debug_dir, bat_priv, &fops);
        if (d)
                goto err;
 
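The icmp_socket changes above pass bat_priv as the data argument of debugfs_create_file() and pick it up again in open(). A small sketch of that plumbing (hypothetical demo_* names): the data pointer is stored in inode->i_private, and the open handler copies it into file->private_data for the other file operations.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>

/* The 'data' argument of debugfs_create_file() ends up in inode->i_private. */
static int demo_debugfs_open(struct inode *inode, struct file *file)
{
        file->private_data = inode->i_private;   /* per-instance private state */
        return 0;
}

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = demo_debugfs_open,
};

/* Creation site: pass the private object instead of NULL. */
static struct dentry *demo_debugfs_register(struct dentry *dir, void *priv)
{
        return debugfs_create_file("demo", 0600, dir, priv, &demo_fops);
}
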
index 2686019fe4e133f60e445cf2f2b5eddcf211cd73..ef7c20ae7979af145b0dcf592bf1c226acdf9f71 100644 (file)
@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
 int is_my_mac(uint8_t *addr)
 {
        struct batman_if *batman_if;
+
        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list) {
-               if ((batman_if->net_dev) &&
-                   (compare_orig(batman_if->net_dev->dev_addr, addr))) {
+               if (batman_if->if_status != IF_ACTIVE)
+                       continue;
+
+               if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
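
The is_my_mac() change above skips interfaces that are not active while walking the RCU-protected list. A self-contained sketch of that walk, assuming a hypothetical demo_node element type:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct demo_node {                       /* hypothetical list element */
        struct list_head list;
        int active;
        int id;
};

static LIST_HEAD(demo_list);

/* Walk an RCU-protected list, skipping entries that are not active. */
static int demo_find(int id)
{
        struct demo_node *node;

        rcu_read_lock();
        list_for_each_entry_rcu(node, &demo_list, list) {
                if (!node->active)
                        continue;
                if (node->id == id) {
                        rcu_read_unlock();
                        return 1;
                }
        }
        rcu_read_unlock();
        return 0;
}
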
index 28bb627ffa135ceb9022621d33362e8611887b46..de5a8c1a810462da1d2f7e8779afd582eb779784 100644 (file)
@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
 int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
 {
        struct orig_node *orig_node;
+       unsigned long flags;
        HASHIT(hashit);
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock(&orig_hash_lock);
+       spin_lock_irqsave(&orig_hash_lock, flags);
 
        while (hash_iterate(orig_hash, &hashit)) {
                orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
                        goto err;
        }
 
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return 0;
 
 err:
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return -ENOMEM;
 }
 
@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
 {
        struct batman_if *batman_if_tmp;
        struct orig_node *orig_node;
+       unsigned long flags;
        HASHIT(hashit);
        int ret;
 
        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num */
-       spin_lock(&orig_hash_lock);
+       spin_lock_irqsave(&orig_hash_lock, flags);
 
        while (hash_iterate(orig_hash, &hashit)) {
                orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
        rcu_read_unlock();
 
        batman_if->if_num = -1;
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return 0;
 
 err:
-       spin_unlock(&orig_hash_lock);
+       spin_unlock_irqrestore(&orig_hash_lock, flags);
        return -ENOMEM;
 }
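
The originator hunks above convert plain spin_lock()/spin_unlock() on orig_hash_lock to the IRQ-saving variants. A minimal sketch of the conversion (hypothetical demo_lock, not the batman-adv lock):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/* The IRQ-safe variant lets this helper share the lock with code that may
 * also take it from interrupt context. */
static void demo_update(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_counter++;
        spin_unlock_irqrestore(&demo_lock, flags);
}
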
index 066cc9149bf1be3c38cb1984ea39c8a3201f1a68..032195e6de94a5eed40831c116246628fb2458f9 100644 (file)
@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
 
 static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct orig_node *orig_node;
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
                return NET_RX_DROP;
        }
 
+       if (!bat_priv->primary_if)
+               return NET_RX_DROP;
+
        /* answer echo request (ping) */
        /* get routing information */
        spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
                }
 
                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+               memcpy(icmp_packet->orig,
+                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = ECHO_REPLY;
                icmp_packet->ttl = TTL;
 
@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
 
 static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
 {
+       /* FIXME: each batman_if will be attached to a softif */
+       struct bat_priv *bat_priv = netdev_priv(soft_device);
        struct orig_node *orig_node;
        struct icmp_packet *icmp_packet;
        struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
                return NET_RX_DROP;
        }
 
+       if (!bat_priv->primary_if)
+               return NET_RX_DROP;
+
        /* get routing information */
        spin_lock_irqsave(&orig_hash_lock, flags);
        orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
                }
 
                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-               memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
+               memcpy(icmp_packet->orig,
+                      bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = TTL_EXCEEDED;
                icmp_packet->ttl = TTL;
 
index 21d0717afb09ef7d7f4c6d066a6218c3085392bf..9aa9d369c7522cb9c1db35edab0af15e9e39419a 100644 (file)
@@ -126,6 +126,7 @@ struct socket_client {
        unsigned char index;
        spinlock_t lock;
        wait_queue_head_t queue_wait;
+       struct bat_priv *bat_priv;
 };
 
 struct socket_packet {
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644 (file)
index 0a9c39c..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
-       tristate "Discretix SEP driver"
-#      depends on MRST
-       depends on RAR_REGISTER && PCI
-       default y
-       help
-         Discretix SEP driver
-
-         If unsure say M. The compiled module will be
-         called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644 (file)
index 628d5f9..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644 (file)
index ff0e931..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
-  interfaces
-- Do something about the magic shared memory interface and replace it
-  with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644 (file)
index 9200524..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- *  sep_dev.h - Security Processor Device Structures
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Alan Cox           alan@linux.intel.com
- *
- */
-
-struct sep_device {
-       /* pointer to pci dev */
-       struct pci_dev *pdev;
-
-       unsigned long in_use;
-
-       /* address of the shared memory allocated during init for SEP driver
-          (coherent alloc) */
-       void *shared_addr;
-       /* the physical address of the shared area */
-       dma_addr_t shared_bus;
-
-       /* restricted access region (coherent alloc) */
-       dma_addr_t rar_bus;
-       void *rar_addr;
-       /* firmware regions: cache is at rar_addr */
-       unsigned long cache_size;
-
-       /* follows the cache */
-       dma_addr_t resident_bus;
-       unsigned long resident_size;
-       void *resident_addr;
-
-       /* start address of the access to the SEP registers from driver */
-       void __iomem *reg_addr;
-       /* transaction counter that coordinates the transactions between SEP and HOST */
-       unsigned long send_ct;
-       /* counter for the messages from sep */
-       unsigned long reply_ct;
-       /* counter for the number of bytes allocated in the pool for the current
-          transaction */
-       unsigned long data_pool_bytes_allocated;
-
-       /* array of pointers to the pages that represent input data for the synchronic
-          DMA action */
-       struct page **in_page_array;
-
-       /* array of pointers to the pages that represent out data for the synchronic
-          DMA action */
-       struct page **out_page_array;
-
-       /* number of pages in the sep_in_page_array */
-       unsigned long in_num_pages;
-
-       /* number of pages in the sep_out_page_array */
-       unsigned long out_num_pages;
-
-       /* global data for every flow */
-       struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
-       /* pointer to the workqueue that handles the flow done interrupts */
-       struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
-       void __iomem *addr = dev->reg_addr + reg;
-       writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
-       void __iomem *addr = dev->reg_addr + reg;
-       return readl(addr);
-}
-
-/* wait for SRAM write complete(indirect write */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
-       u32 reg_val;
-       do
-               reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
-       while (!(reg_val & 1));
-}
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644 (file)
index ecbde34..0000000
+++ /dev/null
@@ -1,2742 +0,0 @@
-/*
- *
- *  sep_driver.c - Security Processor Driver main group of functions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/poll.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <asm/ioctl.h>
-#include <linux/ioport.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/pagemap.h>
-#include <asm/cacheflush.h>
-#include "sep_driver_hw_defs.h"
-#include "sep_driver_config.h"
-#include "sep_driver_api.h"
-#include "sep_dev.h"
-
-#if SEP_DRIVER_ARM_DEBUG_MODE
-
-#define  CRYS_SEP_ROM_length                  0x4000
-#define  CRYS_SEP_ROM_start_address           0x8000C000UL
-#define  CRYS_SEP_ROM_start_address_offset    0xC000UL
-#define  SEP_ROM_BANK_register                0x80008420UL
-#define  SEP_ROM_BANK_register_offset         0x8420UL
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0x82000000
-
-/*
- * THESE 2 definitions are specific to the board - must be
- * defined during integration
- */
-#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0xFF0D0000
-
-/* 2M size */
-
-static void sep_load_rom_code(struct sep_device *sep)
-{
-       /* Index variables */
-       unsigned long i, k, j;
-       u32 reg;
-       u32 error;
-       u32 warning;
-
-       /* Loading ROM from SEP_ROM_image.h file */
-       k = sizeof(CRYS_SEP_ROM);
-
-       edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
-
-       edbg("SEP Driver: k is %lu\n", k);
-       edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
-       edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
-
-       for (i = 0; i < 4; i++) {
-               /* write bank */
-               sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
-
-               for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
-                       sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
-
-                       k = k - 4;
-
-                       if (k == 0) {
-                               j = CRYS_SEP_ROM_length;
-                               i = 4;
-                       }
-               }
-       }
-
-       /* reset the SEP */
-       sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
-
-       /* poll for SEP ROM boot finish */
-       do
-               reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!reg);
-
-       edbg("SEP Driver: ROM polling ended\n");
-
-       switch (reg) {
-       case 0x1:
-               /* fatal error - read erro status from GPRO */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-               edbg("SEP Driver: ROM polling case 1\n");
-               break;
-       case 0x4:
-               /* Cold boot ended successfully  */
-       case 0x8:
-               /* Warmboot ended successfully */
-       case 0x10:
-               /* ColdWarm boot ended successfully */
-               error = 0;
-       case 0x2:
-               /* Boot First Phase ended  */
-               warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-       case 0x20:
-               edbg("SEP Driver: ROM polling case %d\n", reg);
-               break;
-       }
-
-}
-
-#else
-static void sep_load_rom_code(struct sep_device *sep) { }
-#endif                         /* SEP_DRIVER_ARM_DEBUG_MODE */
-
-
-
-/*----------------------------------------
-       DEFINES
------------------------------------------*/
-
-#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
-#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
-
-/*--------------------------------------------
-       GLOBAL variables
---------------------------------------------*/
-
-/* debug messages level */
-static int debug;
-module_param(debug, int , 0);
-MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
-
-/* Keep this a single static object for now to keep the conversion easy */
-
-static struct sep_device sep_instance;
-static struct sep_device *sep_dev = &sep_instance;
-
-/*
-  mutex for the access to the internals of the sep driver
-*/
-static DEFINE_MUTEX(sep_mutex);
-
-
-/* wait queue head (event) of the driver */
-static DECLARE_WAIT_QUEUE_HEAD(sep_event);
-
-/**
- *     sep_load_firmware       -       copy firmware cache/resident
- *     @sep: device we are loading
- *
- *     This functions copies the cache and resident from their source
- *     location into destination shared memory.
- */
-
-static int sep_load_firmware(struct sep_device *sep)
-{
-       const struct firmware *fw;
-       char *cache_name = "sep/cache.image.bin";
-       char *res_name = "sep/resident.image.bin";
-       int error;
-
-       edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-       edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-
-       /* load cache */
-       error = request_firmware(&fw, cache_name, &sep->pdev->dev);
-       if (error) {
-               edbg("SEP Driver:cant request cache fw\n");
-               return error;
-       }
-       edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
-
-       memcpy(sep->rar_addr, (void *)fw->data, fw->size);
-       sep->cache_size = fw->size;
-       release_firmware(fw);
-
-       sep->resident_bus = sep->rar_bus + sep->cache_size;
-       sep->resident_addr = sep->rar_addr + sep->cache_size;
-
-       /* load resident */
-       error = request_firmware(&fw, res_name, &sep->pdev->dev);
-       if (error) {
-               edbg("SEP Driver:cant request res fw\n");
-               return error;
-       }
-       edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
-
-       memcpy(sep->resident_addr, (void *) fw->data, fw->size);
-       sep->resident_size = fw->size;
-       release_firmware(fw);
-
-       edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
-               sep->resident_addr, (unsigned long long)sep->resident_bus,
-               sep->rar_addr, (unsigned long long)sep->rar_bus);
-       return 0;
-}
-
-MODULE_FIRMWARE("sep/cache.image.bin");
-MODULE_FIRMWARE("sep/resident.image.bin");
-
-/**
- *     sep_map_and_alloc_shared_area   -       allocate shared block
- *     @sep: security processor
- *     @size: size of shared area
- *
- *     Allocate a shared buffer in host memory that can be used by both the
- *     kernel and also the hardware interface via DMA.
- */
-
-static int sep_map_and_alloc_shared_area(struct sep_device *sep,
-                                                       unsigned long size)
-{
-       /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
-       sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
-                                       &sep->shared_bus, GFP_KERNEL);
-
-       if (!sep->shared_addr) {
-               edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
-               return -ENOMEM;
-       }
-       /* set the bus address of the shared area */
-       edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
-               size, sep->shared_addr, (unsigned long long)sep->shared_bus);
-       return 0;
-}
-
-/**
- *     sep_unmap_and_free_shared_area  -       free shared block
- *     @sep: security processor
- *
- *     Free the shared area allocated to the security processor. The
- *     processor must have finished with this and any final posted
- *     writes cleared before we do so.
- */
-static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
-{
-       dma_free_coherent(&sep->pdev->dev, size,
-                               sep->shared_addr, sep->shared_bus);
-}
-
-/**
- *     sep_shared_virt_to_bus  -       convert bus/virt addresses
- *
- *     Returns the bus address inside the shared area according
- *     to the virtual address.
- */
-
-static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
-                                               void *virt_address)
-{
-       dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
-       edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
-                                                               virt_address);
-       return pa;
-}
-
-/**
- *     sep_shared_bus_to_virt  -       convert bus/virt addresses
- *
- *     Returns virtual address inside the shared area according
- *     to the bus address.
- */
-
-static void *sep_shared_bus_to_virt(struct sep_device *sep,
-                                               dma_addr_t bus_address)
-{
-       return sep->shared_addr + (bus_address - sep->shared_bus);
-}
-
-
-/**
- *     sep_try_open            -       attempt to open a SEP device
- *     @sep: device to attempt to open
- *
- *     Atomically attempt to get ownership of a SEP device.
- *     Returns 1 if the device was opened, 0 on failure.
- */
-
-static int sep_try_open(struct sep_device *sep)
-{
-       if (!test_and_set_bit(0, &sep->in_use))
-               return 1;
-       return 0;
-}
-
-/**
- *     sep_open                -       device open method
- *     @inode: inode of sep device
- *     @filp: file handle to sep device
- *
- *     Open method for the SEP device. Called when userspace opens
- *     the SEP device node. Must also release the memory data pool
- *     allocations.
- *
- *     Returns zero on success otherwise an error code.
- */
-
-static int sep_open(struct inode *inode, struct file *filp)
-{
-       if (sep_dev == NULL)
-               return -ENODEV;
-
-       /* check the blocking mode */
-       if (filp->f_flags & O_NDELAY) {
-               if (sep_try_open(sep_dev) == 0)
-                       return -EAGAIN;
-       } else
-               if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
-                       return -EINTR;
-
-       /* Bind to the device, we only have one which makes it easy */
-       filp->private_data = sep_dev;
-       /* release data pool allocations */
-       sep_dev->data_pool_bytes_allocated = 0;
-       return 0;
-}
-
-
-/**
- *     sep_release             -       close a SEP device
- *     @inode: inode of SEP device
- *     @filp: file handle being closed
- *
- *     Called on the final close of a SEP device. As the open protects against
- *     multiple simultaenous opens that means this method is called when the
- *     final reference to the open handle is dropped.
- */
-
-static int sep_release(struct inode *inode, struct file *filp)
-{
-       struct sep_device *sep =  filp->private_data;
-#if 0                          /*!SEP_DRIVER_POLLING_MODE */
-       /* close IMR */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-       /* release IRQ line */
-       free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-#endif
-       /* Ensure any blocked open progresses */
-       clear_bit(0, &sep->in_use);
-       wake_up(&sep_event);
-       return 0;
-}
-
-/*---------------------------------------------------------------
-  map function - this functions maps the message shared area
------------------------------------------------------------------*/
-static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       dma_addr_t bus_addr;
-       struct sep_device *sep = filp->private_data;
-
-       dbg("-------->SEP Driver: mmap start\n");
-
-       /* check that the size of the mapped range is as the size of the message
-          shared area */
-       if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
-               edbg("SEP Driver mmap requested size is more than allowed\n");
-               printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
-               printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
-               printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
-               return -EAGAIN;
-       }
-
-       edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
-       /* get bus address */
-       bus_addr = sep->shared_bus;
-
-       edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
-
-       if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
-               edbg("SEP Driver remap_page_range failed\n");
-               printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
-               return -EAGAIN;
-       }
-
-       dbg("SEP Driver:<-------- mmap end\n");
-
-       return 0;
-}
-
-
-/*-----------------------------------------------
-  poll function
-*----------------------------------------------*/
-static unsigned int sep_poll(struct file *filp, poll_table * wait)
-{
-       unsigned long count;
-       unsigned int mask = 0;
-       unsigned long retval = 0;       /* flow id */
-       struct sep_device *sep = filp->private_data;
-
-       dbg("---------->SEP Driver poll: start\n");
-
-
-#if SEP_DRIVER_POLLING_MODE
-
-       while (sep->send_ct != (retval & 0x7FFFFFFF)) {
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-
-               for (count = 0; count < 10 * 4; count += 4)
-                       edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
-       }
-
-       sep->reply_ct++;
-#else
-       /* add the event to the polling wait table */
-       poll_wait(filp, &sep_event, wait);
-
-#endif
-
-       edbg("sep->send_ct is %lu\n", sep->send_ct);
-       edbg("sep->reply_ct is %lu\n", sep->reply_ct);
-
-       /* check if the data is ready */
-       if (sep->send_ct == sep->reply_ct) {
-               for (count = 0; count < 12 * 4; count += 4)
-                       edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
-
-               for (count = 0; count < 10 * 4; count += 4)
-                       edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
-
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
-               edbg("retval is %lu\n", retval);
-               /* check if the this is sep reply or request */
-               if (retval >> 31) {
-                       edbg("SEP Driver: sep request in\n");
-                       /* request */
-                       mask |= POLLOUT | POLLWRNORM;
-               } else {
-                       edbg("SEP Driver: sep reply in\n");
-                       mask |= POLLIN | POLLRDNORM;
-               }
-       }
-       dbg("SEP Driver:<-------- poll exit\n");
-       return mask;
-}
-
-/**
- *     sep_time_address        -       address in SEP memory of time
- *     @sep: SEP device we want the address from
- *
- *     Return the address of the two dwords in memory used for time
- *     setting.
- */
-
-static u32 *sep_time_address(struct sep_device *sep)
-{
-       return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
-}
-
-/**
- *     sep_set_time            -       set the SEP time
- *     @sep: the SEP we are setting the time for
- *
- *     Calculates time and sets it at the predefined address.
- *     Called with the sep mutex held.
- */
-static unsigned long sep_set_time(struct sep_device *sep)
-{
-       struct timeval time;
-       u32 *time_addr; /* address of time as seen by the kernel */
-
-
-       dbg("sep:sep_set_time start\n");
-
-       do_gettimeofday(&time);
-
-       /* set value in the SYSTEM MEMORY offset */
-       time_addr = sep_time_address(sep);
-
-       time_addr[0] = SEP_TIME_VAL_TOKEN;
-       time_addr[1] = time.tv_sec;
-
-       edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
-       edbg("SEP Driver:time_addr is %p\n", time_addr);
-       edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
-
-       return time.tv_sec;
-}
-
-/**
- *     sep_dump_message        - dump the message that is pending
- *     @sep: sep device
- *
- *     Dump out the message pending in the shared message area
- */
-
-static void sep_dump_message(struct sep_device *sep)
-{
-       int count;
-       for (count = 0; count < 12 * 4; count += 4)
-               edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
-}
-
-/**
- *     sep_send_command_handler        -       kick off a command
- *     @sep: sep being signalled
- *
- *     This function raises interrupt to SEP that signals that is has a new
- *     command from the host
- */
-
-static void sep_send_command_handler(struct sep_device *sep)
-{
-       dbg("sep:sep_send_command_handler start\n");
-
-       mutex_lock(&sep_mutex);
-       sep_set_time(sep);
-
-       /* FIXME: flush cache */
-       flush_cache_all();
-
-       sep_dump_message(sep);
-       /* update counter */
-       sep->send_ct++;
-       /* send interrupt to SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-       dbg("SEP Driver:<-------- sep_send_command_handler end\n");
-       mutex_unlock(&sep_mutex);
-       return;
-}
-
-/**
- *     sep_send_reply_command_handler  -       kick off a command reply
- *     @sep: sep being signalled
- *
- *     This function raises interrupt to SEP that signals that is has a new
- *     command from the host
- */
-
-static void sep_send_reply_command_handler(struct sep_device *sep)
-{
-       dbg("sep:sep_send_reply_command_handler start\n");
-
-       /* flash cache */
-       flush_cache_all();
-
-       sep_dump_message(sep);
-
-       mutex_lock(&sep_mutex);
-       sep->send_ct++;         /* update counter */
-       /* send the interrupt to SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
-       /* update both counters */
-       sep->send_ct++;
-       sep->reply_ct++;
-       mutex_unlock(&sep_mutex);
-       dbg("sep: sep_send_reply_command_handler end\n");
-}
-
-/*
-  This function handles the allocate data pool memory request
-  This function returns calculates the bus address of the
-  allocated memory, and the offset of this area from the mapped address.
-  Therefore, the FVOs in user space can calculate the exact virtual
-  address of this allocated memory
-*/
-static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
-                                                       unsigned long arg)
-{
-       int error;
-       struct sep_driver_alloc_t command_args;
-
-       dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* allocate memory */
-       if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* set the virtual and bus address */
-       command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-       command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
-
-       /* write the memory back to the user space */
-       error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* set the allocation */
-       sep->data_pool_bytes_allocated += command_args.num_bytes;
-
-end_function:
-       dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
-       return error;
-}
-
-/*
-  This function  handles write into allocated data pool command
-*/
-static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       void *virt_address;
-       unsigned long va;
-       unsigned long app_in_address;
-       unsigned long num_bytes;
-       void *data_pool_area_addr;
-
-       dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
-
-       /* get the application address */
-       error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
-       if (error)
-               goto end_function;
-
-       /* get the virtual kernel address address */
-       error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
-       if (error)
-               goto end_function;
-       virt_address = (void *)va;
-
-       /* get the number of bytes */
-       error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
-       if (error)
-               goto end_function;
-
-       /* calculate the start of the data pool */
-       data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-
-       /* check that the range of the virtual kernel address is correct */
-       if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
-               error = -EINVAL;
-               goto end_function;
-       }
-       /* copy the application data */
-       error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the read from data pool command
-*/
-static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       /* virtual address of dest application buffer */
-       unsigned long app_out_address;
-       /* virtual address of the data pool */
-       unsigned long va;
-       void *virt_address;
-       unsigned long num_bytes;
-       void *data_pool_area_addr;
-
-       dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
-
-       /* get the application address */
-       error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
-       if (error)
-               goto end_function;
-
-       /* get the virtual kernel address address */
-       error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
-       if (error)
-               goto end_function;
-       virt_address = (void *)va;
-
-       /* get the number of bytes */
-       error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
-       if (error)
-               goto end_function;
-
-       /* calculate the start of the data pool */
-       data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
-
-       /* FIXME: These are incomplete all over the driver: what about + len
-          and when doing that also overflows */
-       /* check that the range of the virtual kernel address is correct */
-       if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /* copy the application data */
-       error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
-       return error;
-}
-
-/*
-  This function releases all the application virtual buffer physical pages,
-       that were previously locked
-*/
-static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
-{
-       unsigned long count;
-
-       if (dirtyFlag) {
-               for (count = 0; count < num_pages; count++) {
-                       /* the out array was written, therefore the data was changed */
-                       if (!PageReserved(page_array_ptr[count]))
-                               SetPageDirty(page_array_ptr[count]);
-                       page_cache_release(page_array_ptr[count]);
-               }
-       } else {
-               /* free in pages - the data was only read, therefore no update was done
-                  on those pages */
-               for (count = 0; count < num_pages; count++)
-                       page_cache_release(page_array_ptr[count]);
-       }
-
-       if (page_array_ptr)
-               /* free the array */
-               kfree(page_array_ptr);
-
-       return 0;
-}
-
-/*
-  This function locks all the physical pages of the kernel virtual buffer
-  and construct a basic lli  array, where each entry holds the physical
-  page address and the size that application data holds in this physical pages
-*/
-static int sep_lock_kernel_pages(struct sep_device *sep,
-                                unsigned long kernel_virt_addr,
-                                unsigned long data_size,
-                                unsigned long *num_pages_ptr,
-                                struct sep_lli_entry_t **lli_array_ptr,
-                                struct page ***page_array_ptr)
-{
-       int error = 0;
-       /* the the page of the end address of the user space buffer */
-       unsigned long end_page;
-       /* the page of the start address of the user space buffer */
-       unsigned long start_page;
-       /* the range in pages */
-       unsigned long num_pages;
-       struct sep_lli_entry_t *lli_array;
-       /* next kernel address to map */
-       unsigned long next_kernel_address;
-       unsigned long count;
-
-       dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
-
-       /* set start and end pages  and num pages */
-       end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
-       start_page = kernel_virt_addr >> PAGE_SHIFT;
-       num_pages = end_page - start_page + 1;
-
-       edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
-       edbg("SEP Driver: data_size is %lu\n", data_size);
-       edbg("SEP Driver: start_page is %lx\n", start_page);
-       edbg("SEP Driver: end_page is %lx\n", end_page);
-       edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
-       lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
-       if (!lli_array) {
-               edbg("SEP Driver: kmalloc for lli_array failed\n");
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* set the start address of the first page - app data may start not at
-          the beginning of the page */
-       lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
-
-       /* check that not all the data is in the first page only */
-       if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
-               lli_array[0].block_size = data_size;
-       else
-               lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
-
-       /* debug print */
-       dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
-       /* advance the address to the start of the next page */
-       next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
-
-       /* go from the second page to the prev before last */
-       for (count = 1; count < (num_pages - 1); count++) {
-               lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-               lli_array[count].block_size = PAGE_SIZE;
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-               next_kernel_address += PAGE_SIZE;
-       }
-
-       /* if more then 1 pages locked - then update for the last page size needed */
-       if (num_pages > 1) {
-               /* update the address of the last page */
-               lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
-
-               /* set the size of the last page */
-               lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
-
-               if (lli_array[count].block_size == 0) {
-                       dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
-                       dbg("data_size is %lu\n", data_size);
-                       while (1);
-               }
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-       }
-       /* set output params */
-       *lli_array_ptr = lli_array;
-       *num_pages_ptr = num_pages;
-       *page_array_ptr = 0;
-end_function:
-       dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
-       return 0;
-}
-
-/*
-  This function locks all the physical pages of the application virtual buffer
-  and construct a basic lli  array, where each entry holds the physical page
-  address and the size that application data holds in this physical pages
-*/
-static int sep_lock_user_pages(struct sep_device *sep,
-                       unsigned long app_virt_addr,
-                       unsigned long data_size,
-                       unsigned long *num_pages_ptr,
-                       struct sep_lli_entry_t **lli_array_ptr,
-                       struct page ***page_array_ptr)
-{
-       int error = 0;
-       /* the the page of the end address of the user space buffer */
-       unsigned long end_page;
-       /* the page of the start address of the user space buffer */
-       unsigned long start_page;
-       /* the range in pages */
-       unsigned long num_pages;
-       struct page **page_array;
-       struct sep_lli_entry_t *lli_array;
-       unsigned long count;
-       int result;
-
-       dbg("SEP Driver:--------> sep_lock_user_pages start\n");
-
-       /* set start and end pages  and num pages */
-       end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
-       start_page = app_virt_addr >> PAGE_SHIFT;
-       num_pages = end_page - start_page + 1;
-
-       edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
-       edbg("SEP Driver: data_size is %lu\n", data_size);
-       edbg("SEP Driver: start_page is %lu\n", start_page);
-       edbg("SEP Driver: end_page is %lu\n", end_page);
-       edbg("SEP Driver: num_pages is %lu\n", num_pages);
-
-       /* allocate array of pages structure pointers */
-       page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
-       if (!page_array) {
-               edbg("SEP Driver: kmalloc for page_array failed\n");
-
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
-       if (!lli_array) {
-               edbg("SEP Driver: kmalloc for lli_array failed\n");
-
-               error = -ENOMEM;
-               goto end_function_with_error1;
-       }
-
-       /* convert the application virtual address into a set of physical */
-       down_read(&current->mm->mmap_sem);
-       result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
-       up_read(&current->mm->mmap_sem);
-
-       /* check the number of pages locked - if not all then exit with error */
-       if (result != num_pages) {
-               dbg("SEP Driver: not all pages locked by get_user_pages\n");
-
-               error = -ENOMEM;
-               goto end_function_with_error2;
-       }
-
-       /* flush the cache */
-       for (count = 0; count < num_pages; count++)
-               flush_dcache_page(page_array[count]);
-
-       /* set the start address of the first page - app data may start not at
-          the beginning of the page */
-       lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
-
-       /* check that not all the data is in the first page only */
-       if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
-               lli_array[0].block_size = data_size;
-       else
-               lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
-
-       /* debug print */
-       dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
-
-       /* go from the second page to the prev before last */
-       for (count = 1; count < (num_pages - 1); count++) {
-               lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-               lli_array[count].block_size = PAGE_SIZE;
-
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
-       }
-
-       /* if more than one page was locked - update the size of the last page */
-       if (num_pages > 1) {
-               /* update the address of the last page */
-               lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
-
-               /* set the size of the last page */
-               lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
-
-               if (lli_array[count].block_size == 0) {
-                       dbg("app_virt_addr is %08lx\n", app_virt_addr);
-                       dbg("data_size is %lu\n", data_size);
-                       while (1);
-               }
-               edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
-                    count, lli_array[count].physical_address,
-                    count, lli_array[count].block_size);
-       }
-
-       /* set output params */
-       *lli_array_ptr = lli_array;
-       *num_pages_ptr = num_pages;
-       *page_array_ptr = page_array;
-       goto end_function;
-
-end_function_with_error2:
-       /* release the locked pages */
-       for (count = 0; count < num_pages; count++)
-               page_cache_release(page_array[count]);
-       kfree(lli_array);
-end_function_with_error1:
-       kfree(page_array);
-end_function:
-       dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
-       return error;
-}
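For reference, a minimal user-space sketch of the first/middle/last block-size arithmetic used above. It is not driver code: the 4 KB page size and the example address and length are assumptions chosen only to show how a buffer that is not page aligned is split across lli entries.

/* Illustrative only: mirrors the block-size logic of sep_lock_user_pages
   for a hypothetical buffer (addr and size are made-up values). */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0x1100;	/* hypothetical user address        */
	unsigned long size = 0x2100;	/* hypothetical buffer size (bytes) */
	unsigned long num_pages, first, last;

	num_pages = ((addr + size - 1) >> EX_PAGE_SHIFT) - (addr >> EX_PAGE_SHIFT) + 1;

	/* first block: from the buffer start to the end of its first page */
	first = EX_PAGE_SIZE - (addr & ~EX_PAGE_MASK);
	if (first >= size)
		first = size;

	/* last block: whatever spills into the final page */
	last = (addr + size) & ~EX_PAGE_MASK;

	/* prints: pages=3 first=3840 last=512 (middle pages are 4096 each) */
	printf("pages=%lu first=%lu last=%lu\n", num_pages, first, last);
	return 0;
}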
-
-
-/*
-  this function calculates the size of data that can be inserted into the lli
-  table from this array; it stops when either the table is full
-  (all entries are used) or there are no more entries in the lli array
-*/
-static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
-{
-       unsigned long table_data_size = 0;
-       unsigned long counter;
-
-       /* accumulate the data for the output lli table until either the whole
-          table is filled or the data has ended */
-       for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
-               table_data_size += lli_in_array_ptr[counter].block_size;
-       return table_data_size;
-}
-
-/*
-  this function builds one lli table from the lli_array according to
-  the given data size
-*/
-static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
-{
-       unsigned long curr_table_data_size;
-       /* counter of lli array entry */
-       unsigned long array_counter;
-
-       dbg("SEP Driver:--------> sep_build_lli_table start\n");
-
-       /* init current table data size and lli array entry counter */
-       curr_table_data_size = 0;
-       array_counter = 0;
-       *num_table_entries_ptr = 1;
-
-       edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
-       /* fill the table till table size reaches the needed amount */
-       while (curr_table_data_size < table_data_size) {
-               /* update the number of entries in table */
-               (*num_table_entries_ptr)++;
-
-               lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
-               lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
-               curr_table_data_size += lli_table_ptr->block_size;
-
-               edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
-               edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-               edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-               /* check for overflow of the table data */
-               if (curr_table_data_size > table_data_size) {
-                       edbg("SEP Driver:curr_table_data_size > table_data_size\n");
-
-                       /* update the size of block in the table */
-                       lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
-
-                       /* update the physical address in the lli array */
-                       lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
-
-                       /* update the block size left in the lli array */
-                       lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
-               } else
-                       /* advance to the next entry in the lli_array */
-                       array_counter++;
-
-               edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-               edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-               /* move to the next entry in table */
-               lli_table_ptr++;
-       }
-
-       /* set the info entry to default */
-       lli_table_ptr->physical_address = 0xffffffff;
-       lli_table_ptr->block_size = 0;
-
-       edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
-       edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-       edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-
-       /* set the output parameter */
-       *num_processed_entries_ptr += array_counter;
-
-       edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
-       dbg("SEP Driver:<-------- sep_build_lli_table end\n");
-       return;
-}
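As a worked illustration of the overshoot branch above: when copying an lli entry would exceed the requested table_data_size, the table entry is trimmed to exactly the space still needed, and the source array entry keeps the remainder (with its physical address advanced) so the next table picks it up. A stand-alone sketch with made-up numbers, not driver code:

/* Illustrative only: the split arithmetic applied by sep_build_lli_table
   when an entry overshoots table_data_size (values are hypothetical). */
#include <stdio.h>

struct ex_entry {
	unsigned long physical_address;
	unsigned long block_size;
};

int main(void)
{
	struct ex_entry src  = { 0x10000000UL, 4096 };	/* remaining lli array entry        */
	struct ex_entry tbl  = src;			/* copy placed in the lli table     */
	unsigned long needed = 1024;			/* space left up to table_data_size */
	unsigned long over   = tbl.block_size - needed;	/* overshoot                        */

	tbl.block_size       -= over;			/* table keeps exactly 1024 bytes   */
	src.physical_address += tbl.block_size;		/* source resumes after that data   */
	src.block_size        = over;			/* 3072 bytes left for next table   */

	/* prints: table=0x10000000/1024 leftover=0x10000400/3072 */
	printf("table=%#lx/%lu leftover=%#lx/%lu\n",
	       tbl.physical_address, tbl.block_size,
	       src.physical_address, src.block_size);
	return 0;
}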
-
-/*
-  this function goes over the list of the created tables and
-  prints all their data
-*/
-static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
-{
-       unsigned long table_count;
-       unsigned long entries_count;
-
-       dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
-
-       table_count = 1;
-       while ((unsigned long) lli_table_ptr != 0xffffffff) {
-               edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
-               edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
-
-               /* print entries of the table (without info entry) */
-               for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
-                       edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
-                       edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
-               }
-
-               /* point to the info entry */
-               lli_table_ptr--;
-
-               edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
-               edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
-
-
-               table_data_size = lli_table_ptr->block_size & 0xffffff;
-               num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
-               lli_table_ptr = (struct sep_lli_entry_t *)
-                   (lli_table_ptr->physical_address);
-
-               edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
-
-               if ((unsigned long) lli_table_ptr != 0xffffffff)
-                       lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
-
-               table_count++;
-       }
-       dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
-}
-
-
-/*
-  This function prepares only the input DMA table for synchronous symmetric
-  operations (HASH)
-*/
-static int sep_prepare_input_dma_table(struct sep_device *sep,
-                               unsigned long app_virt_addr,
-                               unsigned long data_size,
-                               unsigned long block_size,
-                               unsigned long *lli_table_ptr,
-                               unsigned long *num_entries_ptr,
-                               unsigned long *table_data_size_ptr,
-                               bool isKernelVirtualAddress)
-{
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_entry_ptr;
-       /* lli array built from the locked pages */
-       struct sep_lli_entry_t *lli_array_ptr;
-       /* points to the first entry to be processed in the lli_in_array */
-       unsigned long current_entry;
-       /* num entries in the virtual buffer */
-       unsigned long sep_lli_entries;
-       /* lli table pointer */
-       struct sep_lli_entry_t *in_lli_table_ptr;
-       /* the total data in one table */
-       unsigned long table_data_size;
-       /* number of entries in lli table */
-       unsigned long num_entries_in_table;
-       /* next table address */
-       void *lli_table_alloc_addr;
-       unsigned long result;
-
-       dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
-
-       edbg("SEP Driver:data_size is %lu\n", data_size);
-       edbg("SEP Driver:block_size is %lu\n", block_size);
-
-       /* initialize the pages pointers */
-       sep->in_page_array = 0;
-       sep->in_num_pages = 0;
-
-       if (data_size == 0) {
-               /* special case - create a 2-entry table with zero data */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
-               /* FIXME: Should the entry below not be for _bus */
-               in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-               in_lli_table_ptr->block_size = 0;
-
-               in_lli_table_ptr++;
-               in_lli_table_ptr->physical_address = 0xFFFFFFFF;
-               in_lli_table_ptr->block_size = 0;
-
-               *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-               *num_entries_ptr = 2;
-               *table_data_size_ptr = 0;
-
-               goto end_function;
-       }
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true)
-               /* lock the pages of the kernel buffer and translate them to pages */
-               result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-       else
-               /* lock the pages of the user buffer and translate them to pages */
-               result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
-
-       if (result)
-               return result;
-
-       edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
-
-       current_entry = 0;
-       info_entry_ptr = 0;
-       sep_lli_entries = sep->in_num_pages;
-
-       /* initiate to point after the message area */
-       lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       /* loop until all the entries in the input array are processed */
-       while (current_entry < sep_lli_entries) {
-               /* set the new input table */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* calculate the maximum size of data for input table */
-               table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
-
-               /* now truncate the table size to a multiple of the block size */
-               table_data_size = (table_data_size / block_size) * block_size;
-
-               edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
-
-               /* construct input lli table */
-               sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
-
-               if (info_entry_ptr == 0) {
-                       /* set the output parameters to physical addresses */
-                       *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       *num_entries_ptr = num_entries_in_table;
-                       *table_data_size_ptr = table_data_size;
-
-                       edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
-               } else {
-                       /* update the info entry of the previous in table */
-                       info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-               }
-
-               /* save the pointer to the info entry of the current tables */
-               info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
-       }
-
-       /* print input tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
-
-       /* free the lli entry array */
-       kfree(lli_array_ptr);
-end_function:
-       dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
-       return 0;
-
-}
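The info entry written above packs the linked table's entry count into the top 8 bits of block_size and its data size into the low 24 bits; the debug walker decodes it with the matching ">> 24" and "& 0xffffff". Below is a minimal sketch of that packing with hypothetical helper names; the driver itself expresses the same split through its SEP_NUM_ENTRIES_* and SEP_TABLE_DATA_SIZE_MASK constants, whose numeric values are inferred here from the shifts and masks visible in this code.

/* Illustrative only: the entry-count/data-size packing used for info
   entries in this driver; helper names and the exact mask values are
   inferred from the shifts used above, not taken from a header. */
#include <stdio.h>

#define EX_NUM_ENTRIES_SHIFT	24
#define EX_NUM_ENTRIES_MASK	0xffUL
#define EX_TABLE_DATA_MASK	0xffffffUL

static unsigned long ex_info_pack(unsigned long num_entries,
				  unsigned long table_data_size)
{
	return (num_entries << EX_NUM_ENTRIES_SHIFT) |
	       (table_data_size & EX_TABLE_DATA_MASK);
}

int main(void)
{
	unsigned long block_size = ex_info_pack(9, 32768);

	/* prints: entries=9 data=32768 */
	printf("entries=%lu data=%lu\n",
	       (block_size >> EX_NUM_ENTRIES_SHIFT) & EX_NUM_ENTRIES_MASK,
	       block_size & EX_TABLE_DATA_MASK);
	return 0;
}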
-
-/*
- This function creates the input and output dma tables for
- symmetric operations (AES/DES) according to the block size from LLI arrays
-*/
-static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
-                                     struct sep_lli_entry_t *lli_in_array,
-                                     unsigned long sep_in_lli_entries,
-                                     struct sep_lli_entry_t *lli_out_array,
-                                     unsigned long sep_out_lli_entries,
-                                     unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
-{
-       /* points to the area where next lli table can be allocated: keep void *
-          as there is pointer scaling to fix otherwise */
-       void *lli_table_alloc_addr;
-       /* input lli table */
-       struct sep_lli_entry_t *in_lli_table_ptr;
-       /* output lli table */
-       struct sep_lli_entry_t *out_lli_table_ptr;
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_in_entry_ptr;
-       /* pointer to the info entry of the table - the last entry */
-       struct sep_lli_entry_t *info_out_entry_ptr;
-       /* points to the first entry to be processed in the lli_in_array */
-       unsigned long current_in_entry;
-       /* points to the first entry to be processed in the lli_out_array */
-       unsigned long current_out_entry;
-       /* max size of the input table */
-       unsigned long in_table_data_size;
-       /* max size of the output table */
-       unsigned long out_table_data_size;
-       /* flag that signifies whether this is the first table built from the arrays */
-       unsigned long first_table_flag;
-       /* the data size that should be in table */
-       unsigned long table_data_size;
-       /* number of entries in the input table */
-       unsigned long num_entries_in_table;
-       /* number of entries in the output table */
-       unsigned long num_entries_out_table;
-
-       dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
-
-       /* initiate to point after the message area */
-       lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       current_in_entry = 0;
-       current_out_entry = 0;
-       first_table_flag = 1;
-       info_in_entry_ptr = 0;
-       info_out_entry_ptr = 0;
-
-       /* loop until all the entries in the input array are processed */
-       while (current_in_entry < sep_in_lli_entries) {
-               /* set the new input and output tables */
-               in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* set the first output tables */
-               out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
-
-               lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
-
-               /* calculate the maximum size of data for input table */
-               in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
-
-               /* calculate the maximum size of data for output table */
-               out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
-
-               edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
-               edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
-
-               /* use the smaller of the two data sizes */
-               table_data_size = in_table_data_size;
-               if (table_data_size > out_table_data_size)
-                       table_data_size = out_table_data_size;
-
-               /* now truncate the table size to a multiple of the block size */
-               table_data_size = (table_data_size / block_size) * block_size;
-
-               dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
-
-               /* construct input lli table */
-               sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
-
-               /* construct output lli table */
-               sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
-
-               /* if info entry is null - this is the first table built */
-               if (info_in_entry_ptr == 0) {
-                       /* set the output parameters to physical addresses */
-                       *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       *in_num_entries_ptr = num_entries_in_table;
-                       *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
-                       *out_num_entries_ptr = num_entries_out_table;
-                       *table_data_size_ptr = table_data_size;
-
-                       edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
-                       edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
-               } else {
-                       /* update the info entry of the previous in table */
-                       info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
-                       info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
-
-                       /* update the info entry of the previous out table */
-                       info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
-                       info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
-               }
-
-               /* save the pointer to the info entry of the current tables */
-               info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
-               info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
-
-               edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
-               edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
-               edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
-       }
-
-       /* print input tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
-       /* print output tables */
-       sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
-                                  sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
-       dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
-       return 0;
-}
-
-
-/*
-  This function builds input and output DMA tables for synchronous
-  symmetric operations (AES, DES). It also ensures that each table
-  holds a multiple of the block size
-*/
-static int sep_prepare_input_output_dma_table(struct sep_device *sep,
-                                      unsigned long app_virt_in_addr,
-                                      unsigned long app_virt_out_addr,
-                                      unsigned long data_size,
-                                      unsigned long block_size,
-                                      unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
-{
-       /* lli array for the input buffer */
-       struct sep_lli_entry_t *lli_in_array;
-       /* lli array for the output buffer */
-       struct sep_lli_entry_t *lli_out_array;
-       int result = 0;
-
-       dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
-
-       /* initialize the pages pointers */
-       sep->in_page_array = 0;
-       sep->out_page_array = 0;
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true) {
-               /* lock the pages of the kernel buffer and translate them to pages */
-               result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
-                       goto end_function;
-               }
-       } else {
-               /* lock the pages of the user buffer and translate them to pages */
-               result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
-                       goto end_function;
-               }
-       }
-
-       if (isKernelVirtualAddress == true) {
-               result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
-                       goto end_function_with_error1;
-               }
-       } else {
-               result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
-               if (result) {
-                       edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
-                       goto end_function_with_error1;
-               }
-       }
-       edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
-       edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
-       edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
-
-
-       /* call the function that creates the tables from the lli arrays */
-       result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
-       if (result) {
-               edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
-               goto end_function_with_error2;
-       }
-
-       /* fall through - free the lli entry arrays */
-       dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
-       dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
-       dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
-end_function_with_error2:
-       kfree(lli_out_array);
-end_function_with_error1:
-       kfree(lli_in_array);
-end_function:
-       dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
-       return result;
-
-}
-
-/*
-  this function handles the request for creation of the DMA tables
-  for the synchronous symmetric operations (AES, DES)
-*/
-static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
-                                               unsigned long arg)
-{
-       int error;
-       /* command arguments */
-       struct sep_driver_build_sync_table_t command_args;
-
-       dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       edbg("app_in_address is %08lx\n", command_args.app_in_address);
-       edbg("app_out_address is %08lx\n", command_args.app_out_address);
-       edbg("data_size is %lu\n", command_args.data_in_size);
-       edbg("block_size is %lu\n", command_args.block_size);
-
-       /* check if we need to build only input table or input/output */
-       if (command_args.app_out_address)
-               /* prepare input and output tables */
-               error = sep_prepare_input_output_dma_table(sep,
-                                                          command_args.app_in_address,
-                                                          command_args.app_out_address,
-                                                          command_args.data_in_size,
-                                                          command_args.block_size,
-                                                          &command_args.in_table_address,
-                                                          &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-       else
-               /* prepare input tables */
-               error = sep_prepare_input_dma_table(sep,
-                                                   command_args.app_in_address,
-                                                   command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
-
-       if (error)
-               goto end_function;
-       /* copy to user */
-       if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the request for freeing the dma tables used for synchronous actions
-*/
-static int sep_free_dma_table_data_handler(struct sep_device *sep)
-{
-       dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
-
-       /* free input pages array */
-       sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
-
-       /* free output pages array if needed */
-       if (sep->out_page_array)
-               sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
-
-       /* reset all the values */
-       sep->in_page_array = 0;
-       sep->out_page_array = 0;
-       sep->in_num_pages = 0;
-       sep->out_num_pages = 0;
-       dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
-       return 0;
-}
-
-/*
-  this function finds space for a new flow dma table
-*/
-static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
-                                       unsigned long **table_address_ptr)
-{
-       int error = 0;
-       /* pointer to the id field of the flow dma table */
-       unsigned long *start_table_ptr;
-       /* Do not make start_addr unsigned long * unless fixing the offset
-          computations ! */
-       void *flow_dma_area_start_addr;
-       unsigned long *flow_dma_area_end_addr;
-       /* maximum table size in words */
-       unsigned long table_size_in_words;
-
-       /* find the start address of the flow DMA table area */
-       flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
-
-       /* set end address of the flow table area */
-       flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
-
-       /* set table size in words */
-       table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
-
-       /* set the pointer to the start address of DMA area */
-       start_table_ptr = flow_dma_area_start_addr;
-
-       /* find the space for the next table */
-       while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
-               start_table_ptr += table_size_in_words;
-
-       /* check if we reached the end of the flow tables area */
-       if (start_table_ptr >= flow_dma_area_end_addr)
-               error = -1;
-       else
-               *table_address_ptr = start_table_ptr;
-
-       return error;
-}
-
-/*
-  This function creates one DMA table for flow and returns its data,
-  and a pointer to its info entry
-*/
-static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
-                                       unsigned long virt_buff_addr,
-                                       unsigned long virt_buff_size,
-                                       struct sep_lli_entry_t *table_data,
-                                       struct sep_lli_entry_t **info_entry_ptr,
-                                       struct sep_flow_context_t *flow_data_ptr,
-                                       bool isKernelVirtualAddress)
-{
-       int error;
-       /* number of entries in the lli array */
-       unsigned long lli_array_size;
-       struct sep_lli_entry_t *lli_array;
-       struct sep_lli_entry_t *flow_dma_table_entry_ptr;
-       unsigned long *start_dma_table_ptr;
-       /* total table data counter */
-       unsigned long dma_table_data_count;
-       /* pointer to the array of pages of the virtual buffer */
-       struct page **page_array_ptr;
-       unsigned long entry_count;
-
-       /* find the space for the new table */
-       error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
-       if (error)
-               goto end_function;
-
-       /* check if the pages are in Kernel Virtual Address layout */
-       if (isKernelVirtualAddress == true)
-               /* lock kernel buffer in the memory */
-               error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-       else
-               /* lock user buffer in the memory */
-               error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
-
-       if (error)
-               goto end_function;
-
-       /* store the number of lli entries at the beginning of the table - this
-          table is now considered taken */
-       *start_dma_table_ptr = lli_array_size;
-
-       /* advance to the word that holds the pages pointer */
-       start_dma_table_ptr++;
-
-       /* set the pages pointer */
-       *start_dma_table_ptr = (unsigned long) page_array_ptr;
-
-       /* set the pointer to the first entry */
-       flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
-
-       /* now create the entries for table */
-       for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
-               flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
-
-               flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
-
-               /* set the total data of a table */
-               dma_table_data_count += lli_array[entry_count].block_size;
-
-               flow_dma_table_entry_ptr++;
-       }
-
-       /* set the physical address */
-       table_data->physical_address = virt_to_phys(start_dma_table_ptr);
-
-       /* set the num_entries and total data size */
-       table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
-
-       /* set the info entry */
-       flow_dma_table_entry_ptr->physical_address = 0xffffffff;
-       flow_dma_table_entry_ptr->block_size = 0;
-
-       /* set the pointer to info entry */
-       *info_entry_ptr = flow_dma_table_entry_ptr;
-
-       /* free the array of lli entries */
-       kfree(lli_array);
-end_function:
-       return error;
-}
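For orientation, the in-memory layout that sep_prepare_one_flow_dma_table leaves in the flow DMA area, described relative to the address stored in table_data->physical_address (a reading aid inferred from the code above, not an authoritative format description):

  word -2: number of lli entries in the table (lli_array_size)
  word -1: pointer to the struct page ** array of the locked pages
  word 0 onward: the lli entries (physical_address, block_size pairs)
  after the last entry: the info entry (physical_address 0xffffffff, block_size 0)

sep_prepare_flow_dma_tables later rewrites the previous table's info entry block_size with the interrupt flag and the next table's descriptor, and sep_deallocated_flow_tables depends on the two header words when it reads *(table_ptr - 2) and *(table_ptr - 1).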
-
-
-
-/*
-  This function creates a list of tables for flow and returns the data for
-  the first and last tables of the list
-*/
-static int sep_prepare_flow_dma_tables(struct sep_device *sep,
-                                       unsigned long num_virtual_buffers,
-                                       unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
-{
-       int error;
-       unsigned long virt_buff_addr;
-       unsigned long virt_buff_size;
-       struct sep_lli_entry_t table_data;
-       struct sep_lli_entry_t *info_entry_ptr;
-       struct sep_lli_entry_t *prev_info_entry_ptr;
-       unsigned long i;
-
-       /* init vars */
-       error = 0;
-       prev_info_entry_ptr = 0;
-
-       /* init the first table to default */
-       table_data.physical_address = 0xffffffff;
-       first_table_data_ptr->physical_address = 0xffffffff;
-       table_data.block_size = 0;
-
-       for (i = 0; i < num_virtual_buffers; i++) {
-               /* get the virtual buffer address */
-               error = get_user(virt_buff_addr, &first_buff_addr);
-               if (error)
-                       goto end_function;
-
-               /* get the virtual buffer size */
-               first_buff_addr++;
-               error = get_user(virt_buff_size, &first_buff_addr);
-               if (error)
-                       goto end_function;
-
-               /* advance the address to point to the next pair of address|size */
-               first_buff_addr++;
-
-               /* now prepare the one flow LLI table from the data */
-               error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
-               if (error)
-                       goto end_function;
-
-               if (i == 0) {
-                       /* if this is the first table - save it to return to the user
-                          application */
-                       *first_table_data_ptr = table_data;
-
-                       /* set the pointer to info entry */
-                       prev_info_entry_ptr = info_entry_ptr;
-               } else {
-                       /* not first table - the previous table info entry should
-                          be updated */
-                       prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
-
-                       /* set the pointer to info entry */
-                       prev_info_entry_ptr = info_entry_ptr;
-               }
-       }
-
-       /* set the last table data */
-       *last_table_data_ptr = table_data;
-end_function:
-       return error;
-}
-
-/*
-  this function goes over all the flow tables connected to the given
-  table and deallocates them
-*/
-static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
-{
-       /* id pointer */
-       unsigned long *table_ptr;
-       /* number of entries in the current table */
-       unsigned long num_entries;
-       unsigned long num_pages;
-       struct page **pages_ptr;
-       /* pointer to the info entry of the current table */
-       struct sep_lli_entry_t *info_entry_ptr;
-
-       /* set the pointer to the first table */
-       table_ptr = (unsigned long *) first_table_ptr->physical_address;
-
-       /* set the num of entries */
-       num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
-           & SEP_NUM_ENTRIES_MASK;
-
-       /* go over all the connected tables */
-       while (*table_ptr != 0xffffffff) {
-               /* get number of pages */
-               num_pages = *(table_ptr - 2);
-
-               /* get the pointer to the pages */
-               pages_ptr = (struct page **) (*(table_ptr - 1));
-
-               /* free the pages */
-               sep_free_dma_pages(pages_ptr, num_pages, 1);
-
-               /* go to the info entry */
-               info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
-
-               table_ptr = (unsigned long *) info_entry_ptr->physical_address;
-               num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-       }
-
-       return;
-}
-
-/**
- *     sep_find_flow_context   -       find a flow
- *     @sep: the SEP we are working with
- *     @flow_id: flow identifier
- *
- *     Returns a pointer to the matching flow, or NULL if the flow does not
- *     exist.
- */
-
-static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
-                               unsigned long flow_id)
-{
-       int count;
-       /*
-        *  always search for the flow with the default id first - once we
-        *  have started working on a flow there can never be two flows
-        *  carrying the default flag
-        */
-       for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
-               if (sep->flows[count].flow_id == flow_id)
-                       return &sep->flows[count];
-       }
-       return NULL;
-}
-
-
-/*
-  this function handles the request to create the DMA tables for flow
-*/
-static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
-                                                       unsigned long arg)
-{
-       int error = -ENOENT;
-       struct sep_driver_build_flow_table_t command_args;
-       /* first dma table data */
-       struct sep_lli_entry_t first_table_data;
-       /* last dma table data */
-       struct sep_lli_entry_t last_table_data;
-       /* pointer to the info entry of the previous DMA table */
-       struct sep_lli_entry_t *prev_info_entry_ptr;
-       /* pointer to the flow data structure */
-       struct sep_flow_context_t *flow_context_ptr;
-
-       dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
-
-       /* init variables */
-       prev_info_entry_ptr = 0;
-       first_table_data.physical_address = 0xffffffff;
-
-       /* find the free structure for flow data */
-       error = -EINVAL;
-       flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
-       if (flow_context_ptr == NULL)
-               goto end_function;
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* create flow tables */
-       error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
-       if (error)
-               goto end_function_with_error;
-
-       /* check if flow is static */
-       if (!command_args.flow_type)
-               /* point the info entry of the last to the info entry of the first */
-               last_table_data = first_table_data;
-
-       /* set output params */
-       command_args.first_table_addr = first_table_data.physical_address;
-       command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
-       command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function_with_error;
-       }
-
-       /* all the flow tables were created - update the flow entry with the temp id */
-       flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
-
-       /* set the processing tables data in the context */
-       if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
-               flow_context_ptr->input_tables_in_process = first_table_data;
-       else
-               flow_context_ptr->output_tables_in_process = first_table_data;
-
-       goto end_function;
-
-end_function_with_error:
-       /* free the allocated tables */
-       sep_deallocated_flow_tables(&first_table_data);
-end_function:
-       dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function handles adding tables to a flow
-*/
-static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       unsigned long num_entries;
-       struct sep_driver_add_flow_table_t command_args;
-       struct sep_flow_context_t *flow_context_ptr;
-       /* first dma table data */
-       struct sep_lli_entry_t first_table_data;
-       /* last dma table data */
-       struct sep_lli_entry_t last_table_data;
-       /* pointer to the info entry of the current DMA table */
-       struct sep_lli_entry_t *info_entry_ptr;
-
-       dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
-
-       /* get input parameters */
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* find the flow structure for the flow id */
-       flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
-       if (flow_context_ptr == NULL)
-               goto end_function;
-
-       /* prepare the flow dma tables */
-       error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
-       if (error)
-               goto end_function_with_error;
-
-       /* now check if there is already an existing add table for this flow */
-       if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
-               /* this buffer was for input buffers */
-               if (flow_context_ptr->input_tables_flag) {
-                       /* add table already exists - add the new tables to the end
-                          of the previous */
-                       num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
-                       info_entry_ptr = (struct sep_lli_entry_t *)
-                           (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
-                       /* connect to list of tables */
-                       *info_entry_ptr = first_table_data;
-
-                       /* set the first table data */
-                       first_table_data = flow_context_ptr->first_input_table;
-               } else {
-                       /* set the input flag */
-                       flow_context_ptr->input_tables_flag = 1;
-
-                       /* set the first table data */
-                       flow_context_ptr->first_input_table = first_table_data;
-               }
-               /* set the last table data */
-               flow_context_ptr->last_input_table = last_table_data;
-       } else {                /* this is output tables */
-
-               /* this buffer was for output buffers */
-               if (flow_context_ptr->output_tables_flag) {
-                       /* add table already exists - add the new tables to
-                          the end of the previous */
-                       num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
-
-                       info_entry_ptr = (struct sep_lli_entry_t *)
-                           (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
-
-                       /* connect to list of tables */
-                       *info_entry_ptr = first_table_data;
-
-                       /* set the first table data */
-                       first_table_data = flow_context_ptr->first_output_table;
-               } else {
-                       /* set the output flag */
-                       flow_context_ptr->output_tables_flag = 1;
-
-                       /* set the first table data */
-                       flow_context_ptr->first_output_table = first_table_data;
-               }
-               /* set the last table data */
-               flow_context_ptr->last_output_table = last_table_data;
-       }
-
-       /* set output params */
-       command_args.first_table_addr = first_table_data.physical_address;
-       command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
-       command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
-       if (error)
-               error = -EFAULT;
-end_function_with_error:
-       /* free the allocated tables */
-       sep_deallocated_flow_tables(&first_table_data);
-end_function:
-       dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
-       return error;
-}
-
-/*
-  this function adds the flow add-tables message to the specified flow
-*/
-static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_add_message_t command_args;
-       struct sep_flow_context_t *flow_context_ptr;
-
-       dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       /* check input */
-       if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
-               error = -ENOMEM;
-               goto end_function;
-       }
-
-       /* find the flow context */
-       flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
-       if (flow_context_ptr == NULL)
-               goto end_function;
-
-       /* copy the message into context */
-       flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
-       error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
-       return error;
-}
-
-
-/*
-  this function returns the bus and virtual addresses of the static pool
-*/
-static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_static_pool_addr_t command_args;
-
-       dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
-
-       /* prepare the output parameters in the struct */
-       command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-       command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
-
-       edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
-       if (error)
-               error = -EFAULT;
-       dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
-       return error;
-}
-
-/*
-  this function returns the offset of the given physical address from the
-  start of the mapped area
-*/
-static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
-{
-       int error;
-       struct sep_driver_get_mapped_offset_t command_args;
-
-       dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-
-       if (command_args.physical_address < sep->shared_bus) {
-               error = -EINVAL;
-               goto end_function;
-       }
-
-       /* prepare the output parameters in the struct */
-       command_args.offset = command_args.physical_address - sep->shared_bus;
-
-       edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
-
-       /* send the parameters to user application */
-       error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
-       if (error)
-               error = -EFAULT;
-end_function:
-       dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
-       return error;
-}
-
-
-/*
-  this function handles the SEP start command: it waits for SEP to signal
-  through GPR3 and, on failure, returns the error status read from GPR0
-*/
-static int sep_start_handler(struct sep_device *sep)
-{
-       unsigned long reg_val;
-       unsigned long error = 0;
-
-       dbg("SEP Driver:--------> sep_start_handler start\n");
-
-       /* wait, by polling, for a message from SEP */
-       do
-               reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!reg_val);
-
-       /* check the value */
-       if (reg_val == 0x1)
-               /* fatal error - read error status from GPR0 */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-       dbg("SEP Driver:<-------- sep_start_handler end\n");
-       return error;
-}
-
-/*
-  this function handles the request for SEP initialization
-*/
-static int sep_init_handler(struct sep_device *sep, unsigned long arg)
-{
-       unsigned long message_word;
-       unsigned long *message_ptr;
-       struct sep_driver_init_t command_args;
-       unsigned long counter;
-       unsigned long error;
-       unsigned long reg_val;
-
-       dbg("SEP Driver:--------> sep_init_handler start\n");
-       error = 0;
-
-       error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
-       if (error) {
-               error = -EFAULT;
-               goto end_function;
-       }
-       dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
-
-       /* PATCH - configure the DMA to single-burst instead of multi-burst */
-       /*sep_configure_dma_burst(); */
-
-       dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
-
-       message_ptr = (unsigned long *) command_args.message_addr;
-
-       /* set the base address of the SRAM  */
-       sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
-
-       for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
-               get_user(message_word, message_ptr);
-               /* write data to SRAM */
-               sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
-               edbg("SEP Driver:message_word is %lu\n", message_word);
-               /* wait for write complete */
-               sep_wait_sram_write(sep);
-       }
-       dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
-       /* signal SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
-
-       do
-               reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
-       while (!(reg_val & 0xFFFFFFFD));
-
-       dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
-
-       /* check the value */
-       if (reg_val == 0x1) {
-               edbg("SEP Driver:init failed\n");
-
-               error = sep_read_reg(sep, 0x8060);
-               edbg("SEP Driver:sw monitor is %lu\n", error);
-
-               /* fatal error - read error status from GPR0 */
-               error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
-               edbg("SEP Driver:error is %lu\n", error);
-       }
-end_function:
-       dbg("SEP Driver:<-------- sep_init_handler end\n");
-       return error;
-
-}
-
-/*
-  this function handles the request for cache and resident reallocation
-*/
-static int sep_realloc_cache_resident_handler(struct sep_device *sep,
-                                               unsigned long arg)
-{
-       struct sep_driver_realloc_cache_resident_t command_args;
-       int error;
-
-       /* copy cache and resident to their intended locations */
-       error = sep_load_firmware(sep);
-       if (error)
-               return error;
-
-       command_args.new_base_addr = sep->shared_bus;
-
-       /* find the new base address according to the lowest address between
-          cache, resident and shared area */
-       if (sep->resident_bus < command_args.new_base_addr)
-               command_args.new_base_addr = sep->resident_bus;
-       if (sep->rar_bus < command_args.new_base_addr)
-               command_args.new_base_addr = sep->rar_bus;
-
-       /* set the return parameters */
-       command_args.new_cache_addr = sep->rar_bus;
-       command_args.new_resident_addr = sep->resident_bus;
-
-       /* set the new shared area */
-       command_args.new_shared_area_addr = sep->shared_bus;
-
-       edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
-       edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
-       edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
-       edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
-
-       /* return to user */
-       if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
-               return -EFAULT;
-       return 0;
-}
-
-/**
- *     sep_get_time_handler    -       time request from user space
- *     @sep: sep we are to set the time for
- *     @arg: pointer to user space arg buffer
- *
- *     This function reports back the time and the address in the SEP
- *     shared buffer at which it has been placed. (Do we really need this!!!)
- */
-
-static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
-{
-       struct sep_driver_get_time_t command_args;
-
-       mutex_lock(&sep_mutex);
-       command_args.time_value = sep_set_time(sep);
-       command_args.time_physical_address = (unsigned long)sep_time_address(sep);
-       mutex_unlock(&sep_mutex);
-       if (copy_to_user((void __user *)arg,
-                       &command_args, sizeof(struct sep_driver_get_time_t)))
-                       return -EFAULT;
-       return 0;
-
-}
-
-/*
-  This API handles the end transaction request
-*/
-static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
-{
-       dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
-
-#if 0                          /*!SEP_DRIVER_POLLING_MODE */
-       /* close IMR */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
-
-       /* release IRQ line */
-       free_irq(SEP_DIRVER_IRQ_NUM, sep);
-
-       /* lock the sep mutex */
-       mutex_unlock(&sep_mutex);
-#endif
-
-       dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
-
-       return 0;
-}
-
-
-/**
- *     sep_set_flow_id_handler -       handle flow setting
- *     @sep: the SEP we are configuring
- *     @flow_id: the flow we are setting
- *
- * This function handles the set flow id command
- */
-static int sep_set_flow_id_handler(struct sep_device *sep,
-                                               unsigned long flow_id)
-{
-       int error = 0;
-       struct sep_flow_context_t *flow_data_ptr;
-
-       /* find the flow data structure that was just used for creating new flow
-          - its id should be default */
-
-       mutex_lock(&sep_mutex);
-       flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
-       if (flow_data_ptr)
-               flow_data_ptr->flow_id = flow_id;       /* set flow id */
-       else
-               error = -EINVAL;
-       mutex_unlock(&sep_mutex);
-       return error;
-}
-
-static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       int error = 0;
-       struct sep_device *sep = filp->private_data;
-
-       dbg("------------>SEP Driver: ioctl start\n");
-
-       edbg("SEP Driver: cmd is %x\n", cmd);
-
-       switch (cmd) {
-       case SEP_IOCSENDSEPCOMMAND:
-               /* send command to SEP */
-               sep_send_command_handler(sep);
-               edbg("SEP Driver: after sep_send_command_handler\n");
-               break;
-       case SEP_IOCSENDSEPRPLYCOMMAND:
-               /* send reply command to SEP */
-               sep_send_reply_command_handler(sep);
-               break;
-       case SEP_IOCALLOCDATAPOLL:
-               /* allocate data pool */
-               error = sep_allocate_data_pool_memory_handler(sep, arg);
-               break;
-       case SEP_IOCWRITEDATAPOLL:
-               /* write data into memory pool */
-               error = sep_write_into_data_pool_handler(sep, arg);
-               break;
-       case SEP_IOCREADDATAPOLL:
-               /* read data from data pool into application memory */
-               error = sep_read_from_data_pool_handler(sep, arg);
-               break;
-       case SEP_IOCCREATESYMDMATABLE:
-               /* create dma table for synchronous operation */
-               error = sep_create_sync_dma_tables_handler(sep, arg);
-               break;
-       case SEP_IOCCREATEFLOWDMATABLE:
-               /* create flow dma tables */
-               error = sep_create_flow_dma_tables_handler(sep, arg);
-               break;
-       case SEP_IOCFREEDMATABLEDATA:
-               /* free the pages */
-               error = sep_free_dma_table_data_handler(sep);
-               break;
-       case SEP_IOCSETFLOWID:
-               /* set flow id */
-               error = sep_set_flow_id_handler(sep, (unsigned long)arg);
-               break;
-       case SEP_IOCADDFLOWTABLE:
-               /* add tables to the dynamic flow */
-               error = sep_add_flow_tables_handler(sep, arg);
-               break;
-       case SEP_IOCADDFLOWMESSAGE:
-               /* add message of add tables to flow */
-               error = sep_add_flow_tables_message_handler(sep, arg);
-               break;
-       case SEP_IOCSEPSTART:
-               /* start command to sep */
-               error = sep_start_handler(sep);
-               break;
-       case SEP_IOCSEPINIT:
-               /* init command to sep */
-               error = sep_init_handler(sep, arg);
-               break;
-       case SEP_IOCGETSTATICPOOLADDR:
-               /* get the physical and virtual addresses of the static pool */
-               error = sep_get_static_pool_addr_handler(sep, arg);
-               break;
-       case SEP_IOCENDTRANSACTION:
-               error = sep_end_transaction_handler(sep, arg);
-               break;
-       case SEP_IOCREALLOCCACHERES:
-               error = sep_realloc_cache_resident_handler(sep, arg);
-               break;
-       case SEP_IOCGETMAPPEDADDROFFSET:
-               error = sep_get_physical_mapped_offset_handler(sep, arg);
-               break;
-       case SEP_IOCGETIME:
-               error = sep_get_time_handler(sep, arg);
-               break;
-       default:
-               error = -ENOTTY;
-               break;
-       }
-       dbg("SEP Driver:<-------- ioctl end\n");
-       return error;
-}
-
-
-
-#if !SEP_DRIVER_POLLING_MODE
-
-/* handler for flow done interrupt */
-
-static void sep_flow_done_handler(struct work_struct *work)
-{
-       struct sep_flow_context_t *flow_data_ptr;
-
-       /* obtain the mutex */
-       mutex_lock(&sep_mutex);
-
-       /* get the pointer to context */
-       flow_data_ptr = (struct sep_flow_context_t *) work;
-
-       /* free all the current input tables in sep */
-       sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
-
-       /* free all the current output tables in SEP (if needed) */
-       if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
-               sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
-
-       /* check if we have additional tables to be sent to SEP; only the
-          input flag needs to be checked */
-       if (flow_data_ptr->input_tables_flag) {
-               /* copy the message to the shared RAM and signal SEP */
-               memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
-
-               sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
-       }
-       mutex_unlock(&sep_mutex);
-}
-/*
-  interrupt handler function
-*/
-static irqreturn_t sep_inthandler(int irq, void *dev_id)
-{
-       irqreturn_t int_error;
-       unsigned long reg_val;
-       unsigned long flow_id;
-       struct sep_flow_context_t *flow_context_ptr;
-       struct sep_device *sep = dev_id;
-
-       int_error = IRQ_HANDLED;
-
-       /* read the IRR register to check if this is SEP interrupt */
-       reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-       edbg("SEP Interrupt - reg is %08lx\n", reg_val);
-
-       /* check if this is the flow interrupt */
-       if (0 /*reg_val & (0x1 << 11) */ ) {
-               /* read GPR0 to find out which flow is done */
-               flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
-
-               /* find the context of the flow */
-               flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
-               if (flow_context_ptr == NULL)
-                       goto end_function_with_error;
-
-               /* queue the work */
-               INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
-               queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
-
-       } else {
-               /* check if this is reply interrupt from SEP */
-               if (reg_val & (0x1 << 13)) {
-                       /* update the counter of reply messages */
-                       sep->reply_ct++;
-                       /* wake up the waiting process */
-                       wake_up(&sep_event);
-               } else {
-                       int_error = IRQ_NONE;
-                       goto end_function;
-               }
-       }
-end_function_with_error:
-       /* clear the interrupt */
-       sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
-end_function:
-       return int_error;
-}
-
-#endif
-
-
-
-#if 0
-
-static void sep_wait_busy(struct sep_device *sep)
-{
-       u32 reg;
-
-       do {
-               reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
-       } while (reg);
-}
-
-/*
-  PATCH for configuring the DMA to single burst instead of multi-burst
-*/
-static void sep_configure_dma_burst(struct sep_device *sep)
-{
-#define         HW_AHB_RD_WR_BURSTS_REG_ADDR            0x0E10UL
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
-
-       /* request access to registers from SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg)  \n");
-
-       sep_wait_busy(sep);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop)  \n");
-
-       /* set the DMA burst register to single burst */
-       sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
-
-       /* release the sep busy */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
-       sep_wait_busy(sep);
-
-       dbg("SEP Driver:<-------- sep_configure_dma_burst done  \n");
-
-}
-
-#endif
-
-/*
-  Function that is activated on the successful probe of the SEP device
-*/
-static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int error = 0;
-       struct sep_device *sep;
-       int counter;
-       int size;               /* size of memory for allocation */
-
-       edbg("Sep pci probe starting\n");
-       if (sep_dev != NULL) {
-               dev_warn(&pdev->dev, "only one SEP supported.\n");
-               return -EBUSY;
-       }
-
-       /* enable the device */
-       error = pci_enable_device(pdev);
-       if (error) {
-               edbg("error enabling pci device\n");
-               goto end_function;
-       }
-
-       /* set the pci dev pointer */
-       sep_dev = &sep_instance;
-       sep = &sep_instance;
-
-       edbg("sep->shared_addr = %p\n", sep->shared_addr);
-       /* transaction counter that coordinates the transactions between SEP
-       and HOST */
-       sep->send_ct = 0;
-       /* counter for the messages from sep */
-       sep->reply_ct = 0;
-       /* counter for the number of bytes allocated in the pool
-       for the current transaction */
-       sep->data_pool_bytes_allocated = 0;
-
-       /* calculate the total size for allocation */
-       size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
-           SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-
-       /* allocate the shared area */
-       if (sep_map_and_alloc_shared_area(sep, size)) {
-               error = -ENOMEM;
-               /* allocation failed */
-               goto end_function_error;
-       }
-       /* now set the memory regions */
-#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
-       /* Note: this test section will need moving before it could ever
-          work as the registers are not yet mapped ! */
-       /* send the new SHARED MESSAGE AREA to the SEP */
-       sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
-
-       /* poll for SEP response */
-       retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-       while (retval != 0xffffffff && retval != sep->shared_bus)
-               retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
-
-       /* check the return value (register) */
-       if (retval != sep->shared_bus) {
-               error = -ENOMEM;
-               goto end_function_deallocate_sep_shared_area;
-       }
-#endif
-       /* init the flow contextes */
-       for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
-               sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
-
-       sep->flow_wq = create_singlethread_workqueue("sepflowwq");
-       if (sep->flow_wq == NULL) {
-               error = -ENOMEM;
-               edbg("sep_driver:flow queue creation failed\n");
-               goto end_function_deallocate_sep_shared_area;
-       }
-       edbg("SEP Driver: create flow workqueue \n");
-       sep->pdev = pci_dev_get(pdev);
-
-       sep->reg_addr = pci_ioremap_bar(pdev, 0);
-       if (!sep->reg_addr) {
-               edbg("sep: ioremap of registers failed.\n");
-               goto end_function_deallocate_sep_shared_area;
-       }
-       edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
-
-       /* load the rom code */
-       sep_load_rom_code(sep);
-
-       /* set up system base address and shared memory location */
-       sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
-                       2 * SEP_RAR_IO_MEM_REGION_SIZE,
-                       &sep->rar_bus, GFP_KERNEL);
-
-       if (!sep->rar_addr) {
-               edbg("SEP Driver:can't allocate rar\n");
-               goto end_function_uniomap;
-       }
-
-
-       edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
-       edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
-
-#if !SEP_DRIVER_POLLING_MODE
-
-       edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
-
-       /* clear ICR register */
-       sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
-
-       /* set the IMR register - open only GPR 2 */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-       edbg("SEP Driver: about to call request_irq\n");
-       /* get the interrupt line */
-       error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
-       if (error)
-               goto end_function_free_res;
-       return 0;
-       edbg("SEP Driver: about to write IMR REG_ADDR");
-
-       /* set the IMR register - open only GPR 2 */
-       sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
-
-end_function_free_res:
-       dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
-                       sep->rar_addr, sep->rar_bus);
-#endif                         /* SEP_DRIVER_POLLING_MODE */
-end_function_uniomap:
-       iounmap(sep->reg_addr);
-end_function_deallocate_sep_shared_area:
-       /* de-allocate shared area */
-       sep_unmap_and_free_shared_area(sep, size);
-end_function_error:
-       sep_dev = NULL;
-end_function:
-       return error;
-}
-
-static const struct pci_device_id sep_pci_id_tbl[] = {
-       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
-       {0}
-};
-
-MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
-
-/* field for registering driver to PCI device */
-static struct pci_driver sep_pci_driver = {
-       .name = "sep_sec_driver",
-       .id_table = sep_pci_id_tbl,
-       .probe = sep_probe
-       /* FIXME: remove handler */
-};
-
-/* major and minor device numbers */
-static dev_t sep_devno;
-
-/* the files operations structure of the driver */
-static struct file_operations sep_file_operations = {
-       .owner = THIS_MODULE,
-       .unlocked_ioctl = sep_ioctl,
-       .poll = sep_poll,
-       .open = sep_open,
-       .release = sep_release,
-       .mmap = sep_mmap,
-};
-
-
-/* cdev struct of the driver */
-static struct cdev sep_cdev;
-
-/*
-  this function registers the driver to the file system
-*/
-static int sep_register_driver_to_fs(void)
-{
-       int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
-       if (ret_val) {
-               edbg("sep: major number allocation failed, retval is %d\n",
-                                                               ret_val);
-               return ret_val;
-       }
-       /* init cdev */
-       cdev_init(&sep_cdev, &sep_file_operations);
-       sep_cdev.owner = THIS_MODULE;
-
-       /* register the driver with the kernel */
-       ret_val = cdev_add(&sep_cdev, sep_devno, 1);
-       if (ret_val) {
-               edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
-               /* unregister dev numbers */
-               unregister_chrdev_region(sep_devno, 1);
-       }
-       return ret_val;
-}
-
-
-/*--------------------------------------------------------------
-  init function
-----------------------------------------------------------------*/
-static int __init sep_init(void)
-{
-       int ret_val = 0;
-       dbg("SEP Driver:-------->Init start\n");
-       /* FIXME: Probe can occur before we are ready to survive a probe */
-       ret_val = pci_register_driver(&sep_pci_driver);
-       if (ret_val) {
-               edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
-               goto end_function_unregister_from_fs;
-       }
-       /* register driver to fs */
-       ret_val = sep_register_driver_to_fs();
-       if (ret_val)
-               goto end_function_unregister_pci;
-       goto end_function;
-end_function_unregister_pci:
-       pci_unregister_driver(&sep_pci_driver);
-end_function_unregister_from_fs:
-       /* unregister from fs */
-       cdev_del(&sep_cdev);
-       /* unregister dev numbers */
-       unregister_chrdev_region(sep_devno, 1);
-end_function:
-       dbg("SEP Driver:<-------- Init end\n");
-       return ret_val;
-}
-
-
-/*-------------------------------------------------------------
-  exit function
---------------------------------------------------------------*/
-static void __exit sep_exit(void)
-{
-       int size;
-
-       dbg("SEP Driver:--------> Exit start\n");
-
-       /* unregister from fs */
-       cdev_del(&sep_cdev);
-       /* unregister dev numbers */
-       unregister_chrdev_region(sep_devno, 1);
-       /* calculate the total size for de-allocation */
-       size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
-           SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
-       /* FIXME: We need to do this in the unload for the device */
-       /* free shared area  */
-       if (sep_dev) {
-               sep_unmap_and_free_shared_area(sep_dev, size);
-               edbg("SEP Driver: free pages SEP SHARED AREA \n");
-               iounmap((void *) sep_dev->reg_addr);
-               edbg("SEP Driver: iounmap \n");
-       }
-       edbg("SEP Driver: release_mem_region \n");
-       dbg("SEP Driver:<-------- Exit end\n");
-}
-
-
-module_init(sep_init);
-module_exit(sep_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644 (file)
index 7ef16da..0000000
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- *
- *  sep_driver_api.h - Security Processor Driver api definitions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_API_H__
-#define __SEP_DRIVER_API_H__
-
-
-
-/*----------------------------------------------------------------
-  IOCTL command defines
-  -----------------------------------------------------------------*/
-
-/* magic number 1 of the sep IOCTL command */
-#define SEP_IOC_MAGIC_NUMBER                           's'
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPCOMMAND                 _IO(SEP_IOC_MAGIC_NUMBER , 0)
-
-/* sends interrupt to sep that message is ready */
-#define SEP_IOCSENDSEPRPLYCOMMAND             _IO(SEP_IOC_MAGIC_NUMBER , 1)
-
-/* allocate memory in data pool */
-#define SEP_IOCALLOCDATAPOLL                  _IO(SEP_IOC_MAGIC_NUMBER , 2)
-
-/* write to pre-allocated  memory in data pool */
-#define SEP_IOCWRITEDATAPOLL                  _IO(SEP_IOC_MAGIC_NUMBER , 3)
-
-/* read from  pre-allocated  memory in data pool */
-#define SEP_IOCREADDATAPOLL                   _IO(SEP_IOC_MAGIC_NUMBER , 4)
-
-/* create sym dma lli tables */
-#define SEP_IOCCREATESYMDMATABLE              _IO(SEP_IOC_MAGIC_NUMBER , 5)
-
-/* create flow dma lli tables */
-#define SEP_IOCCREATEFLOWDMATABLE             _IO(SEP_IOC_MAGIC_NUMBER , 6)
-
-/* free dynamic data allocated during table creation */
-#define SEP_IOCFREEDMATABLEDATA                _IO(SEP_IOC_MAGIC_NUMBER , 7)
-
-/* get the static pool area addresses (physical and virtual) */
-#define SEP_IOCGETSTATICPOOLADDR               _IO(SEP_IOC_MAGIC_NUMBER , 8)
-
-/* set flow id command */
-#define SEP_IOCSETFLOWID                       _IO(SEP_IOC_MAGIC_NUMBER , 9)
-
-/* add tables to the dynamic flow */
-#define SEP_IOCADDFLOWTABLE                    _IO(SEP_IOC_MAGIC_NUMBER , 10)
-
-/* add flow add tables message */
-#define SEP_IOCADDFLOWMESSAGE                  _IO(SEP_IOC_MAGIC_NUMBER , 11)
-
-/* start sep command */
-#define SEP_IOCSEPSTART                        _IO(SEP_IOC_MAGIC_NUMBER , 12)
-
-/* init sep command */
-#define SEP_IOCSEPINIT                         _IO(SEP_IOC_MAGIC_NUMBER , 13)
-
-/* end transaction command */
-#define SEP_IOCENDTRANSACTION                  _IO(SEP_IOC_MAGIC_NUMBER , 15)
-
-/* reallocate cache and resident */
-#define SEP_IOCREALLOCCACHERES                 _IO(SEP_IOC_MAGIC_NUMBER , 16)
-
-/* get the offset of the address starting from the beginning of the map area */
-#define SEP_IOCGETMAPPEDADDROFFSET             _IO(SEP_IOC_MAGIC_NUMBER , 17)
-
-/* get time address and value */
-#define SEP_IOCGETIME                          _IO(SEP_IOC_MAGIC_NUMBER , 19)
-
-/*-------------------------------------------
-    TYPEDEFS
-----------------------------------------------*/
-
-/*
-  init command struct
-*/
-struct sep_driver_init_t {
-       /* start of the 1G of the host memory address that SEP can access */
-       unsigned long message_addr;
-
-       /* size of the message in words */
-       unsigned long message_size_in_words;
-
-};
-
-
-/*
-  realloc cache resident command
-*/
-struct sep_driver_realloc_cache_resident_t {
-       /* new cache address */
-       u64 new_cache_addr;
-       /* new resident address */
-       u64 new_resident_addr;
-       /* new shared area address */
-       u64  new_shared_area_addr;
-       /* new base address */
-       u64 new_base_addr;
-};
-
-struct sep_driver_alloc_t {
-       /* virtual address of allocated space */
-       unsigned long offset;
-
-       /* physical address of allocated space */
-       unsigned long phys_address;
-
-       /* number of bytes to allocate */
-       unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_write_t {
-       /* application space address */
-       unsigned long app_address;
-
-       /* address of the data pool */
-       unsigned long datapool_address;
-
-       /* number of bytes to write */
-       unsigned long num_bytes;
-};
-
-/*
- */
-struct sep_driver_read_t {
-       /* application space address */
-       unsigned long app_address;
-
-       /* address of the data pool */
-       unsigned long datapool_address;
-
-       /* number of bytes to read */
-       unsigned long num_bytes;
-};
-
-/*
-*/
-struct sep_driver_build_sync_table_t {
-       /* address value of the data in */
-       unsigned long app_in_address;
-
-       /* size of data in */
-       unsigned long data_in_size;
-
-       /* address of the data out */
-       unsigned long app_out_address;
-
-       /* the size of the block of the operation - if needed,
-          every table will be modulo this parameter */
-       unsigned long block_size;
-
-       /* the physical address of the first input DMA table */
-       unsigned long in_table_address;
-
-       /* number of entries in the first input DMA table */
-       unsigned long in_table_num_entries;
-
-       /* the physical address of the first output DMA table */
-       unsigned long out_table_address;
-
-       /* number of entries in the first output DMA table */
-       unsigned long out_table_num_entries;
-
-       /* data in the first input table */
-       unsigned long table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-
-};
-
-/*
-*/
-struct sep_driver_build_flow_table_t {
-       /* flow type */
-       unsigned long flow_type;
-
-       /* flag for input output */
-       unsigned long input_output_flag;
-
-       /* address value of the data in */
-       unsigned long virt_buff_data_addr;
-
-       /* size of data in */
-       unsigned long num_virtual_buffers;
-
-       /* the physical address of the first input DMA table */
-       unsigned long first_table_addr;
-
-       /* number of entries in the first input DMA table */
-       unsigned long first_table_num_entries;
-
-       /* data in the first input table */
-       unsigned long first_table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-};
-
-
-struct sep_driver_add_flow_table_t {
-       /* flow id  */
-       unsigned long flow_id;
-
-       /* flag for input output */
-       unsigned long inputOutputFlag;
-
-       /* address value of the data in */
-       unsigned long virt_buff_data_addr;
-
-       /* size of data in */
-       unsigned long num_virtual_buffers;
-
-       /* address of the first table */
-       unsigned long first_table_addr;
-
-       /* number of entries in the first table */
-       unsigned long first_table_num_entries;
-
-       /* data size of the first table */
-       unsigned long first_table_data_size;
-
-       /* distinct user/kernel layout */
-       bool isKernelVirtualAddress;
-
-};
-
-/*
-  command struct for set flow id
-*/
-struct sep_driver_set_flow_id_t {
-       /* flow id to set */
-       unsigned long flow_id;
-};
-
-
-/* command struct for add tables message */
-struct sep_driver_add_message_t {
-       /* flow id to set */
-       unsigned long flow_id;
-
-       /* message size in bytes */
-       unsigned long message_size_in_bytes;
-
-       /* address of the message */
-       unsigned long message_address;
-};
-
-/* command struct for static pool addresses  */
-struct sep_driver_static_pool_addr_t {
-       /* physical address of the static pool */
-       unsigned long physical_static_address;
-
-       /* virtual address of the static pool */
-       unsigned long virtual_static_address;
-};
-
-/* command struct for getting the offset of the physical address from
-       the start of the mapped area  */
-struct sep_driver_get_mapped_offset_t {
-       /* physical address to get the offset for */
-       unsigned long physical_address;
-
-       /* offset of the physical address within the mapped area */
-       unsigned long offset;
-};
-
-/* command struct for getting time value and address */
-struct sep_driver_get_time_t {
-       /* physical address of stored time */
-       unsigned long time_physical_address;
-
-       /* value of the stored time */
-       unsigned long time_value;
-};
-
-
-/*
-  structure that represent one entry in the DMA LLI table
-*/
-struct sep_lli_entry_t {
-       /* physical address */
-       unsigned long physical_address;
-
-       /* block size */
-       unsigned long block_size;
-};
-
-/*
-  structure that represents the data needed for lli table construction
-*/
-struct sep_lli_prepare_table_data_t {
-       /* pointer to the memory where the first lli entry to be built */
-       struct sep_lli_entry_t *lli_entry_ptr;
-
-       /* pointer to the array of lli entries from which the table is to be built */
-       struct sep_lli_entry_t *lli_array_ptr;
-
-       /* number of elements in lli array */
-       int lli_array_size;
-
-       /* number of entries in the created table */
-       int num_table_entries;
-
-       /* number of array entries processed during table creation */
-       int num_array_entries_processed;
-
-       /* the total data size in the created table */
-       int lli_table_total_data_size;
-};
-
-/*
-  structure that represents one table - it is not used in the code, just
-  to show what a table looks like
-*/
-struct sep_lli_table_t {
-       /* number of pages mapped in this table. If 0, it means that the table
-          is not defined (used as a valid flag) */
-       unsigned long num_pages;
-       /*
-          pointer to array of page pointers that represent the mapping of the
-          virtual buffer defined by the table to the physical memory. If this
-          pointer is NULL, it means that the table is not defined
-          (used as a valid flag)
-        */
-       struct page **table_page_array_ptr;
-
-       /* maximum flow entries in table */
-       struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
-};
-
-
-/*
-  structure for keeping the mapping of the virtual buffer into physical pages
-*/
-struct sep_flow_buffer_data {
-       /* pointer to the array of page structs pointers to the pages of the
-          virtual buffer */
-       struct page **page_array_ptr;
-
-       /* number of pages taken by the virtual buffer */
-       unsigned long num_pages;
-
-       /* this flag signals if this page_array is the last one among many that were
-          sent in one setting to SEP */
-       unsigned long last_page_array_flag;
-};
-
-/*
-  struct that keeps all the data for one flow
-*/
-struct sep_flow_context_t {
-       /*
-          work struct for handling the flow done interrupt in the workqueue
-          this member must be first in the structure, since it is used
-          for casting to the containing flow context
-        */
-       struct work_struct flow_wq;
-
-       /* flow id */
-       unsigned long flow_id;
-
-       /* additional input tables exist */
-       unsigned long input_tables_flag;
-
-       /* additional output tables exist */
-       unsigned long output_tables_flag;
-
-       /* data of the first input table */
-       struct sep_lli_entry_t first_input_table;
-
-       /* data of the first output table */
-       struct sep_lli_entry_t first_output_table;
-
-       /* last input table data */
-       struct sep_lli_entry_t last_input_table;
-
-       /* last output table data */
-       struct sep_lli_entry_t last_output_table;
-
-       /* first list of table */
-       struct sep_lli_entry_t input_tables_in_process;
-
-       /* output table in process (in sep) */
-       struct sep_lli_entry_t output_tables_in_process;
-
-       /* size of messages in bytes */
-       unsigned long message_size_in_bytes;
-
-       /* message */
-       unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
-};
-
-
-#endif
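
The command numbers above are built with the standard _IO() macro from a single magic character, so a user-space caller only has to repeat the same definitions. A hedged sketch of such a caller follows; the /dev/sep path is an assumption, since the driver only allocates a character device region named "sep_sec_driver" and leaves node creation to udev or a manual mknod:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/ioctl.h>
    #include <sys/ioctl.h>

    #define SEP_IOC_MAGIC_NUMBER    's'
    #define SEP_IOCSENDSEPCOMMAND   _IO(SEP_IOC_MAGIC_NUMBER, 0)

    int main(void)
    {
            int fd = open("/dev/sep", O_RDWR);      /* assumed device node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0) < 0)
                    perror("ioctl");
            close(fd);
            return 0;
    }
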
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644 (file)
index 6008fe5..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- *
- *  sep_driver_config.h - Security Processor Driver configuration
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef __SEP_DRIVER_CONFIG_H__
-#define __SEP_DRIVER_CONFIG_H__
-
-
-/*--------------------------------------
-  DRIVER CONFIGURATION FLAGS
-  -------------------------------------*/
-
-/* if this flag is on, then the driver is running in polling and
-       not interrupt mode */
-#define SEP_DRIVER_POLLING_MODE                         1
-
-/* flag which defines if the shared area address should be
-       reconfigured (sent to SEP anew) during init of the driver */
-#define SEP_DRIVER_RECONFIG_MESSAGE_AREA                0
-
-/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
-#define SEP_DRIVER_ARM_DEBUG_MODE                       0
-
-/*-------------------------------------------
-       INTERNAL DATA CONFIGURATION
-       -------------------------------------------*/
-
-/* flag for the input array */
-#define SEP_DRIVER_IN_FLAG                              0
-
-/* flag for output array */
-#define SEP_DRIVER_OUT_FLAG                             1
-
-/* maximum number of entries in one LLI tables */
-#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP             8
-
-
-/*--------------------------------------------------------
-       SHARED AREA  memory total size is 36K
-       it is divided is following:
-
-       SHARED_MESSAGE_AREA                     8K         }
-                                                                       }
-       STATIC_POOL_AREA                        4K         } MAPPED AREA ( 24 K)
-                                                                       }
-       DATA_POOL_AREA                          12K        }
-
-       SYNCHRONIC_DMA_TABLES_AREA              5K
-
-       FLOW_DMA_TABLES_AREA                    4K
-
-       SYSTEM_MEMORY_AREA                      3k
-
-       SYSTEM_MEMORY total size is 3k
-       it is divided as following:
-
-       TIME_MEMORY_AREA                     8B
------------------------------------------------------------*/
-
-
-
-/*
-       the maximum length of the message - the rest of the message shared
-       area will be dedicated to the dma lli tables
-*/
-#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES                  (8 * 1024)
-
-/* the size of the message shared area in bytes */
-#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES          (8 * 1024)
-
-/* the size of the static pool area in bytes */
-#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES                  (4 * 1024)
-
-/* the size of the data pool shared area in bytes */
-#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES        (12 * 1024)
-
-/* the size of the synchronic dma tables area in bytes */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES   (1024 * 5)
-
-
-/* the size of the flow dma tables area in bytes */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES         (1024 * 4)
-
-/* system data (time, caller id etc') pool */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES           100
-
-
-/* area size that is mapped  - we map the MESSAGE AREA, STATIC POOL and
-       DATA POOL areas. The area must be a multiple of 4k */
-#define SEP_DRIVER_MMMAP_AREA_SIZE                            (1024 * 24)
-
-
-/*-----------------------------------------------
-       offsets of the areas starting from the shared area start address
-*/
-
-/* message area offset */
-#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES               0
-
-/* static pool area offset */
-#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
-               (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
-
-/* data pool area offset */
-#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
-
-/* synchronic dma tables area offset */
-#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
-
-/* sep driver flow dma tables area offset */
-#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
-       (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* system memory offset in bytes */
-#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
-       (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
-       SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
-
-/* offset of the time area */
-#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
-       (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
-
-
-
-/* start physical address of the SEP registers memory in HOST */
-#define SEP_IO_MEM_REGION_START_ADDRESS                       0x80000000
-
-/* size of the SEP registers memory region  in HOST (for now 100 registers) */
-#define SEP_IO_MEM_REGION_SIZE                                (2 * 0x100000)
-
-/* define the number of IRQ for SEP interrupts */
-#define SEP_DIRVER_IRQ_NUM                                    1
-
-/* maximum number of add buffers */
-#define SEP_MAX_NUM_ADD_BUFFERS                               100
-
-/* number of flows */
-#define SEP_DRIVER_NUM_FLOWS                                  4
-
-/* maximum number of entries in flow table */
-#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE              25
-
-/* offset of the num entries in the block length entry of the LLI */
-#define SEP_NUM_ENTRIES_OFFSET_IN_BITS                        24
-
-/* offset of the interrupt flag in the block length entry of the LLI */
-#define SEP_INT_FLAG_OFFSET_IN_BITS                           31
-
-/* mask for extracting data size from LLI */
-#define SEP_TABLE_DATA_SIZE_MASK                              0xFFFFFF
-
-/* mask for entries after being shifted left */
-#define SEP_NUM_ENTRIES_MASK                                  0x7F
-
-/* default flow id */
-#define SEP_FREE_FLOW_ID                                      0xFFFFFFFF
-
-/* temp flow id used during creation of a new flow until receiving
-       real flow id from sep */
-#define SEP_TEMP_FLOW_ID                   (SEP_DRIVER_NUM_FLOWS + 1)
-
-/* maximum add buffers message length in bytes */
-#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES                   (7 * 4)
-
-/* maximum number of concurrent virtual buffers */
-#define SEP_MAX_VIRT_BUFFERS_CONCURRENT                       100
-
-/* the token that defines the start of time address */
-#define SEP_TIME_VAL_TOKEN                                    0x12345678
-
-/* DEBUG LEVEL MASKS */
-#define SEP_DEBUG_LEVEL_BASIC       0x1
-
-#define SEP_DEBUG_LEVEL_EXTENDED    0x4
-
-
-/* Debug helpers */
-
-#define dbg(fmt, args...) \
-do {\
-       if (debug & SEP_DEBUG_LEVEL_BASIC) \
-               printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-#define edbg(fmt, args...) \
-do { \
-       if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
-               printk(KERN_DEBUG fmt, ##args); \
-} while(0);
-
-
-
-#endif
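
The offset macros above are just a running sum of the area sizes, so the whole layout can be checked with a few lines of ordinary user-space C. The short names below are local abbreviations of the SEP_DRIVER_*_SIZE_IN_BYTES values, copied from the definitions above:

    #include <stdio.h>

    #define MSG_AREA        (8 * 1024)      /* message shared area */
    #define STATIC_AREA     (4 * 1024)      /* static pool */
    #define DATA_POOL       (12 * 1024)     /* data pool */
    #define SYNC_TABLES     (1024 * 5)      /* synchronic dma tables */
    #define FLOW_TABLES     (1024 * 4)      /* flow dma tables */
    #define SYSTEM_DATA     100             /* system data (time etc.) */

    int main(void)
    {
            unsigned long off = 0;

            printf("message area     at %6lu\n", off); off += MSG_AREA;
            printf("static pool      at %6lu\n", off); off += STATIC_AREA;
            printf("data pool        at %6lu\n", off); off += DATA_POOL;
            printf("sync dma tables  at %6lu\n", off); off += SYNC_TABLES;
            printf("flow dma tables  at %6lu\n", off); off += FLOW_TABLES;
            printf("system data      at %6lu\n", off); off += SYSTEM_DATA;
            printf("total shared area = %lu bytes\n", off);
            return 0;
    }

The first three areas add up to the 24K mapped region described in the comment block, and the running total matches the size computed in sep_probe() and sep_exit().
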
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644 (file)
index ea6abd8..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- *
- *  sep_driver_hw_defs.h - Security Processor Driver hardware definitions
- *
- *  Copyright(c) 2009 Intel Corporation. All rights reserved.
- *  Copyright(c) 2009 Discretix. All rights reserved.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  This program is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  You should have received a copy of the GNU General Public License along with
- *  this program; if not, write to the Free Software Foundation, Inc., 59
- *  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- *  CONTACTS:
- *
- *  Mark Allyn         mark.a.allyn@intel.com
- *
- *  CHANGES:
- *
- *  2009.06.26 Initial publish
- *
- */
-
-#ifndef SEP_DRIVER_HW_DEFS__H
-#define SEP_DRIVER_HW_DEFS__H
-
-/*--------------------------------------------------------------------------*/
-/* Abstract: HW Registers Defines.                                          */
-/*                                                                          */
-/* Note: This file was automatically created !!!                            */
-/*       DO NOT EDIT THIS FILE !!!                                          */
-/*--------------------------------------------------------------------------*/
-
-
-/* cf registers */
-#define         HW_R0B_ADDR_0_REG_ADDR                  0x0000UL
-#define         HW_R0B_ADDR_1_REG_ADDR                  0x0004UL
-#define         HW_R0B_ADDR_2_REG_ADDR                  0x0008UL
-#define         HW_R0B_ADDR_3_REG_ADDR                  0x000cUL
-#define         HW_R0B_ADDR_4_REG_ADDR                  0x0010UL
-#define         HW_R0B_ADDR_5_REG_ADDR                  0x0014UL
-#define         HW_R0B_ADDR_6_REG_ADDR                  0x0018UL
-#define         HW_R0B_ADDR_7_REG_ADDR                  0x001cUL
-#define         HW_R0B_ADDR_8_REG_ADDR                  0x0020UL
-#define         HW_R2B_ADDR_0_REG_ADDR                  0x0080UL
-#define         HW_R2B_ADDR_1_REG_ADDR                  0x0084UL
-#define         HW_R2B_ADDR_2_REG_ADDR                  0x0088UL
-#define         HW_R2B_ADDR_3_REG_ADDR                  0x008cUL
-#define         HW_R2B_ADDR_4_REG_ADDR                  0x0090UL
-#define         HW_R2B_ADDR_5_REG_ADDR                  0x0094UL
-#define         HW_R2B_ADDR_6_REG_ADDR                  0x0098UL
-#define         HW_R2B_ADDR_7_REG_ADDR                  0x009cUL
-#define         HW_R2B_ADDR_8_REG_ADDR                  0x00a0UL
-#define         HW_R3B_REG_ADDR                         0x00C0UL
-#define         HW_R4B_REG_ADDR                         0x0100UL
-#define         HW_CSA_ADDR_0_REG_ADDR                  0x0140UL
-#define         HW_CSA_ADDR_1_REG_ADDR                  0x0144UL
-#define         HW_CSA_ADDR_2_REG_ADDR                  0x0148UL
-#define         HW_CSA_ADDR_3_REG_ADDR                  0x014cUL
-#define         HW_CSA_ADDR_4_REG_ADDR                  0x0150UL
-#define         HW_CSA_ADDR_5_REG_ADDR                  0x0154UL
-#define         HW_CSA_ADDR_6_REG_ADDR                  0x0158UL
-#define         HW_CSA_ADDR_7_REG_ADDR                  0x015cUL
-#define         HW_CSA_ADDR_8_REG_ADDR                  0x0160UL
-#define         HW_CSA_REG_ADDR                         0x0140UL
-#define         HW_SINB_REG_ADDR                        0x0180UL
-#define         HW_SOUTB_REG_ADDR                       0x0184UL
-#define         HW_PKI_CONTROL_REG_ADDR                 0x01C0UL
-#define         HW_PKI_STATUS_REG_ADDR                  0x01C4UL
-#define         HW_PKI_BUSY_REG_ADDR                0x01C8UL
-#define         HW_PKI_A_1025_REG_ADDR                  0x01CCUL
-#define         HW_PKI_SDMA_CTL_REG_ADDR                0x01D0UL
-#define         HW_PKI_SDMA_OFFSET_REG_ADDR     0x01D4UL
-#define         HW_PKI_SDMA_POINTERS_REG_ADDR   0x01D8UL
-#define         HW_PKI_SDMA_DLENG_REG_ADDR              0x01DCUL
-#define         HW_PKI_SDMA_EXP_POINTERS_REG_ADDR       0x01E0UL
-#define         HW_PKI_SDMA_RES_POINTERS_REG_ADDR       0x01E4UL
-#define         HW_PKI_CLR_REG_ADDR                     0x01E8UL
-#define         HW_PKI_SDMA_BUSY_REG_ADDR                   0x01E8UL
-#define         HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR        0x01ECUL
-#define         HW_PKI_SDMA_MUL_BY1_REG_ADDR            0x01F0UL
-#define         HW_PKI_SDMA_RMUL_SEL_REG_ADDR           0x01F4UL
-#define         HW_DES_KEY_0_REG_ADDR                   0x0208UL
-#define         HW_DES_KEY_1_REG_ADDR                   0x020CUL
-#define         HW_DES_KEY_2_REG_ADDR                   0x0210UL
-#define         HW_DES_KEY_3_REG_ADDR                   0x0214UL
-#define         HW_DES_KEY_4_REG_ADDR                   0x0218UL
-#define         HW_DES_KEY_5_REG_ADDR                   0x021CUL
-#define         HW_DES_CONTROL_0_REG_ADDR               0x0220UL
-#define         HW_DES_CONTROL_1_REG_ADDR               0x0224UL
-#define         HW_DES_IV_0_REG_ADDR                    0x0228UL
-#define         HW_DES_IV_1_REG_ADDR                    0x022CUL
-#define         HW_AES_KEY_0_ADDR_0_REG_ADDR            0x0400UL
-#define         HW_AES_KEY_0_ADDR_1_REG_ADDR            0x0404UL
-#define         HW_AES_KEY_0_ADDR_2_REG_ADDR            0x0408UL
-#define         HW_AES_KEY_0_ADDR_3_REG_ADDR            0x040cUL
-#define         HW_AES_KEY_0_ADDR_4_REG_ADDR            0x0410UL
-#define         HW_AES_KEY_0_ADDR_5_REG_ADDR            0x0414UL
-#define         HW_AES_KEY_0_ADDR_6_REG_ADDR            0x0418UL
-#define         HW_AES_KEY_0_ADDR_7_REG_ADDR            0x041cUL
-#define         HW_AES_KEY_0_REG_ADDR                   0x0400UL
-#define         HW_AES_IV_0_ADDR_0_REG_ADDR             0x0440UL
-#define         HW_AES_IV_0_ADDR_1_REG_ADDR             0x0444UL
-#define         HW_AES_IV_0_ADDR_2_REG_ADDR             0x0448UL
-#define         HW_AES_IV_0_ADDR_3_REG_ADDR             0x044cUL
-#define         HW_AES_IV_0_REG_ADDR                    0x0440UL
-#define         HW_AES_CTR1_ADDR_0_REG_ADDR             0x0460UL
-#define         HW_AES_CTR1_ADDR_1_REG_ADDR             0x0464UL
-#define         HW_AES_CTR1_ADDR_2_REG_ADDR             0x0468UL
-#define         HW_AES_CTR1_ADDR_3_REG_ADDR             0x046cUL
-#define         HW_AES_CTR1_REG_ADDR                    0x0460UL
-#define         HW_AES_SK_REG_ADDR                      0x0478UL
-#define         HW_AES_MAC_OK_REG_ADDR                      0x0480UL
-#define         HW_AES_PREV_IV_0_ADDR_0_REG_ADDR        0x0490UL
-#define         HW_AES_PREV_IV_0_ADDR_1_REG_ADDR        0x0494UL
-#define         HW_AES_PREV_IV_0_ADDR_2_REG_ADDR        0x0498UL
-#define         HW_AES_PREV_IV_0_ADDR_3_REG_ADDR        0x049cUL
-#define         HW_AES_PREV_IV_0_REG_ADDR                   0x0490UL
-#define         HW_AES_CONTROL_REG_ADDR                     0x04C0UL
-#define         HW_HASH_H0_REG_ADDR                 0x0640UL
-#define         HW_HASH_H1_REG_ADDR                 0x0644UL
-#define         HW_HASH_H2_REG_ADDR                 0x0648UL
-#define         HW_HASH_H3_REG_ADDR                 0x064CUL
-#define         HW_HASH_H4_REG_ADDR                 0x0650UL
-#define         HW_HASH_H5_REG_ADDR                 0x0654UL
-#define         HW_HASH_H6_REG_ADDR                 0x0658UL
-#define         HW_HASH_H7_REG_ADDR                 0x065CUL
-#define         HW_HASH_H8_REG_ADDR                 0x0660UL
-#define         HW_HASH_H9_REG_ADDR                 0x0664UL
-#define         HW_HASH_H10_REG_ADDR                0x0668UL
-#define         HW_HASH_H11_REG_ADDR                0x066CUL
-#define         HW_HASH_H12_REG_ADDR                0x0670UL
-#define         HW_HASH_H13_REG_ADDR                0x0674UL
-#define         HW_HASH_H14_REG_ADDR                0x0678UL
-#define         HW_HASH_H15_REG_ADDR                0x067CUL
-#define         HW_HASH_CONTROL_REG_ADDR                0x07C0UL
-#define         HW_HASH_PAD_EN_REG_ADDR                 0x07C4UL
-#define         HW_HASH_PAD_CFG_REG_ADDR                0x07C8UL
-#define         HW_HASH_CUR_LEN_0_REG_ADDR      0x07CCUL
-#define         HW_HASH_CUR_LEN_1_REG_ADDR      0x07D0UL
-#define         HW_HASH_CUR_LEN_2_REG_ADDR      0x07D4UL
-#define         HW_HASH_CUR_LEN_3_REG_ADDR      0x07D8UL
-#define         HW_HASH_PARAM_REG_ADDR                  0x07DCUL
-#define         HW_HASH_INT_BUSY_REG_ADDR               0x07E0UL
-#define         HW_HASH_SW_RESET_REG_ADDR               0x07E4UL
-#define         HW_HASH_ENDIANESS_REG_ADDR      0x07E8UL
-#define         HW_HASH_DATA_REG_ADDR               0x07ECUL
-#define         HW_DRNG_CONTROL_REG_ADDR                0x0800UL
-#define         HW_DRNG_VALID_REG_ADDR                  0x0804UL
-#define         HW_DRNG_DATA_REG_ADDR               0x0808UL
-#define         HW_RND_SRC_EN_REG_ADDR                  0x080CUL
-#define         HW_AES_CLK_ENABLE_REG_ADDR      0x0810UL
-#define         HW_DES_CLK_ENABLE_REG_ADDR      0x0814UL
-#define         HW_HASH_CLK_ENABLE_REG_ADDR     0x0818UL
-#define         HW_PKI_CLK_ENABLE_REG_ADDR      0x081CUL
-#define         HW_CLK_STATUS_REG_ADDR                  0x0824UL
-#define         HW_CLK_ENABLE_REG_ADDR                  0x0828UL
-#define         HW_DRNG_SAMPLE_REG_ADDR                 0x0850UL
-#define         HW_RND_SRC_CTL_REG_ADDR                 0x0858UL
-#define         HW_CRYPTO_CTL_REG_ADDR                  0x0900UL
-#define         HW_CRYPTO_STATUS_REG_ADDR               0x090CUL
-#define         HW_CRYPTO_BUSY_REG_ADDR                 0x0910UL
-#define         HW_AES_BUSY_REG_ADDR                0x0914UL
-#define         HW_DES_BUSY_REG_ADDR                0x0918UL
-#define         HW_HASH_BUSY_REG_ADDR               0x091CUL
-#define         HW_CONTENT_REG_ADDR                 0x0924UL
-#define         HW_VERSION_REG_ADDR                 0x0928UL
-#define         HW_CONTEXT_ID_REG_ADDR                  0x0930UL
-#define         HW_DIN_BUFFER_REG_ADDR                  0x0C00UL
-#define         HW_DIN_MEM_DMA_BUSY_REG_ADDR    0x0c20UL
-#define         HW_SRC_LLI_MEM_ADDR_REG_ADDR    0x0c24UL
-#define         HW_SRC_LLI_WORD0_REG_ADDR               0x0C28UL
-#define         HW_SRC_LLI_WORD1_REG_ADDR               0x0C2CUL
-#define         HW_SRAM_SRC_ADDR_REG_ADDR               0x0c30UL
-#define         HW_DIN_SRAM_BYTES_LEN_REG_ADDR  0x0c34UL
-#define         HW_DIN_SRAM_DMA_BUSY_REG_ADDR   0x0C38UL
-#define         HW_WRITE_ALIGN_REG_ADDR                 0x0C3CUL
-#define         HW_OLD_DATA_REG_ADDR                0x0C48UL
-#define         HW_WRITE_ALIGN_LAST_REG_ADDR    0x0C4CUL
-#define         HW_DOUT_BUFFER_REG_ADDR                 0x0C00UL
-#define         HW_DST_LLI_WORD0_REG_ADDR               0x0D28UL
-#define         HW_DST_LLI_WORD1_REG_ADDR               0x0D2CUL
-#define         HW_DST_LLI_MEM_ADDR_REG_ADDR    0x0D24UL
-#define         HW_DOUT_MEM_DMA_BUSY_REG_ADDR   0x0D20UL
-#define         HW_SRAM_DEST_ADDR_REG_ADDR      0x0D30UL
-#define         HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
-#define         HW_DOUT_SRAM_DMA_BUSY_REG_ADDR  0x0D38UL
-#define         HW_READ_ALIGN_REG_ADDR                  0x0D3CUL
-#define         HW_READ_LAST_DATA_REG_ADDR      0x0D44UL
-#define         HW_RC4_THRU_CPU_REG_ADDR                0x0D4CUL
-#define         HW_AHB_SINGLE_REG_ADDR                  0x0E00UL
-#define         HW_SRAM_DATA_REG_ADDR               0x0F00UL
-#define         HW_SRAM_ADDR_REG_ADDR               0x0F04UL
-#define         HW_SRAM_DATA_READY_REG_ADDR     0x0F08UL
-#define         HW_HOST_IRR_REG_ADDR                     0x0A00UL
-#define         HW_HOST_IMR_REG_ADDR                     0x0A04UL
-#define         HW_HOST_ICR_REG_ADDR                     0x0A08UL
-#define         HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR  0x0A10UL
-#define         HW_HOST_SEP_BUSY_REG_ADDR                    0x0A14UL
-#define         HW_HOST_SEP_LCS_REG_ADDR                     0x0A18UL
-#define         HW_HOST_CC_SW_RST_REG_ADDR               0x0A40UL
-#define         HW_HOST_SEP_SW_RST_REG_ADDR              0x0A44UL
-#define         HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR        0x0A80UL
-#define         HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR        0x0A84UL
-#define         HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR        0x0A88UL
-#define         HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR        0x0A8cUL
-#define         HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR        0x0A90UL
-#define         HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR        0x0A94UL
-#define         HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR    0x0A98UL
-#define         HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR        0x0A9cUL
-#define         HW_HOST_SEP_HOST_GPR0_REG_ADDR           0x0B00UL
-#define         HW_HOST_SEP_HOST_GPR1_REG_ADDR           0x0B04UL
-#define         HW_HOST_SEP_HOST_GPR2_REG_ADDR           0x0B08UL
-#define         HW_HOST_SEP_HOST_GPR3_REG_ADDR       0x0B0CUL
-#define         HW_HOST_HOST_SEP_GPR0_REG_ADDR       0x0B80UL
-#define         HW_HOST_HOST_SEP_GPR1_REG_ADDR       0x0B84UL
-#define         HW_HOST_HOST_SEP_GPR2_REG_ADDR       0x0B88UL
-#define         HW_HOST_HOST_SEP_GPR3_REG_ADDR       0x0B8CUL
-#define         HW_HOST_HOST_ENDIAN_REG_ADDR         0x0B90UL
-#define         HW_HOST_HOST_COMM_CLK_EN_REG_ADDR        0x0B94UL
-#define         HW_CLR_SRAM_BUSY_REG_REG_ADDR        0x0F0CUL
-#define    HW_CC_SRAM_BASE_ADDRESS              0x5800UL
-
-#endif                         /* SEP_DRIVER_HW_DEFS__H */
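
The sep_read_reg() and sep_write_reg() helpers used throughout the driver are not shown in these hunks; their definitions live elsewhere in the deleted sources. A sketch of what such helpers typically look like, assuming the usual readl()/writel() accessors on top of the pci_ioremap_bar() mapping kept in sep->reg_addr:

    #include <linux/io.h>
    #include <linux/types.h>

    /* illustrative accessors, not the driver's actual definitions */
    static inline u32 example_read_reg(void __iomem *base, unsigned long offset)
    {
            return readl(base + offset);
    }

    static inline void example_write_reg(void __iomem *base, unsigned long offset,
                                         u32 value)
    {
            writel(value, base + offset);
    }

    /* e.g. example_write_reg(sep->reg_addr, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); */
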
index d0c5c97eda3e6a3791bf91454f63c3a86bf79944..44a7fbe7eccd45282cd94ab35169331513cbde91 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/kthread.h>
 #include <linux/log2.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 
 /**** Helper functions used for Div, Remainder operation on u64 ****/
 
@@ -113,7 +114,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
 
 #define GLOB_SBD_NAME          "nd"
 #define GLOB_SBD_IRQ_NUM       (29)
-#define GLOB_VERSION           "driver version 20091110"
 
 #define GLOB_SBD_IOCTL_GC                        (0x7701)
 #define GLOB_SBD_IOCTL_WL                        (0x7702)
@@ -272,13 +272,6 @@ static int get_res_blk_num_os(void)
        return res_blks;
 }
 
-static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-{
-       rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-       /* rq->timeout = 5 * HZ; */
-       rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* Transfer a full request. */
 static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
 {
@@ -296,8 +289,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
                        IdentifyDeviceData.PagesPerBlock *
                        res_blks_os;
 
-       if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-                       req->cmd[0] == REQ_LB_OP_FLUSH) {
+       if (req->cmd_type & REQ_FLUSH) {
                if (force_flush_cache()) /* Fail to flush cache */
                        return -EIO;
                else
@@ -597,11 +589,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
        return -ENOTTY;
 }
 
+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+               unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       lock_kernel();
+       ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+       unlock_kernel();
+
+       return ret;
+}
+
 static struct block_device_operations GLOB_SBD_ops = {
        .owner = THIS_MODULE,
        .open = GLOB_SBD_open,
        .release = GLOB_SBD_release,
-       .locked_ioctl = GLOB_SBD_ioctl,
+       .ioctl = GLOB_SBD_unlocked_ioctl,
        .getgeo = GLOB_SBD_getgeo,
 };
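
The wrapper added above simply recreates the old locked_ioctl behaviour by taking the big kernel lock around GLOB_SBD_ioctl(). A common follow-up step (not part of this patch) is to push the locking down to a driver-private mutex instead; a hypothetical sketch:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(glob_sbd_mutex);    /* hypothetical lock, not in the driver */

    static int GLOB_SBD_mutex_ioctl(struct block_device *bdev, fmode_t mode,
                                    unsigned int cmd, unsigned long arg)
    {
            int ret;

            mutex_lock(&glob_sbd_mutex);
            ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
            mutex_unlock(&glob_sbd_mutex);

            return ret;
    }
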
 
@@ -650,8 +654,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
        /* Here we force report 512 byte hardware sector size to Kernel */
        blk_queue_logical_block_size(dev->queue, 512);
 
-       blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-                                       SBD_prepare_flush);
+       blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
 
        dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
        if (IS_ERR(dev->thread)) {
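
For reference, kthread_run() as used just above never returns NULL on failure; it returns an ERR_PTR() value, which is why the IS_ERR() check follows. A minimal sketch of the pattern, with a hypothetical thread function standing in for spectra_trans_thread():

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int example_thread_fn(void *data)
    {
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();             /* real transfer work would go here */
            }
            return 0;
    }

    static int example_start_thread(void *data)
    {
            struct task_struct *thread;

            thread = kthread_run(example_thread_fn, data, "nand_thd");
            if (IS_ERR(thread))
                    return PTR_ERR(thread);

            return 0;
    }
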
index 134aa5166a8d5321cb377c82f0603284e9aaeb8a..9b5218b6ada80095a7a525749c58c0ad619a46f8 100644 (file)
@@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
 static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
                                 u8 cache_blk, u16 flag);
 static int FTL_Cache_Write(void);
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
 static void FTL_Calculate_LRU(void);
 static u32 FTL_Get_Block_Index(u32 wBlockNum);
 
@@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void);
 static int FTL_Replace_Block(u64 blk_addr);
 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
 
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-
 struct device_info_tag DeviceInfo;
 struct flash_cache_tag Cache;
 static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@ static void dump_cache_l2_table(void)
 {
        struct list_head *p;
        struct spectra_l2_cache_list *pnd;
-       int n, i;
+       int n;
 
        n = 0;
        list_for_each(p, &cache_l2.table.list) {
@@ -1537,79 +1534,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
        return wResult;
 }
 
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Cache_Update_Block
-* Inputs:       pointer to buffer,page address,block address
-* Outputs:      PASS=0 / FAIL=1
-* Description:  It updates the cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Update_Block(u8 *pData,
-                       u64 old_page_addr, u64 blk_addr)
-{
-       int i, j;
-       u8 *buf = pData;
-       int wResult = PASS;
-       int wFoundInCache;
-       u64 page_addr;
-       u64 addr;
-       u64 old_blk_addr;
-       u16 page_offset;
-
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                               __FILE__, __LINE__, __func__);
-
-       old_blk_addr = (u64)(old_page_addr >>
-               DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
-       page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
-               DeviceInfo.nBitsInPageDataSize);
-
-       for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
-               page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
-               if (i != page_offset) {
-                       wFoundInCache = FAIL;
-                       for (j = 0; j < CACHE_ITEM_NUM; j++) {
-                               addr = Cache.array[j].address;
-                               addr = FTL_Get_Physical_Block_Addr(addr) +
-                                       GLOB_u64_Remainder(addr, 2);
-                               if ((addr >= page_addr) && addr <
-                                       (page_addr + Cache.cache_item_size)) {
-                                       wFoundInCache = PASS;
-                                       buf = Cache.array[j].buf;
-                                       Cache.array[j].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-                                       int_cache[ftl_cmd_cnt].item = j;
-                                       int_cache[ftl_cmd_cnt].cache.address =
-                                               Cache.array[j].address;
-                                       int_cache[ftl_cmd_cnt].cache.changed =
-                                               Cache.array[j].changed;
-#endif
-#endif
-                                       break;
-                               }
-                       }
-                       if (FAIL == wFoundInCache) {
-                               if (ERR == FTL_Cache_Read_All(g_pTempBuf,
-                                       page_addr)) {
-                                       wResult = FAIL;
-                                       break;
-                               }
-                               buf = g_pTempBuf;
-                       }
-               } else {
-                       buf = pData;
-               }
-
-               if (FAIL == FTL_Cache_Write_All(buf,
-                       blk_addr + (page_addr - old_blk_addr))) {
-                       wResult = FAIL;
-                       break;
-               }
-       }
-
-       return wResult;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Copy_Block
 * Inputs:       source block address
@@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void)
 static int erase_l2_cache_blocks(void)
 {
        int i, ret = PASS;
-       u32 pblk, lblk;
+       u32 pblk, lblk = BAD_BLOCK;
        u64 addr;
        u32 *pbt = (u32 *)g_pBlockTable;
 
@@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr)
        return ret;
 }
 
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Cache_Write_Back
-* Inputs:       pointer to data cached in sys memory
-*               address of free block in flash
-* Outputs:      PASS=0 / FAIL=1
-* Description:  writes all the pages of Cache Block to flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
-{
-       int i, j, iErase;
-       u64 old_page_addr, addr, phy_addr;
-       u32 *pbt = (u32 *)g_pBlockTable;
-       u32 lba;
-       
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                              __FILE__, __LINE__, __func__);
-
-       old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
-               GLOB_u64_Remainder(blk_addr, 2);
-
-       iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
-
-       pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
-       p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
-       g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
-       p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
-       p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
-               DeviceInfo.nBitsInBlockDataSize);
-       p_BTableChangesDelta->BT_Entry_Value =
-               pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
-       p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-
-       if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-               g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-               FTL_Write_IN_Progress_Block_Table_Page();
-       }
-
-       for (i = 0; i < RETRY_TIMES; i++) {
-               if (PASS == iErase) {
-                       phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-                       if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-                               lba = BLK_FROM_ADDR(blk_addr);
-                               MARK_BLOCK_AS_BAD(pbt[lba]);
-                               i = RETRY_TIMES;
-                               break;
-                       }
-               }
-
-               for (j = 0; j < CACHE_ITEM_NUM; j++) {
-                       addr = Cache.array[j].address;
-                       if ((addr <= blk_addr) &&
-                               ((addr + Cache.cache_item_size) > blk_addr))
-                               cache_block_to_write = j;
-               }
-
-               phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-               if (PASS == FTL_Cache_Update_Block(pData,
-                                       old_page_addr, phy_addr)) {
-                       cache_block_to_write = UNHIT_CACHE_ITEM;
-                       break;
-               } else {
-                       iErase = PASS;
-               }
-       }
-
-       if (i >= RETRY_TIMES) {
-               if (ERR == FTL_Flash_Error_Handle(pData,
-                                       old_page_addr, blk_addr))
-                       return ERR;
-               else
-                       return FAIL;
-       }
-
-       return PASS;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_Write_Page
 * Inputs:       Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce)
        return 1;
 }
 
-/******************************************************************
-* Function:     GLOB_FTL_Flash_Format
-* Inputs:       none
-* Outputs:      PASS
-* Description:  The block table stores bad block info, including MDF+
-*               blocks gone bad over the ages. Therefore, if we have a
-*               block table in place, then use it to scan for bad blocks
-*               If not, then scan for MDF.
-*               Now, a block table will only be found if spectra was already
-*               being used. For a fresh flash, we'll go thru scanning for
-*               MDF. If spectra was being used, then there is a chance that
-*               the MDF has been corrupted. Spectra avoids writing to the
-*               first 2 bytes of the spare area to all pages in a block. This
-*               covers all known flash devices. However, since flash
-*               manufacturers have no standard of where the MDF is stored,
-*               this cannot guarantee that the MDF is protected for future
-*               devices too. The initial scanning for the block table assures
-*               this. It is ok even if the block table is outdated, as all
-*               we're looking for are bad block markers.
-*               Use this when mounting a file system or starting a
-*               new flash.
-*
-*********************************************************************/
-static int  FTL_Format_Flash(u8 valid_block_table)
-{
-       u32 i, j;
-       u32 *pbt = (u32 *)g_pBlockTable;
-       u32 tempNode;
-       int ret;
-
-#if CMD_DMA
-       u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
-       if (ftl_cmd_cnt)
-               return FAIL;
-#endif
-
-       if (FAIL == FTL_Check_Block_Table(FAIL))
-               valid_block_table = 0;
-
-       if (valid_block_table) {
-               u8 switched = 1;
-               u32 block, k;
-
-               k = DeviceInfo.wSpectraStartBlock;
-               while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
-                       switched = 0;
-                       k++;
-                       for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-                       j <= DeviceInfo.wSpectraEndBlock;
-                       j++, i++) {
-                               block = (pbt[i] & ~BAD_BLOCK) -
-                                       DeviceInfo.wSpectraStartBlock;
-                               if (block != i) {
-                                       switched = 1;
-                                       tempNode = pbt[i];
-                                       pbt[i] = pbt[block];
-                                       pbt[block] = tempNode;
-                               }
-                       }
-               }
-               if ((k == DeviceInfo.wSpectraEndBlock) && switched)
-                       valid_block_table = 0;
-       }
-
-       if (!valid_block_table) {
-               memset(g_pBlockTable, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u32));
-               memset(g_pWearCounter, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u8));
-               if (DeviceInfo.MLCDevice)
-                       memset(g_pReadCounter, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u16));
-#if CMD_DMA
-               memset(g_pBTStartingCopy, 0,
-                       DeviceInfo.wDataBlockNum * sizeof(u32));
-               memset(g_pWearCounterCopy, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u8));
-               if (DeviceInfo.MLCDevice)
-                       memset(g_pReadCounterCopy, 0,
-                               DeviceInfo.wDataBlockNum * sizeof(u16));
-#endif
-               for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-                       j <= DeviceInfo.wSpectraEndBlock;
-                       j++, i++) {
-                       if (GLOB_LLD_Get_Bad_Block((u32)j))
-                               pbt[i] = (u32)(BAD_BLOCK | j);
-               }
-       }
-
-       nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
-
-       for (j = DeviceInfo.wSpectraStartBlock, i = 0;
-               j <= DeviceInfo.wSpectraEndBlock;
-               j++, i++) {
-               if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
-                       ret = GLOB_LLD_Erase_Block(j);
-                       if (FAIL == ret) {
-                               pbt[i] = (u32)(j);
-                               MARK_BLOCK_AS_BAD(pbt[i]);
-                               nand_dbg_print(NAND_DBG_WARN,
-                              "NAND Program fail in %s, Line %d, "
-                              "Function: %s, new Bad Block %d generated!\n",
-                              __FILE__, __LINE__, __func__, (int)j);
-                       } else {
-                               pbt[i] = (u32)(SPARE_BLOCK | j);
-                       }
-               }
-#if CMD_DMA
-               pbtStartingCopy[i] = pbt[i];
-#endif
-       }
-
-       g_wBlockTableOffset = 0;
-       for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
-                       DeviceInfo.wSpectraStartBlock))
-                       && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
-               ;
-       if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
-               printk(KERN_ERR "All blocks bad!\n");
-               return FAIL;
-       } else {
-               g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
-               if (i != BLOCK_TABLE_INDEX) {
-                       tempNode = pbt[i];
-                       pbt[i] = pbt[BLOCK_TABLE_INDEX];
-                       pbt[BLOCK_TABLE_INDEX] = tempNode;
-               }
-       }
-       pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-
-#if CMD_DMA
-       pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
-#endif
-
-       g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-       memset(g_pBTBlocks, 0xFF,
-                       (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
-       g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
-       FTL_Write_Block_Table(FAIL);
-
-       for (i = 0; i < CACHE_ITEM_NUM; i++) {
-               Cache.array[i].address = NAND_CACHE_INIT_ADDR;
-               Cache.array[i].use_cnt = 0;
-               Cache.array[i].changed  = CLEAR;
-       }
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-       memcpy((void *)&cache_start_copy, (void *)&Cache,
-                       sizeof(struct flash_cache_tag));
-#endif
-       return PASS;
-}
-
 static int  force_format_nand(void)
 {
        u32 i;
@@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void)
        return wResult;
 }
 
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function:     FTL_Flash_Error_Handle
-* Inputs:       Pointer to data
-*               Page address
-*               Block address
-* Outputs:      PASS=0 / FAIL=1
-* Description:  It handles any error occured during Spectra operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
-                               u64 blk_addr)
-{
-       u32 i;
-       int j;
-       u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
-       u64 phy_addr;
-       int wErase = FAIL;
-       int wResult = FAIL;
-       u32 *pbt = (u32 *)g_pBlockTable;
-
-       nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
-                      __FILE__, __LINE__, __func__);
-
-       if (ERR == GLOB_FTL_Garbage_Collection())
-               return ERR;
-
-       do {
-               for (i = DeviceInfo.wSpectraEndBlock -
-                       DeviceInfo.wSpectraStartBlock;
-                                       i > 0; i--) {
-                       if (IS_SPARE_BLOCK(i)) {
-                               tmp_node = (u32)(BAD_BLOCK |
-                                       pbt[blk_node]);
-                               pbt[blk_node] = (u32)(pbt[i] &
-                                       (~SPARE_BLOCK));
-                               pbt[i] = tmp_node;
-#if CMD_DMA
-                               p_BTableChangesDelta =
-                                   (struct BTableChangesDelta *)
-                                   g_pBTDelta_Free;
-                               g_pBTDelta_Free +=
-                                   sizeof(struct BTableChangesDelta);
-
-                               p_BTableChangesDelta->ftl_cmd_cnt =
-                                   ftl_cmd_cnt;
-                               p_BTableChangesDelta->BT_Index =
-                                   blk_node;
-                               p_BTableChangesDelta->BT_Entry_Value =
-                                   pbt[blk_node];
-                               p_BTableChangesDelta->ValidFields = 0x0C;
-
-                               p_BTableChangesDelta =
-                                   (struct BTableChangesDelta *)
-                                   g_pBTDelta_Free;
-                               g_pBTDelta_Free +=
-                                   sizeof(struct BTableChangesDelta);
-
-                               p_BTableChangesDelta->ftl_cmd_cnt =
-                                   ftl_cmd_cnt;
-                               p_BTableChangesDelta->BT_Index = i;
-                               p_BTableChangesDelta->BT_Entry_Value = pbt[i];
-                               p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
-                               wResult = PASS;
-                               break;
-                       }
-               }
-
-               if (FAIL == wResult) {
-                       if (FAIL == GLOB_FTL_Garbage_Collection())
-                               break;
-                       else
-                               continue;
-               }
-
-               if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
-                       g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
-                       FTL_Write_IN_Progress_Block_Table_Page();
-               }
-
-               phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
-
-               for (j = 0; j < RETRY_TIMES; j++) {
-                       if (PASS == wErase) {
-                               if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
-                                       MARK_BLOCK_AS_BAD(pbt[blk_node]);
-                                       break;
-                               }
-                       }
-                       if (PASS == FTL_Cache_Update_Block(pData,
-                                                          old_page_addr,
-                                                          phy_addr)) {
-                               wResult = PASS;
-                               break;
-                       } else {
-                               wResult = FAIL;
-                               wErase = PASS;
-                       }
-               }
-       } while (FAIL == wResult);
-
-       FTL_Write_Block_Table(FAIL);
-
-       return wResult;
-}
-
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Get_Page_Num
 * Inputs:       Size in bytes
index e483f80822d27003c7da89f0b472a91cde93b110..1160c55de7f280122038aa21ee063ac1cfa06cb3 100644 (file)
@@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
 
 /**
  * usb_string_ids_n() - allocate unused string IDs in batch
- * @cdev: the device whose string descriptor IDs are being allocated
+ * @c: the device whose string descriptor IDs are being allocated
  * @n: number of string IDs to allocate
  * Context: single threaded during gadget setup
  *
  * Returns the first requested ID.  This ID and next @n-1 IDs are now
- * valid IDs.  At least providind that @n is non zore because if it
+ * valid IDs.  At least provided that @n is non-zero because if it
  * is, returns last requested ID which is now very useful information.
  *
  * @usb_string_ids_n() is called from bind() callbacks to allocate
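
The kernel-doc above documents usb_string_ids_n(): it reserves @n consecutive string IDs and returns the first one. Below is a minimal caller-side sketch of that contract, assuming the usual composite-gadget setup from <linux/usb/composite.h>; the strings_dev[] table and example_bind() are illustrative names, not part of this commit.

#include <linux/usb/composite.h>

static struct usb_string strings_dev[] = {
	{ .s = "Example Manufacturer" },
	{ .s = "Example Product" },
	{ .s = "0123456789" },
	{ }				/* terminator */
};

static int example_bind(struct usb_configuration *c)
{
	struct usb_composite_dev *cdev = c->cdev;
	int first_id;

	/* Reserve three consecutive IDs; first_id..first_id+2 are now valid. */
	first_id = usb_string_ids_n(cdev, 3);
	if (first_id < 0)
		return first_id;

	strings_dev[0].id = first_id;
	strings_dev[1].id = first_id + 1;
	strings_dev[2].id = first_id + 2;
	return 0;
}
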
index 166bf71fd3482252e00fde5455fc529e8f22337b..e03058fe23cbc91394474e1130aed8fcaa6c5f84 100644 (file)
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
        /* initialize ucd */
        m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
        if (m66592 == NULL) {
+               ret = -ENOMEM;
                pr_err("kzalloc error\n");
                goto clean_up;
        }
index 70a817842755484f5ee0b19e51db505b32532bf5..2456ccd9965e3432ab4fd32a33052c104e725698 100644 (file)
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
        /* initialize ucd */
        r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
        if (r8a66597 == NULL) {
+               ret = -ENOMEM;
                printk(KERN_ERR "kzalloc error\n");
                goto clean_up;
        }
index 2dcffdac86d29d061a150706cf5bbfc4d95f8064..5e807f083bc820e1251fbb7a08e8c023ed462edd 100644 (file)
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
                        break;
        }
 
-       if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) {
+       if (i == ARRAY_SIZE(uvc_formats)) {
                printk(KERN_INFO "Unsupported format 0x%08x.\n",
                        fmt->fmt.pix.pixelformat);
                return -EINVAL;
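
The hunk above replaces a check on a possibly stale format pointer with a test of the loop index against ARRAY_SIZE(uvc_formats), which is the reliable "not found" condition after a linear search. A reduced sketch of that idiom, using hypothetical types (fmt_entry and find_format are not from the driver):

struct fmt_entry {
	u32 fcc;
};

static const struct fmt_entry *find_format(const struct fmt_entry *tbl,
					   size_t n, u32 fcc)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].fcc == fcc)
			return &tbl[i];

	return NULL;	/* i == n: no match, caller reports "unsupported" */
}
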
index d1a3dfc9a40873ea46df2a81668b065d105fb9cc..bdba8c5d844aa4ba099e128e1ed5f765d2ae349b 100644 (file)
@@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
         * almost immediately. With ISP1761, this register requires a delay of
         * 195ns between a write and subsequent read (see section 15.1.1.3).
         */
+       mmiowb();
        ndelay(195);
        skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
 
@@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
         * almost immediately. With ISP1761, this register requires a delay of
         * 195ns between a write and subsequent read (see section 15.1.1.3).
         */
+       mmiowb();
        ndelay(195);
        skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
 
index bc3f4f427065901059737ca960bd03d0fa75a8fc..48e60d166ff04c3b749607ebab77f1b797321875 100644 (file)
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
-               *trb = (*trb)++;
+               (*trb)++;
        }
 }
 
@@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        /* calc actual length */
        if (ep->skip) {
                td->urb->iso_frame_desc[idx].actual_length = 0;
+               /* Update ring dequeue pointer */
+               while (ep_ring->dequeue != td->last_trb)
+                       inc_deq(xhci, ep_ring, false);
+               inc_deq(xhci, ep_ring, false);
                return finish_td(xhci, td, event_trb, event, ep, status, true);
        }
 
index d240de097c6211afd6ff2ef1399c1b2add9ca751..801324af9470a08d5b42011bfa397a03d97cb99d 100644 (file)
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                        /* drain secondary buffer */
                        int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
                        i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
-                       if (i < 0) {
+                       if (i) {
                                retval = -EFAULT;
                                goto exit;
                        }
index 2de49c8887c5f772b0f166cc7b98354f4ad902ea..bc88c79875a146712cae5fcb7461325501f20ad7 100644 (file)
@@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                        retval = io_res;
                else {
                        io_res = copy_to_user(user_buffer, buffer, dev->report_size);
-                       if (io_res < 0)
+                       if (io_res)
                                retval = -EFAULT;
                }
                break;
@@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
                        }
                        io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
                                         sizeof(struct iowarrior_info));
-                       if (io_res < 0)
+                       if (io_res)
                                retval = -EFAULT;
                        break;
                }
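
The adutux and iowarrior hunks above share one root cause: copy_to_user() returns the number of bytes it could not copy, which is never negative, so a "< 0" test can never fire. A minimal sketch of the contract, assuming <linux/uaccess.h>; example_copy_out() is an illustrative name.

static long example_copy_out(void __user *ubuf, const void *kbuf, size_t len)
{
	/* 0 on success, otherwise the number of uncopied bytes. */
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;

	return 0;
}
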
index 0e8888588d4e01db602762276d0561d15739ed27..05aaac1c3861e5be2f8d30c4311e7faa5a2c40a8 100644 (file)
@@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        struct twl4030_usb_data *pdata = pdev->dev.platform_data;
        struct twl4030_usb      *twl;
        int                     status, err;
+       u8                      pwr;
 
        if (!pdata) {
                dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
        twl->otg.set_peripheral = twl4030_set_peripheral;
        twl->otg.set_suspend    = twl4030_set_suspend;
        twl->usb_mode           = pdata->usb_mode;
-       twl->asleep             = 1;
+
+       pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
+
+       twl->asleep             = (pwr & PHY_PWR_PHYPWD);
 
        /* init spinlock for workqueue */
        spin_lock_init(&twl->lock);
index 2bef4415c19c6627ef2555ab9a7f462bb8ed71f2..80bf8333bb037a67fc8d141fd0e33449eff18886 100644 (file)
@@ -222,8 +222,8 @@ static struct usb_serial_driver cp210x_device = {
 #define BITS_STOP_2            0x0002
 
 /* CP210X_SET_BREAK */
-#define BREAK_ON               0x0000
-#define BREAK_OFF              0x0001
+#define BREAK_ON               0x0001
+#define BREAK_OFF              0x0000
 
 /* CP210X_(SET_MHS|GET_MDMSTS) */
 #define CONTROL_DTR            0x0001
index eb12d9b096b4f54f803b9ee34e9302e694a799b8..c792c96f590e80918221c78caf7227e60ececaa1 100644 (file)
@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
        { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+       { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
+       { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -1376,7 +1379,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
        }
 
        /* set max packet size based on descriptor */
-       priv->max_packet_size = ep_desc->wMaxPacketSize;
+       priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
 
        dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
@@ -1831,7 +1834,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < len; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, flag);
                }
        } else {
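
Among the ftdi_sio changes above, the wMaxPacketSize fix matters on big-endian hosts: multi-byte USB descriptor fields are little-endian on the wire and must pass through le16_to_cpu() before use as CPU integers. A small sketch of that rule (example_ep_maxp() is an illustrative helper, not the driver's code):

static unsigned int example_ep_maxp(const struct usb_endpoint_descriptor *ep)
{
	return le16_to_cpu(ep->wMaxPacketSize);	/* __le16 -> CPU byte order */
}
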
index 6e612c52e763a0095bcb4e0f377cf02eaca8b1b8..2e95857c9633c3f6e2e52148854ea0620a6bf43e 100644 (file)
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
 
+/* Lenz LI-USB Computer Interface. */
+#define FTDI_LENZ_LIUSB_PID    0xD780
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
 #define ALTI2_VID      0x1BC9
 #define ALTI2_N3_PID   0x6001  /* Neptune 3 */
 
+/*
+ * Ionics PlugComputer
+ */
+#define IONICS_VID                     0x1c0c
+#define IONICS_PLUGCOMPUTER_PID                0x0102
+
 /*
  * Dresden Elektronik Sensor Terminal Board
  */
index ca92f67747cc2f2c4b1f5a411bc3763be9a0b009..e6833e216fc9fa4e3628813d57f14891835caf6a 100644 (file)
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
                tty_insert_flip_string(tty, ch, urb->actual_length);
        else {
                for (i = 0; i < urb->actual_length; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, TTY_NORMAL);
                }
        }
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
 EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
 
 #ifdef CONFIG_MAGIC_SYSRQ
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                       struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
        if (port->sysrq && port->port.console) {
                if (ch && time_before(jiffies, port->sysrq)) {
-                       handle_sysrq(ch, tty);
+                       handle_sysrq(ch);
                        port->sysrq = 0;
                        return 1;
                }
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
        return 0;
 }
 #else
-int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                       struct usb_serial_port *port, unsigned int ch)
+int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
 {
        return 0;
 }
@@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i)
                generic_cleanup(serial->port[i]);
 }
+EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
 
 void usb_serial_generic_release(struct usb_serial *serial)
 {
index dc47f986df57f739b18ceb121dc8f19b0acd8c23..a7cfc59529377b16f084e6bd6e2bf118d4cc9cb5 100644 (file)
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
 
                        /* Check if we have an old version in the I2C and
                           update if necessary */
-                       if (download_cur_ver != download_new_ver) {
+                       if (download_cur_ver < download_new_ver) {
                                dbg("%s - Update I2C dld from %d.%d to %d.%d",
                                    __func__,
                                    firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
                                kfree(header);
                                kfree(rom_desc);
                                kfree(ti_manuf_desc);
-                               return status;
+                               return -EINVAL;
                        }
 
                        /* Update I2C with type 0xf2 record with correct
index a6b207c84917425db5f4c9ea1a1da075a6798002..1f00f243c26cf5e1825632e40fc9c2a60287f9e3 100644 (file)
@@ -25,6 +25,7 @@ static int debug;
 
 static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
+       { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 9fc6ea2c681fae8ce6a43404109ca23c3177a310..adcbdb994de3eab65e0d963538f08df6f6cf1a49 100644 (file)
@@ -365,6 +365,10 @@ static void option_instat_callback(struct urb *urb);
 #define OLIVETTI_VENDOR_ID                     0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
 
+/* Celot products */
+#define CELOT_VENDOR_ID                                0x211f
+#define CELOT_PRODUCT_CT680M                   0x6801
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -887,10 +891,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
        { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-
        { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
-
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+       { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 6b60018222790bed2ec8f77b637a19bfd08caa2a..8ae4c6cbc38a04ea250d43aad68aa43081098245 100644 (file)
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+       { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
        { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
        { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
        { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < urb->actual_length; ++i)
-                       if (!usb_serial_handle_sysrq_char(tty, port, data[i]))
+                       if (!usb_serial_handle_sysrq_char(port, data[i]))
                                tty_insert_flip_char(tty, data[i], tty_flag);
        } else {
                tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
index a871645389dd365764485481bc8ba773c6c68f37..43eb9bdad422c91e4290f2204ee2f178d393b002 100644 (file)
 #define CRESSI_VENDOR_ID       0x04b8
 #define CRESSI_EDY_PRODUCT_ID  0x0521
 
+/* Zeagle dive computer interface */
+#define ZEAGLE_VENDOR_ID       0x04b8
+#define ZEAGLE_N2ITION3_PRODUCT_ID     0x0522
+
 /* Sony, USB data cable for CMD-Jxx mobile phones */
 #define SONY_VENDOR_ID         0x054c
 #define SONY_QN3USB_PRODUCT_ID 0x0437
index 6e82d4f54bc87c167e6052119f052ca35bbfcf14..68c18fdfc6da320978f9212325fd3f1ecf2ec64c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/serial.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
 #include <linux/uaccess.h>
 
 #define QT_OPEN_CLOSE_CHANNEL       0xca
 #define QT_HW_FLOW_CONTROL_MASK     0xc5
 #define QT_SW_FLOW_CONTROL_MASK     0xc6
 
-#define MODEM_CTL_REGISTER         0x04
-#define MODEM_STATUS_REGISTER      0x06
-
-
-#define SERIAL_LSR_OE       0x02
-#define SERIAL_LSR_PE       0x04
-#define SERIAL_LSR_FE       0x08
-#define SERIAL_LSR_BI       0x10
-
-#define SERIAL_LSR_TEMT     0x40
-
-#define  SERIAL_MCR_DTR             0x01
-#define  SERIAL_MCR_RTS             0x02
-#define  SERIAL_MCR_LOOP            0x10
-
-#define  SERIAL_MSR_CTS             0x10
-#define  SERIAL_MSR_CD              0x80
-#define  SERIAL_MSR_RI              0x40
-#define  SERIAL_MSR_DSR             0x20
 #define  SERIAL_MSR_MASK            0xf0
 
-#define  SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS)
+#define  SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
 
-#define  SERIAL_8_DATA              0x03
-#define  SERIAL_7_DATA              0x02
-#define  SERIAL_6_DATA              0x01
-#define  SERIAL_5_DATA              0x00
-
-#define  SERIAL_ODD_PARITY          0X08
-#define  SERIAL_EVEN_PARITY         0X18
+#define  SERIAL_EVEN_PARITY         (UART_LCR_PARITY | UART_LCR_EPAR)
 
 #define  MAX_BAUD_RATE              460800
 
@@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = {
 };
 
 struct ssu100_port_private {
+       spinlock_t status_lock;
        u8 shadowLSR;
        u8 shadowMSR;
        wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
        unsigned short max_packet_size;
+       struct async_icount icount;
 };
 
 static void ssu100_release(struct usb_serial *serial)
@@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev,
 
 static inline int ssu100_setregister(struct usb_device *dev,
                                     unsigned short uart,
+                                    unsigned short reg,
                                     u16 data)
 {
-       u16 value = (data << 8) | MODEM_CTL_REGISTER;
+       u16 value = (data << 8) | reg;
 
        return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                               QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set,
        clear &= ~set;  /* 'set' takes precedence over 'clear' */
        urb_value = 0;
        if (set & TIOCM_DTR)
-               urb_value |= SERIAL_MCR_DTR;
+               urb_value |= UART_MCR_DTR;
        if (set & TIOCM_RTS)
-               urb_value |= SERIAL_MCR_RTS;
+               urb_value |= UART_MCR_RTS;
 
-       result = ssu100_setregister(dev, 0, urb_value);
+       result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
        if (result < 0)
                dbg("%s Error from MODEM_CTRL urb", __func__);
 
@@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty,
 
        if (cflag & PARENB) {
                if (cflag & PARODD)
-                       urb_value |= SERIAL_ODD_PARITY;
+                       urb_value |= UART_LCR_PARITY;
                else
                        urb_value |= SERIAL_EVEN_PARITY;
        }
 
        switch (cflag & CSIZE) {
        case CS5:
-               urb_value |= SERIAL_5_DATA;
+               urb_value |= UART_LCR_WLEN5;
                break;
        case CS6:
-               urb_value |= SERIAL_6_DATA;
+               urb_value |= UART_LCR_WLEN6;
                break;
        case CS7:
-               urb_value |= SERIAL_7_DATA;
+               urb_value |= UART_LCR_WLEN7;
                break;
        default:
        case CS8:
-               urb_value |= SERIAL_8_DATA;
+               urb_value |= UART_LCR_WLEN8;
                break;
        }
 
@@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
        struct ssu100_port_private *priv = usb_get_serial_port_data(port);
        u8 *data;
        int result;
+       unsigned long flags;
 
        dbg("%s - port %d", __func__, port->number);
 
@@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
                return result;
        }
 
-       priv->shadowLSR = data[0]  & (SERIAL_LSR_OE | SERIAL_LSR_PE |
-                                     SERIAL_LSR_FE | SERIAL_LSR_BI);
-
-       priv->shadowMSR = data[1]  & (SERIAL_MSR_CTS | SERIAL_MSR_DSR |
-                                     SERIAL_MSR_RI | SERIAL_MSR_CD);
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowLSR = data[0];
+       priv->shadowMSR = data[1];
+       spin_unlock_irqrestore(&priv->status_lock, flags);
 
        kfree(data);
 
@@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port,
        return 0;
 }
 
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       struct async_icount prev, cur;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       prev = priv->icount;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       while (1) {
+               wait_event_interruptible(priv->delta_msr_wait,
+                                        ((priv->icount.rng != prev.rng) ||
+                                         (priv->icount.dsr != prev.dsr) ||
+                                         (priv->icount.dcd != prev.dcd) ||
+                                         (priv->icount.cts != prev.cts)));
+
+               if (signal_pending(current))
+                       return -ERESTARTSYS;
+
+               spin_lock_irqsave(&priv->status_lock, flags);
+               cur = priv->icount;
+               spin_unlock_irqrestore(&priv->status_lock, flags);
+
+               if ((prev.rng == cur.rng) &&
+                   (prev.dsr == cur.dsr) &&
+                   (prev.dcd == cur.dcd) &&
+                   (prev.cts == cur.cts))
+                       return -EIO;
+
+               if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+                   (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+                   (arg & TIOCM_CD  && (prev.dcd != cur.dcd)) ||
+                   (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+                       return 0;
+       }
+       return 0;
+}
+
 static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
                    unsigned int cmd, unsigned long arg)
 {
        struct usb_serial_port *port = tty->driver_data;
        struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       void __user *user_arg = (void __user *)arg;
 
        dbg("%s cmd 0x%04x", __func__, cmd);
 
@@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
                                       (struct serial_struct __user *) arg);
 
        case TIOCMIWAIT:
-               while (priv != NULL) {
-                       u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK;
-                       interruptible_sleep_on(&priv->delta_msr_wait);
-                       /* see if a signal did it */
-                       if (signal_pending(current))
-                               return -ERESTARTSYS;
-                       else {
-                               u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR;
-                               if (!diff)
-                                       return -EIO; /* no change => error */
-
-                               /* Return 0 if caller wanted to know about
-                                  these bits */
-
-                               if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) ||
-                                   ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) ||
-                                   ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) ||
-                                   ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS)))
-                                       return 0;
-                       }
-               }
+               return wait_modem_info(port, arg);
+
+       case TIOCGICOUNT:
+       {
+               struct serial_icounter_struct icount;
+               struct async_icount cnow = priv->icount;
+               memset(&icount, 0, sizeof(icount));
+               icount.cts = cnow.cts;
+               icount.dsr = cnow.dsr;
+               icount.rng = cnow.rng;
+               icount.dcd = cnow.dcd;
+               icount.rx = cnow.rx;
+               icount.tx = cnow.tx;
+               icount.frame = cnow.frame;
+               icount.overrun = cnow.overrun;
+               icount.parity = cnow.parity;
+               icount.brk = cnow.brk;
+               icount.buf_overrun = cnow.buf_overrun;
+               if (copy_to_user(user_arg, &icount, sizeof(icount)))
+                       return -EFAULT;
                return 0;
+       }
 
        default:
                break;
@@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
 
        unsigned num_endpoints;
        int i;
+       unsigned long flags;
 
        num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
        dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
        }
 
        /* set max packet size based on descriptor */
+       spin_lock_irqsave(&priv->status_lock, flags);
        priv->max_packet_size = ep_desc->wMaxPacketSize;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
 
        dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
@@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial)
                return -ENOMEM;
        }
 
+       spin_lock_init(&priv->status_lock);
        init_waitqueue_head(&priv->delta_msr_wait);
        usb_set_serial_port_data(port, priv);
-
        ssu100_set_max_packet_size(port);
 
        return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
        if (!d)
                return -ENOMEM;
 
-       r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d);
+       r = ssu100_getregister(dev, 0, UART_MCR, d);
        if (r < 0)
                goto mget_out;
 
-       r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1);
+       r = ssu100_getregister(dev, 0, UART_MSR, d+1);
        if (r < 0)
                goto mget_out;
 
-       r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) |
-               (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) |
-               (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) |
-               (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) |
-               (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) |
-               (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0);
+       r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+               (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+               (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+               (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+               (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+               (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
 
 mget_out:
        kfree(d);
@@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
        if (!port->serial->disconnected) {
                /* Disable flow control */
                if (!on &&
-                   ssu100_setregister(dev, 0, 0) < 0)
+                   ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
                        dev_err(&port->dev, "error from flowcontrol urb\n");
                /* drop RTS and DTR */
                if (on)
@@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
        mutex_unlock(&port->serial->disc_mutex);
 }
 
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowMSR = msr;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       if (msr & UART_MSR_ANY_DELTA) {
+               /* update input line counters */
+               if (msr & UART_MSR_DCTS)
+                       priv->icount.cts++;
+               if (msr & UART_MSR_DDSR)
+                       priv->icount.dsr++;
+               if (msr & UART_MSR_DDCD)
+                       priv->icount.dcd++;
+               if (msr & UART_MSR_TERI)
+                       priv->icount.rng++;
+               wake_up_interruptible(&priv->delta_msr_wait);
+       }
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+                             char *tty_flag)
+{
+       struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->status_lock, flags);
+       priv->shadowLSR = lsr;
+       spin_unlock_irqrestore(&priv->status_lock, flags);
+
+       *tty_flag = TTY_NORMAL;
+       if (lsr & UART_LSR_BRK_ERROR_BITS) {
+               /* we always want to update icount, but we only want to
+                * update tty_flag for one case */
+               if (lsr & UART_LSR_BI) {
+                       priv->icount.brk++;
+                       *tty_flag = TTY_BREAK;
+                       usb_serial_handle_break(port);
+               }
+               if (lsr & UART_LSR_PE) {
+                       priv->icount.parity++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_PARITY;
+               }
+               if (lsr & UART_LSR_FE) {
+                       priv->icount.frame++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_FRAME;
+               }
+               if (lsr & UART_LSR_OE){
+                       priv->icount.overrun++;
+                       if (*tty_flag == TTY_NORMAL)
+                               *tty_flag = TTY_OVERRUN;
+               }
+       }
+
+}
+
 static int ssu100_process_packet(struct tty_struct *tty,
                                 struct usb_serial_port *port,
                                 struct ssu100_port_private *priv,
                                 char *packet, int len)
 {
        int i;
-       char flag;
+       char flag = TTY_NORMAL;
        char *ch;
 
        dbg("%s - port %d", __func__, port->number);
 
-       if (len < 4) {
-               dbg("%s - malformed packet", __func__);
-               return 0;
-       }
-
-       if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+       if ((len >= 4) &&
+           (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
            ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-               if (packet[2] == 0x00)
-                       priv->shadowLSR = packet[3] & (SERIAL_LSR_OE |
-                                                      SERIAL_LSR_PE |
-                                                      SERIAL_LSR_FE |
-                                                      SERIAL_LSR_BI);
-
-               if (packet[2] == 0x01) {
-                       priv->shadowMSR = packet[3];
-                       wake_up_interruptible(&priv->delta_msr_wait);
+               if (packet[2] == 0x00) {
+                       ssu100_update_lsr(port, packet[3], &flag);
+                       if (flag == TTY_OVERRUN)
+                               tty_insert_flip_char(tty, 0, TTY_OVERRUN);
                }
+               if (packet[2] == 0x01)
+                       ssu100_update_msr(port, packet[3]);
 
                len -= 4;
                ch = packet + 4;
@@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
 
        if (port->port.console && port->sysrq) {
                for (i = 0; i < len; i++, ch++) {
-                       if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+                       if (!usb_serial_handle_sysrq_char(port, *ch))
                                tty_insert_flip_char(tty, *ch, flag);
                }
        } else
@@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb)
        tty_kref_put(tty);
 }
 
-
 static struct usb_serial_driver ssu100_device = {
        .driver = {
                .owner = THIS_MODULE,
@@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = {
        .tiocmset            = ssu100_tiocmset,
        .ioctl               = ssu100_ioctl,
        .set_termios         = ssu100_set_termios,
+       .disconnect          = usb_serial_generic_disconnect,
 };
 
 static int __init ssu100_init(void)
index 2a982e62963b5b76a7bd894c412690b1ddb9381d..7a2177c79bdefb05fe88d6c67ee5b19cd7fae0f5 100644 (file)
@@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
        serial = create_serial(dev, interface, type);
        if (!serial) {
+               module_put(type->driver.owner);
                dev_err(&interface->dev, "%s - out of memory\n", __func__);
                return -ENOMEM;
        }
@@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface,
 
                id = get_iface_id(type, interface);
                retval = type->probe(serial, id);
-               module_put(type->driver.owner);
 
                if (retval) {
                        dbg("sub driver rejected device");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return retval;
                }
        }
@@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface,
                if (num_bulk_in == 0 || num_bulk_out == 0) {
                        dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return -ENODEV;
                }
        }
@@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface,
                        dev_err(&interface->dev,
                            "Generic device with no bulk out, not allowed.\n");
                        kfree(serial);
+                       module_put(type->driver.owner);
                        return -EIO;
                }
        }
 #endif
        if (!num_ports) {
                /* if this device type has a calc_num_ports function, call it */
-               if (type->calc_num_ports) {
-                       if (!try_module_get(type->driver.owner)) {
-                               dev_err(&interface->dev,
-                                       "module get failed, exiting\n");
-                               kfree(serial);
-                               return -EIO;
-                       }
+               if (type->calc_num_ports)
                        num_ports = type->calc_num_ports(serial);
-                       module_put(type->driver.owner);
-               }
                if (!num_ports)
                        num_ports = type->num_ports;
        }
@@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
        /* if this device type has an attach function, call it */
        if (type->attach) {
-               if (!try_module_get(type->driver.owner)) {
-                       dev_err(&interface->dev,
-                                       "module get failed, exiting\n");
-                       goto probe_error;
-               }
                retval = type->attach(serial);
-               module_put(type->driver.owner);
                if (retval < 0)
                        goto probe_error;
                serial->attached = 1;
@@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
 exit:
        /* success */
        usb_set_intfdata(interface, serial);
+       module_put(type->driver.owner);
        return 0;
 
 probe_error:
        usb_serial_put(serial);
+       module_put(type->driver.owner);
        return -EIO;
 }
 EXPORT_SYMBOL_GPL(usb_serial_probe);
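
The usb-serial.c hunks above stop taking and dropping the sub-driver's module reference around each individual callback and instead rely on the single reference held for the whole of probe, releasing it once on every exit path, success and error alike. A reduced sketch of that invariant; the function and callback names are illustrative, not the driver's.

static int example_probe(struct module *owner, int (*setup)(void))
{
	int ret;

	if (!try_module_get(owner))	/* one reference for the whole probe */
		return -ENODEV;

	ret = setup();			/* sub-driver callbacks run under it */

	module_put(owner);		/* dropped once, on every exit path */
	return ret;
}
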
index 72f91bff29c7d836d86844c224225e1353c76648..13365ba3521853eb738f6fa9b8eb68a010c3b04b 100644 (file)
@@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 #define VALID_EVTCHN(chn)      ((chn) != 0)
 
 static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
 
 /* Constructor for packed IRQ information. */
 static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                irq = find_unbound_irq();
 
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "event");
+                                             handle_edge_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                if (irq < 0)
                        goto out;
 
-               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "ipi");
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "ipi");
 
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                irq = find_unbound_irq();
 
-               set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                             handle_level_irq, "virq");
+               set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+                                             handle_percpu_irq, "virq");
 
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
        .retrigger      = retrigger_dynirq,
 };
 
+static struct irq_chip xen_percpu_chip __read_mostly = {
+       .name           = "xen-percpu",
+
+       .disable        = disable_dynirq,
+       .mask           = disable_dynirq,
+       .unmask         = enable_dynirq,
+
+       .ack            = ack_dynirq,
+};
+
 int xen_set_callback_via(uint64_t via)
 {
        struct xen_hvm_param a;
index 1799bd8903151db422bd28cf1173564f0d6872d4..ef9c7db52077c6d58c0d5f036be0a03bd771dffa 100644 (file)
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
                goto again;
 
        if (sysrq_key != '\0')
-               handle_sysrq(sysrq_key, NULL);
+               handle_sysrq(sysrq_key);
 }
 
 static struct xenbus_watch sysrq_watch = {
index b27f09f05d177fb45cd0dc25f674853656ad3789..9c2d19452d0bf08a4c3642c7596d6056a8456797 100644 (file)
@@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
 fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
 
 # Directories which we _might_ need to create, so we have a rule for them.
-firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all))))
+firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all))))
 
 quiet_cmd_mkdir = MKDIR   $(patsubst $(objtree)/%,%,$@)
       cmd_mkdir = mkdir -p $@
index 5598a0d02295d11eaa3646e137497a083910251e..4cfce1ee31faaf4f2f6aab966acd5d6001753940 100644 (file)
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
 
        /* dirty the head */
        spin_lock(&inode->i_lock);
-       if (ci->i_wrbuffer_ref_head == 0)
+       if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
        if (ci->i_wrbuffer_ref == 0)
@@ -105,13 +105,7 @@ static int ceph_set_page_dirty(struct page *page)
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(!PageUptodate(page));
-
-               if (mapping_cap_account_dirty(mapping)) {
-                       __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       __inc_bdi_stat(mapping->backing_dev_info,
-                                       BDI_RECLAIMABLE);
-                       task_io_account_write(PAGE_CACHE_SIZE);
-               }
+               account_page_dirtied(page, page->mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
 
@@ -352,7 +346,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                        break;
                }
        }
-       if (!snapc && ci->i_head_snapc) {
+       if (!snapc && ci->i_wrbuffer_ref_head) {
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
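
Note: the middle hunk above replaces open-coded zone/bdi dirty accounting with the account_page_dirtied() helper. A simplified sketch of a set_page_dirty implementation using it, modeled loosely on the generic __set_page_dirty_nobuffers() rather than on the ceph code itself:

static int my_set_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (TestSetPageDirty(page))
                return 0;                       /* already dirty */

        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {                    /* lost a race with truncate? */
                account_page_dirtied(page, page->mapping);
                radix_tree_tag_set(&mapping->page_tree, page_index(page),
                                   PAGECACHE_TAG_DIRTY);
        }
        spin_unlock_irq(&mapping->tree_lock);
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return 1;
}
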
index 582e0b2caf8a3ce6c2b943c0980c119be7950a63..a2d002cbdec23d15d30624a345ae6090ea452a53 100644 (file)
@@ -376,7 +376,7 @@ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
 
                th = get_ticket_handler(ac, service);
 
-               if (!th) {
+               if (IS_ERR(th)) {
                        *pneed |= service;
                        continue;
                }
@@ -399,6 +399,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th =
                get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
 
+       if (IS_ERR(th))
+               return PTR_ERR(th);
+
        ceph_x_validate_tickets(ac, &need);
 
        dout("build_request want %x have %x need %x\n",
@@ -450,7 +453,6 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
                        return -ERANGE;
                head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
 
-               BUG_ON(!th);
                ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
                if (ret)
                        return ret;
@@ -505,7 +507,8 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
 
        case CEPHX_GET_PRINCIPAL_SESSION_KEY:
                th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-               BUG_ON(!th);
+               if (IS_ERR(th))
+                       return PTR_ERR(th);
                ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
                                               buf + sizeof(*head), end);
                break;
@@ -563,8 +566,8 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
        void *end = p + sizeof(au->reply_buf);
 
        th = get_ticket_handler(ac, au->service);
-       if (!th)
-               return -EIO;  /* hrm! */
+       if (IS_ERR(th))
+               return PTR_ERR(th);
        ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
        if (ret < 0)
                return ret;
@@ -626,7 +629,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
        struct ceph_x_ticket_handler *th;
 
        th = get_ticket_handler(ac, peer_type);
-       if (th && !IS_ERR(th))
+       if (!IS_ERR(th))
                remove_ticket_handler(ac, th);
 }
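
Note: these hunks track a change in get_ticket_handler(), which now returns ERR_PTR() codes instead of NULL, so every caller tests IS_ERR() and propagates PTR_ERR() rather than BUG()ing on a missing handler. A generic sketch of that convention, with hypothetical helpers and types (struct table, struct thing, find_thing, add_thing, do_work):

#include <linux/err.h>

static struct thing *thing_lookup(struct table *t, int id)
{
        struct thing *th = find_thing(t, id);   /* assumed existing lookup */

        if (th)
                return th;
        th = kzalloc(sizeof(*th), GFP_NOFS);
        if (!th)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        add_thing(t, id, th);
        return th;
}

static int thing_use(struct table *t, int id)
{
        struct thing *th = thing_lookup(t, id);

        if (IS_ERR(th))
                return PTR_ERR(th);             /* propagate, never dereference */
        return do_work(th);
}
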
 
index 7bf182b0397396f961151a1c1b59218bc1cb2749..a2069b6680aed83eb0be0af7c584b4a619ae239a 100644 (file)
@@ -1082,6 +1082,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        gid_t gid;
        struct ceph_mds_session *session;
        u64 xattr_version = 0;
+       struct ceph_buffer *xattr_blob = NULL;
        int delayed = 0;
        u64 flush_tid = 0;
        int i;
@@ -1142,6 +1143,10 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                for (i = 0; i < CEPH_CAP_BITS; i++)
                        if (flushing & (1 << i))
                                ci->i_cap_flush_tid[i] = flush_tid;
+
+               follows = ci->i_head_snapc->seq;
+       } else {
+               follows = 0;
        }
 
        keep = cap->implemented;
@@ -1155,14 +1160,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        mtime = inode->i_mtime;
        atime = inode->i_atime;
        time_warp_seq = ci->i_time_warp_seq;
-       follows = ci->i_snap_realm->cached_context->seq;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mode = inode->i_mode;
 
-       if (dropping & CEPH_CAP_XATTR_EXCL) {
+       if (flushing & CEPH_CAP_XATTR_EXCL) {
                __ceph_build_xattrs_blob(ci);
-               xattr_version = ci->i_xattrs.version + 1;
+               xattr_blob = ci->i_xattrs.blob;
+               xattr_version = ci->i_xattrs.version;
        }
 
        spin_unlock(&inode->i_lock);
@@ -1170,9 +1175,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
                size, max_size, &mtime, &atime, time_warp_seq,
-               uid, gid, mode,
-               xattr_version,
-               (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+               uid, gid, mode, xattr_version, xattr_blob,
                follows);
        if (ret < 0) {
                dout("error sending cap msg, must requeue %p\n", inode);
@@ -1282,7 +1285,7 @@ retry:
                             &capsnap->mtime, &capsnap->atime,
                             capsnap->time_warp_seq,
                             capsnap->uid, capsnap->gid, capsnap->mode,
-                            0, NULL,
+                            capsnap->xattr_version, capsnap->xattr_blob,
                             capsnap->follows);
 
                next_follows = capsnap->follows + 1;
@@ -1332,7 +1335,11 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
             ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
-               dout(" inode %p now dirty\n", &ci->vfs_inode);
+               if (!ci->i_head_snapc)
+                       ci->i_head_snapc = ceph_get_snap_context(
+                               ci->i_snap_realm->cached_context);
+               dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
+                       ci->i_head_snapc);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
@@ -2190,7 +2197,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
 
        if (ci->i_head_snapc == snapc) {
                ci->i_wrbuffer_ref_head -= nr;
-               if (!ci->i_wrbuffer_ref_head) {
+               if (ci->i_wrbuffer_ref_head == 0 &&
+                   ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
+                       BUG_ON(!ci->i_head_snapc);
                        ceph_put_snap_context(ci->i_head_snapc);
                        ci->i_head_snapc = NULL;
                }
@@ -2483,6 +2492,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                        dout(" inode %p now clean\n", inode);
                        BUG_ON(!list_empty(&ci->i_dirty_item));
                        drop = 1;
+                       if (ci->i_wrbuffer_ref_head == 0) {
+                               BUG_ON(!ci->i_head_snapc);
+                               ceph_put_snap_context(ci->i_head_snapc);
+                               ci->i_head_snapc = NULL;
+                       }
                } else {
                        BUG_ON(list_empty(&ci->i_dirty_item));
                }
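
Note: taken together, the caps.c hunks tie the lifetime of ci->i_head_snapc to "dirty data or dirty/flushing caps exist": the reference is taken when the first cap is dirtied and dropped only after dirty pages, dirty caps and flushing caps have all drained. A compressed sketch of that get/put discipline, with invented field and helper names:

struct my_inode_info {
        spinlock_t lock;
        int dirty_pages, dirty_caps, flushing_caps;
        struct my_snapc *head_snapc;    /* held while any counter above is > 0 */
};

static void my_mark_caps_dirty(struct my_inode_info *ci, struct my_snapc *cur)
{
        spin_lock(&ci->lock);
        if (!ci->head_snapc)
                ci->head_snapc = my_get_snapc(cur);  /* first dirtier takes the ref */
        ci->dirty_caps++;
        spin_unlock(&ci->lock);
}

static void my_caps_clean(struct my_inode_info *ci)
{
        spin_lock(&ci->lock);
        if (--ci->dirty_caps == 0 &&
            ci->dirty_pages == 0 && ci->flushing_caps == 0) {
                my_put_snapc(ci->head_snapc);        /* last user drops the ref */
                ci->head_snapc = NULL;
        }
        spin_unlock(&ci->lock);
}
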
index 360c4f22718d5e079c2ce99c2484af9376751a76..6fd8b20a86112c367c788a20c2f134108acc40e8 100644 (file)
@@ -171,6 +171,8 @@ static int mdsc_show(struct seq_file *s, void *p)
                } else if (req->r_dentry) {
                        path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                    &pathbase, 0);
+                       if (IS_ERR(path))
+                               path = NULL;
                        spin_lock(&req->r_dentry->d_lock);
                        seq_printf(s, " #%llx/%.*s (%s)",
                                   ceph_ino(req->r_dentry->d_parent->d_inode),
@@ -187,6 +189,8 @@ static int mdsc_show(struct seq_file *s, void *p)
                if (req->r_old_dentry) {
                        path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
                                                    &pathbase, 0);
+                       if (IS_ERR(path))
+                               path = NULL;
                        spin_lock(&req->r_old_dentry->d_lock);
                        seq_printf(s, " #%llx/%.*s (%s)",
                           ceph_ino(req->r_old_dentry->d_parent->d_inode),
index 67bbb41d5526fcbdfb74f48f0e261b59b957ef9c..6e4f43ff23ec587050eab1b0e735e8d519827c85 100644 (file)
@@ -46,7 +46,7 @@ int ceph_init_dentry(struct dentry *dentry)
        else
                dentry->d_op = &ceph_snap_dentry_ops;
 
-       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+       di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */
 
index 5d893d31e399b81f57543ede560dc2258f7400da..e7cca414da03bcbd7549889a5ecb00d05ee11901 100644 (file)
@@ -677,6 +677,7 @@ static int fill_inode(struct inode *inode,
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
+                   (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
@@ -1229,11 +1230,11 @@ retry_lookup:
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
-                       if (in == NULL) {
+                       if (IS_ERR(in)) {
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
-                               err = -ENOMEM;
+                               err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL);
index ae85af06454fd3544840329be8639a0910d2cb3d..ff4e753aae929d37d414567d22fd6afef7316c7e 100644 (file)
@@ -82,7 +82,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                length = fl->fl_end - fl->fl_start + 1;
 
        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                               (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                               (u64)fl->fl_pid,
+                               (u64)(unsigned long)fl->fl_nspid,
                                lock_cmd, fl->fl_start,
                                length, wait);
        if (!err) {
@@ -92,7 +93,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                        /* undo! This should only happen if the kernel detects
                         * local deadlock. */
                        ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
-                                         (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                                         (u64)fl->fl_pid,
+                                         (u64)(unsigned long)fl->fl_nspid,
                                          CEPH_LOCK_UNLOCK, fl->fl_start,
                                          length, 0);
                        dout("got %d on posix_lock_file, undid lock", err);
@@ -132,7 +134,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                length = fl->fl_end - fl->fl_start + 1;
 
        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
-                               file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+                               file, (u64)fl->fl_pid,
+                               (u64)(unsigned long)fl->fl_nspid,
                                lock_cmd, fl->fl_start,
                                length, wait);
        if (!err) {
@@ -141,7 +144,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          file, (u64)fl->fl_pid,
-                                         (u64)fl->fl_nspid,
+                                         (u64)(unsigned long)fl->fl_nspid,
                                          CEPH_LOCK_UNLOCK, fl->fl_start,
                                          length, 0);
                        dout("got %d on flock_lock_file_wait, undid lock", err);
@@ -235,7 +238,8 @@ int lock_to_ceph_filelock(struct file_lock *lock,
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64(lock->fl_pid);
-       cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+       cephlock->pid_namespace =
+               cpu_to_le64((u64)(unsigned long)lock->fl_nspid);
 
        switch (lock->fl_type) {
        case F_RDLCK:
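
Note: every hunk in this file widens a pointer to u64 through unsigned long. Casting a 32-bit pointer straight to u64 draws gcc's "cast from pointer to integer of different size" warning; unsigned long always matches the pointer width in the kernel, so the two-step cast is clean on both 32- and 64-bit builds. For illustration:

struct pid *nspid = fl->fl_nspid;       /* fl: a struct file_lock *, as above */
u64 cookie;

cookie = (u64)(unsigned long)nspid;     /* fine everywhere */
/* cookie = (u64)nspid;                    warns on 32-bit builds */
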
index a75ddbf9fe3743973c6fd02724f005b8504c3738..f091b1351786368de18757d8cb262a19d1006bf1 100644 (file)
@@ -560,6 +560,13 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
  *
  * Called under mdsc->mutex.
  */
+struct dentry *get_nonsnap_parent(struct dentry *dentry)
+{
+       while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
+               dentry = dentry->d_parent;
+       return dentry;
+}
+
 static int __choose_mds(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
 {
@@ -590,14 +597,29 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        if (req->r_inode) {
                inode = req->r_inode;
        } else if (req->r_dentry) {
-               if (req->r_dentry->d_inode) {
+               struct inode *dir = req->r_dentry->d_parent->d_inode;
+
+               if (dir->i_sb != mdsc->client->sb) {
+                       /* not this fs! */
+                       inode = req->r_dentry->d_inode;
+               } else if (ceph_snap(dir) != CEPH_NOSNAP) {
+                       /* direct snapped/virtual snapdir requests
+                        * based on parent dir inode */
+                       struct dentry *dn =
+                               get_nonsnap_parent(req->r_dentry->d_parent);
+                       inode = dn->d_inode;
+                       dout("__choose_mds using nonsnap parent %p\n", inode);
+               } else if (req->r_dentry->d_inode) {
+                       /* dentry target */
                        inode = req->r_dentry->d_inode;
                } else {
-                       inode = req->r_dentry->d_parent->d_inode;
+                       /* dir + name */
+                       inode = dir;
                        hash = req->r_dentry->d_name.hash;
                        is_hash = true;
                }
        }
+
        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
             (int)hash, mode);
        if (!inode)
@@ -2208,7 +2230,7 @@ static void handle_session(struct ceph_mds_session *session,
                        pr_info("mds%d reconnect denied\n", session->s_mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
-               complete_all(&mdsc->session_close_waiters);
+               wake_up_all(&mdsc->session_close_wq);
                kick_requests(mdsc, mds);
                break;
 
@@ -2302,7 +2324,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
-                       BUG_ON(err);
+                       goto out_dput;
                }
        } else {
                path = NULL;
@@ -2310,7 +2332,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        }
        err = ceph_pagelist_encode_string(pagelist, path, pathlen);
        if (err)
-               goto out;
+               goto out_free;
 
        spin_lock(&inode->i_lock);
        cap->seq = 0;        /* reset cap seq */
@@ -2354,8 +2376,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                unlock_kernel();
        }
 
-out:
+out_free:
        kfree(path);
+out_dput:
        dput(dentry);
        return err;
 }
@@ -2876,7 +2899,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
                return -ENOMEM;
 
        init_completion(&mdsc->safe_umount_waiters);
-       init_completion(&mdsc->session_close_waiters);
+       init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
        mdsc->max_sessions = 0;
@@ -3021,6 +3044,23 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
 }
 
+/*
+ * true if all sessions are closed, or we force unmount
+ */
+bool done_closing_sessions(struct ceph_mds_client *mdsc)
+{
+       int i, n = 0;
+
+       if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN)
+               return true;
+
+       mutex_lock(&mdsc->mutex);
+       for (i = 0; i < mdsc->max_sessions; i++)
+               if (mdsc->sessions[i])
+                       n++;
+       mutex_unlock(&mdsc->mutex);
+       return n == 0;
+}
 
 /*
  * called after sb is ro.
@@ -3029,45 +3069,32 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
        struct ceph_mds_session *session;
        int i;
-       int n;
        struct ceph_client *client = mdsc->client;
-       unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+       unsigned long timeout = client->mount_args->mount_timeout * HZ;
 
        dout("close_sessions\n");
 
-       mutex_lock(&mdsc->mutex);
-
        /* close sessions */
-       started = jiffies;
-       while (time_before(jiffies, started + timeout)) {
-               dout("closing sessions\n");
-               n = 0;
-               for (i = 0; i < mdsc->max_sessions; i++) {
-                       session = __ceph_lookup_mds_session(mdsc, i);
-                       if (!session)
-                               continue;
-                       mutex_unlock(&mdsc->mutex);
-                       mutex_lock(&session->s_mutex);
-                       __close_session(mdsc, session);
-                       mutex_unlock(&session->s_mutex);
-                       ceph_put_mds_session(session);
-                       mutex_lock(&mdsc->mutex);
-                       n++;
-               }
-               if (n == 0)
-                       break;
-
-               if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
-                       break;
-
-               dout("waiting for sessions to close\n");
+       mutex_lock(&mdsc->mutex);
+       for (i = 0; i < mdsc->max_sessions; i++) {
+               session = __ceph_lookup_mds_session(mdsc, i);
+               if (!session)
+                       continue;
                mutex_unlock(&mdsc->mutex);
-               wait_for_completion_timeout(&mdsc->session_close_waiters,
-                                           timeout);
+               mutex_lock(&session->s_mutex);
+               __close_session(mdsc, session);
+               mutex_unlock(&session->s_mutex);
+               ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
        }
+       mutex_unlock(&mdsc->mutex);
+
+       dout("waiting for sessions to close\n");
+       wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
+                          timeout);
 
        /* tear down remaining sessions */
+       mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
@@ -3080,9 +3107,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
                        mutex_lock(&mdsc->mutex);
                }
        }
-
        WARN_ON(!list_empty(&mdsc->cap_delay_list));
-
        mutex_unlock(&mdsc->mutex);
 
        ceph_cleanup_empty_realms(mdsc);
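
Note: the session-shutdown rework above drops the completion-plus-polling loop in favor of a wait queue and a predicate: close requests are sent once, the caller sleeps in wait_event_timeout() until done_closing_sessions() holds or the mount timeout expires, and handle_session() calls wake_up_all() as sessions disappear. A skeleton of that pattern with invented names:

#include <linux/wait.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_close_wq);
static int my_open_sessions;            /* protected by my_lock */

static bool my_all_closed(void)
{
        bool done;

        spin_lock(&my_lock);
        done = (my_open_sessions == 0);
        spin_unlock(&my_lock);
        return done;
}

static void my_session_closed(void)
{
        spin_lock(&my_lock);
        my_open_sessions--;
        spin_unlock(&my_lock);
        wake_up_all(&my_close_wq);      /* let waiters re-check the predicate */
}

static void my_shutdown(unsigned long timeout)
{
        /* ... send a close request for every open session ... */
        wait_event_timeout(my_close_wq, my_all_closed(), timeout);
}
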
index ab7e89f5e344b14d44500866625f378a2c849d57..c98267ce6d2ad97e1d9c86bc0660e2d82d39366c 100644 (file)
@@ -234,7 +234,8 @@ struct ceph_mds_client {
        struct mutex            mutex;         /* all nested structures */
 
        struct ceph_mdsmap      *mdsmap;
-       struct completion       safe_umount_waiters, session_close_waiters;
+       struct completion       safe_umount_waiters;
+       wait_queue_head_t       session_close_wq;
        struct list_head        waiting_for_map;
 
        struct ceph_mds_session **sessions;    /* NULL for mds if no session */
index bed6391e52c7109c966d5e642e472c665bd7d1f5..dfced1dacbcdcb47178f2c9676275869c2105296 100644 (file)
@@ -661,7 +661,7 @@ static int __send_request(struct ceph_osd_client *osdc,
        reqhead->reassert_version = req->r_reassert_version;
 
        req->r_stamp = jiffies;
-       list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
+       list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
 
        ceph_msg_get(req->r_request); /* send consumes a ref */
        ceph_con_send(&req->r_osd->o_con, req->r_request);
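
Note: the one-line fix above is an argument-order bug: list_move_tail(entry, head) moves entry to the tail of head, so passing the list head first splices the LRU head onto the request instead. A reminder of the correct call, with hypothetical types:

#include <linux/list.h>

static LIST_HEAD(req_lru);              /* the LRU list head */

struct my_request {
        struct list_head lru_item;      /* linkage onto req_lru */
};

static void my_touch(struct my_request *req)
{
        /* first argument: the entry to move; second: the destination head */
        list_move_tail(&req->lru_item, &req_lru);
}
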
index c0b26b6badba7f53d0d8b32be33c165fb762922e..4868b9dcac5a6cc7a4d00610780572f335ef68f2 100644 (file)
@@ -435,7 +435,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap_snap *capsnap;
-       int used;
+       int used, dirty;
 
        capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
        if (!capsnap) {
@@ -445,6 +445,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
        spin_lock(&inode->i_lock);
        used = __ceph_caps_used(ci);
+       dirty = __ceph_caps_dirty(ci);
        if (__ceph_have_pending_cap_snap(ci)) {
                /* there is no point in queuing multiple "pending" cap_snaps,
                   as no new writes are allowed to start when pending, so any
@@ -452,11 +453,15 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                   cap_snap.  lucky us. */
                dout("queue_cap_snap %p already pending\n", inode);
                kfree(capsnap);
-       } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+       } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR) ||
+                  (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
+                            CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR))) {
                struct ceph_snap_context *snapc = ci->i_head_snapc;
 
+               dout("queue_cap_snap %p cap_snap %p queuing under %p\n", inode,
+                    capsnap, snapc);
                igrab(inode);
-
+               
                atomic_set(&capsnap->nref, 1);
                capsnap->ci = ci;
                INIT_LIST_HEAD(&capsnap->ci_item);
@@ -464,15 +469,21 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 
                capsnap->follows = snapc->seq - 1;
                capsnap->issued = __ceph_caps_issued(ci, NULL);
-               capsnap->dirty = __ceph_caps_dirty(ci);
+               capsnap->dirty = dirty;
 
                capsnap->mode = inode->i_mode;
                capsnap->uid = inode->i_uid;
                capsnap->gid = inode->i_gid;
 
-               /* fixme? */
-               capsnap->xattr_blob = NULL;
-               capsnap->xattr_len = 0;
+               if (dirty & CEPH_CAP_XATTR_EXCL) {
+                       __ceph_build_xattrs_blob(ci);
+                       capsnap->xattr_blob =
+                               ceph_buffer_get(ci->i_xattrs.blob);
+                       capsnap->xattr_version = ci->i_xattrs.version;
+               } else {
+                       capsnap->xattr_blob = NULL;
+                       capsnap->xattr_version = 0;
+               }
 
                /* dirty page count moved from _head to this cap_snap;
                   all subsequent writes page dirties occur _after_ this
@@ -480,7 +491,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
                ci->i_wrbuffer_ref_head = 0;
                capsnap->context = snapc;
-               ci->i_head_snapc = NULL;
+               ci->i_head_snapc =
+                       ceph_get_snap_context(ci->i_snap_realm->cached_context);
+               dout(" new snapc is %p\n", ci->i_head_snapc);
                list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
                if (used & CEPH_CAP_FILE_WR) {
@@ -539,6 +552,41 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
        return 1;  /* caller may want to ceph_flush_snaps */
 }
 
+/*
+ * Queue cap_snaps for snap writeback for this realm and its children.
+ * Called under snap_rwsem, so realm topology won't change.
+ */
+static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+{
+       struct ceph_inode_info *ci;
+       struct inode *lastinode = NULL;
+       struct ceph_snap_realm *child;
+
+       dout("queue_realm_cap_snaps %p %llx inodes\n", realm, realm->ino);
+
+       spin_lock(&realm->inodes_with_caps_lock);
+       list_for_each_entry(ci, &realm->inodes_with_caps,
+                           i_snap_realm_item) {
+               struct inode *inode = igrab(&ci->vfs_inode);
+               if (!inode)
+                       continue;
+               spin_unlock(&realm->inodes_with_caps_lock);
+               if (lastinode)
+                       iput(lastinode);
+               lastinode = inode;
+               ceph_queue_cap_snap(ci);
+               spin_lock(&realm->inodes_with_caps_lock);
+       }
+       spin_unlock(&realm->inodes_with_caps_lock);
+       if (lastinode)
+               iput(lastinode);
+
+       dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino);
+       list_for_each_entry(child, &realm->children, child_item)
+               queue_realm_cap_snaps(child);
+
+       dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
+}
 
 /*
  * Parse and apply a snapblob "snap trace" from the MDS.  This specifies
@@ -589,29 +637,8 @@ more:
                 *
                 * ...unless it's a snap deletion!
                 */
-               if (!deletion) {
-                       struct ceph_inode_info *ci;
-                       struct inode *lastinode = NULL;
-
-                       spin_lock(&realm->inodes_with_caps_lock);
-                       list_for_each_entry(ci, &realm->inodes_with_caps,
-                                           i_snap_realm_item) {
-                               struct inode *inode = igrab(&ci->vfs_inode);
-                               if (!inode)
-                                       continue;
-                               spin_unlock(&realm->inodes_with_caps_lock);
-                               if (lastinode)
-                                       iput(lastinode);
-                               lastinode = inode;
-                               ceph_queue_cap_snap(ci);
-                               spin_lock(&realm->inodes_with_caps_lock);
-                       }
-                       spin_unlock(&realm->inodes_with_caps_lock);
-                       if (lastinode)
-                               iput(lastinode);
-                       dout("update_snap_trace cap_snaps queued\n");
-               }
-
+               if (!deletion)
+                       queue_realm_cap_snaps(realm);
        } else {
                dout("update_snap_trace %llx %p seq %lld unchanged\n",
                     realm->ino, realm, realm->seq);
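
Note: queue_realm_cap_snaps() keeps the idiom the removed inline loop used: while walking a spinlock-protected inode list it pins each inode with igrab(), drops the lock before doing work that may sleep, and defers iput() of the previous inode until the lock has been retaken so the current list position stays valid. A stripped-down sketch with invented names (my_inode_info, my_list, my_lock, i_my_item, my_do_work):

/* my_inode_info is an invented container with an embedded struct inode vfs_inode */
static void my_walk(struct list_head *my_list, spinlock_t *my_lock)
{
        struct my_inode_info *ci;
        struct inode *last = NULL;

        spin_lock(my_lock);
        list_for_each_entry(ci, my_list, i_my_item) {
                struct inode *inode = igrab(&ci->vfs_inode);
                if (!inode)
                        continue;               /* inode is being torn down */
                spin_unlock(my_lock);           /* never sleep under the lock */
                if (last)
                        iput(last);             /* iput() may sleep */
                last = inode;
                my_do_work(ci);                 /* may sleep */
                spin_lock(my_lock);             /* our ref keeps ci on the list */
        }
        spin_unlock(my_lock);
        if (last)
                iput(last);
}
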
index 2482d696f0de17f2fcccdedfc79d2ad4a14c6664..c33897ae5725e82ca269606b78d54214d8abf7af 100644 (file)
@@ -216,8 +216,7 @@ struct ceph_cap_snap {
        uid_t uid;
        gid_t gid;
 
-       void *xattr_blob;
-       int xattr_len;
+       struct ceph_buffer *xattr_blob;
        u64 xattr_version;
 
        u64 size;
@@ -229,8 +228,11 @@ struct ceph_cap_snap {
 
 static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 {
-       if (atomic_dec_and_test(&capsnap->nref))
+       if (atomic_dec_and_test(&capsnap->nref)) {
+               if (capsnap->xattr_blob)
+                       ceph_buffer_put(capsnap->xattr_blob);
                kfree(capsnap);
+       }
 }
 
 /*
@@ -342,7 +344,8 @@ struct ceph_inode_info {
        unsigned i_cap_exporting_issued;
        struct ceph_cap_reservation i_cap_migration_resv;
        struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
-       struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 */
+       struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
+                                                   dirty|flushing caps */
        unsigned i_snap_caps;           /* cap bits for snapped files */
 
        int i_nr_by_mode[CEPH_FILE_MODE_NUM];  /* open file counts */
index 097a2654c00f517c004ef429b3c0046664f205c3..9578af610b73fb48b69872ddeed222e58c8340f0 100644 (file)
@@ -485,6 +485,7 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
                ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
                ci->i_xattrs.prealloc_blob = NULL;
                ci->i_xattrs.dirty = false;
+               ci->i_xattrs.version++;
        }
 }
 
index 917b7d449bb2a6248c28c23284cf82d9c032f285..0da1debd499d1845420753f50ef7b7da6765c237 100644 (file)
@@ -2,6 +2,8 @@ config CIFS
        tristate "CIFS support (advanced network filesystem, SMBFS successor)"
        depends on INET
        select NLS
+       select CRYPTO_MD5
+       select CRYPTO_ARC4
        help
          This is the client VFS module for the Common Internet File System
          (CIFS) protocol which is the successor to the Server Message Block
index cfd1ce34e0bc7b8c4794c81e1aa937e7f009ca75..21f0fbd86989a6811dd6323668f0836079f7e621 100644 (file)
@@ -597,13 +597,13 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                                if (compare_oid(oid, oidlen, MSKRB5_OID,
                                                MSKRB5_OID_LEN))
                                        server->sec_mskerberos = true;
-                               else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+                               if (compare_oid(oid, oidlen, KRB5U2U_OID,
                                                     KRB5U2U_OID_LEN))
                                        server->sec_kerberosu2u = true;
-                               else if (compare_oid(oid, oidlen, KRB5_OID,
+                               if (compare_oid(oid, oidlen, KRB5_OID,
                                                     KRB5_OID_LEN))
                                        server->sec_kerberos = true;
-                               else if (compare_oid(oid, oidlen, NTLMSSP_OID,
+                               if (compare_oid(oid, oidlen, NTLMSSP_OID,
                                                     NTLMSSP_OID_LEN))
                                        server->sec_ntlmssp = true;
 
index 650638275a6feed35d31fdbc183e57092b872f33..7fe6b52df5076a2eb14a7ec563403795e2e4c041 100644 (file)
@@ -30,6 +30,8 @@
  *     This is a compressed table of upper and lower case conversion.
  *
  */
+#ifndef _CIFS_UNICODE_H
+#define _CIFS_UNICODE_H
 
 #include <asm/byteorder.h>
 #include <linux/types.h>
@@ -67,8 +69,8 @@ extern const struct UniCaseRange CifsUniUpperRange[];
 #endif                         /* UNIUPR_NOUPPER */
 
 #ifndef UNIUPR_NOLOWER
-extern signed char UniLowerTable[512];
-extern struct UniCaseRange UniLowerRange[];
+extern signed char CifsUniLowerTable[512];
+extern const struct UniCaseRange CifsUniLowerRange[];
 #endif                         /* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
@@ -337,15 +339,15 @@ UniStrupr(register wchar_t *upin)
  * UniTolower:  Convert a unicode character to lower case
  */
 static inline wchar_t
-UniTolower(wchar_t uc)
+UniTolower(register wchar_t uc)
 {
-       register struct UniCaseRange *rp;
+       register const struct UniCaseRange *rp;
 
-       if (uc < sizeof(UniLowerTable)) {
+       if (uc < sizeof(CifsUniLowerTable)) {
                /* Latin characters */
-               return uc + UniLowerTable[uc];  /* Use base tables */
+               return uc + CifsUniLowerTable[uc];      /* Use base tables */
        } else {
-               rp = UniLowerRange;     /* Use range tables */
+               rp = CifsUniLowerRange; /* Use range tables */
                while (rp->start) {
                        if (uc < rp->start)     /* Before start of range */
                                return uc;      /* Uppercase = input */
@@ -374,3 +376,5 @@ UniStrlwr(register wchar_t *upin)
 }
 
 #endif
+
+#endif /* _CIFS_UNICODE_H */
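
Note: besides switching to the renamed Cifs* lower-case tables, the header gains a standard include guard, so a translation unit that pulls it in more than once (directly and via another header) no longer hits redefinition errors on its inline helpers. The pattern, for reference:

#ifndef _MY_HEADER_H
#define _MY_HEADER_H

/* declarations here are seen at most once per translation unit,
 * however many times the header is #included */

#endif /* _MY_HEADER_H */
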
index 18a9d978e5190dfc9e3974fdf46f193d0c29d8b6..0ac7c5a8633a777dd1d804138ea82aff9f957521 100644 (file)
@@ -140,7 +140,7 @@ const struct UniCaseRange CifsUniUpperRange[] = {
 /*
  * Latin lower case
  */
-static signed char CifsUniLowerTable[512] = {
+signed char CifsUniLowerTable[512] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */
@@ -242,12 +242,12 @@ static signed char UniCaseRangeLff20[27] = {
 /*
  * Lower Case Range
  */
-static const struct UniCaseRange CifsUniLowerRange[] = {
-       0x0380, 0x03ab, UniCaseRangeL0380,
-       0x0400, 0x042f, UniCaseRangeL0400,
-       0x0490, 0x04cb, UniCaseRangeL0490,
-       0x1e00, 0x1ff7, UniCaseRangeL1e00,
-       0xff20, 0xff3a, UniCaseRangeLff20,
-       0, 0, 0
+const struct UniCaseRange CifsUniLowerRange[] = {
+       {0x0380, 0x03ab, UniCaseRangeL0380},
+       {0x0400, 0x042f, UniCaseRangeL0400},
+       {0x0490, 0x04cb, UniCaseRangeL0490},
+       {0x1e00, 0x1ff7, UniCaseRangeL1e00},
+       {0xff20, 0xff3a, UniCaseRangeLff20},
+       {0}
 };
 #endif
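
Note: the table changes above drop the static qualifiers so the extern declarations added to cifs_unicode.h resolve, and wrap every UniCaseRange element in its own braces, which gcc otherwise flags with -Wmissing-braces. A sketch of the braced form, assuming a range struct of start, end and a table pointer (names invented):

struct my_case_range {
        int start, end;
        const signed char *table;
};

const struct my_case_range my_lower_range[] = {
        {0x0380, 0x03ab, my_table_0380},        /* one brace pair per element */
        {0x0400, 0x042f, my_table_0400},
        {0}                                     /* terminator */
};
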
index 847628dfdc44fa95c5c0ea232b8b28ccb03347e3..709f2296bdb4930b2eee0dcb7dd341f94ca42e9b 100644 (file)
@@ -27,6 +27,7 @@
 #include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
+#include "ntlmssp.h"
 #include <linux/ctype.h>
 #include <linux/random.h>
 
@@ -42,21 +43,43 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                       unsigned char *p24);
 
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
-                                   const struct mac_key *key, char *signature)
+                       struct TCP_Server_Info *server, char *signature)
 {
-       struct  MD5Context context;
+       int rc;
 
-       if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
+       if (cifs_pdu == NULL || server == NULL || signature == NULL)
                return -EINVAL;
 
-       cifs_MD5_init(&context);
-       cifs_MD5_update(&context, (char *)&key->data, key->len);
-       cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1,
+                       "cifs_calculate_signature: can't generate signature\n");
+               return -1;
+       }
 
-       cifs_MD5_final(signature, &context);
-       return 0;
+       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+       if (rc) {
+               cERROR(1, "cifs_calculate_signature: could not init md5\n");
+               return rc;
+       }
+
+       if (server->secType == RawNTLMSSP)
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       server->session_key.data.ntlmv2.key,
+                       CIFS_NTLMV2_SESSKEY_SIZE);
+       else
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       (char *)&server->session_key.data,
+                       server->session_key.len);
+
+       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+
+       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
+
+       return rc;
 }
 
+
 int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
                  __u32 *pexpected_response_sequence_number)
 {
@@ -78,8 +101,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
-                                     smb_signature);
+       rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -89,21 +111,39 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
 }
 
 static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
-                               const struct mac_key *key, char *signature)
+                       struct TCP_Server_Info *server, char *signature)
 {
-       struct  MD5Context context;
        int i;
+       int rc;
 
-       if ((iov == NULL) || (signature == NULL) || (key == NULL))
+       if (iov == NULL || server == NULL || signature == NULL)
                return -EINVAL;
 
-       cifs_MD5_init(&context);
-       cifs_MD5_update(&context, (char *)&key->data, key->len);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1, "cifs_calc_signature2: can't generate signature\n");
+               return -1;
+       }
+
+       rc = crypto_shash_init(&server->ntlmssp.sdescmd5->shash);
+       if (rc) {
+               cERROR(1, "cifs_calc_signature2: could not init md5\n");
+               return rc;
+       }
+
+       if (server->secType == RawNTLMSSP)
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       server->session_key.data.ntlmv2.key,
+                       CIFS_NTLMV2_SESSKEY_SIZE);
+       else
+               crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                       (char *)&server->session_key.data,
+                       server->session_key.len);
+
        for (i = 0; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
-                       cERROR(1, "null iovec entry");
+                       cERROR(1, "cifs_calc_signature2: null iovec entry");
                        return -EIO;
                }
                /* The first entry includes a length field (which does not get
@@ -111,18 +151,18 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
                if (i == 0) {
                        if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
                                break; /* nothing to sign or corrupt header */
-                       cifs_MD5_update(&context, iov[0].iov_base+4,
-                                 iov[0].iov_len-4);
+                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                               iov[i].iov_base + 4, iov[i].iov_len - 4);
                } else
-                       cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
+                       crypto_shash_update(&server->ntlmssp.sdescmd5->shash,
+                               iov[i].iov_base, iov[i].iov_len);
        }
 
-       cifs_MD5_final(signature, &context);
+       rc = crypto_shash_final(&server->ntlmssp.sdescmd5->shash, signature);
 
-       return 0;
+       return rc;
 }
 
-
 int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
                   __u32 *pexpected_response_sequence_number)
 {
@@ -145,8 +185,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
        server->sequence_number++;
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
-                                     smb_signature);
+       rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
        if (rc)
                memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
        else
@@ -156,14 +195,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
 }
 
 int cifs_verify_signature(struct smb_hdr *cifs_pdu,
-                         const struct mac_key *mac_key,
+                         struct TCP_Server_Info *server,
                          __u32 expected_sequence_number)
 {
-       unsigned int rc;
+       int rc;
        char server_response_sig[8];
        char what_we_think_sig_should_be[20];
 
-       if ((cifs_pdu == NULL) || (mac_key == NULL))
+       if (cifs_pdu == NULL || server == NULL)
                return -EINVAL;
 
        if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -192,7 +231,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
                                        cpu_to_le32(expected_sequence_number);
        cifs_pdu->Signature.Sequence.Reserved = 0;
 
-       rc = cifs_calculate_signature(cifs_pdu, mac_key,
+       rc = cifs_calculate_signature(cifs_pdu, server,
                what_we_think_sig_should_be);
 
        if (rc)
@@ -209,7 +248,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+int cifs_calculate_session_key(struct session_key *key, const char *rn,
                           const char *password)
 {
        char temp_key[16];
@@ -223,63 +262,6 @@ int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
        return 0;
 }
 
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
-                              const struct nls_table *nls_info)
-{
-       char temp_hash[16];
-       struct HMACMD5Context ctx;
-       char *ucase_buf;
-       __le16 *unicode_buf;
-       unsigned int i, user_name_len, dom_name_len;
-
-       if (ses == NULL)
-               return -EINVAL;
-
-       E_md4hash(ses->password, temp_hash);
-
-       hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
-       user_name_len = strlen(ses->userName);
-       if (user_name_len > MAX_USERNAME_SIZE)
-               return -EINVAL;
-       if (ses->domainName == NULL)
-               return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
-       dom_name_len = strlen(ses->domainName);
-       if (dom_name_len > MAX_USERNAME_SIZE)
-               return -EINVAL;
-
-       ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
-       if (ucase_buf == NULL)
-               return -ENOMEM;
-       unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
-       if (unicode_buf == NULL) {
-               kfree(ucase_buf);
-               return -ENOMEM;
-       }
-
-       for (i = 0; i < user_name_len; i++)
-               ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
-       ucase_buf[i] = 0;
-       user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
-                                     MAX_USERNAME_SIZE*2, nls_info);
-       unicode_buf[user_name_len] = 0;
-       user_name_len++;
-
-       for (i = 0; i < dom_name_len; i++)
-               ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
-       ucase_buf[i] = 0;
-       dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
-                                    MAX_USERNAME_SIZE*2, nls_info);
-
-       unicode_buf[user_name_len + dom_name_len] = 0;
-       hmac_md5_update((const unsigned char *) unicode_buf,
-               (user_name_len+dom_name_len)*2, &ctx);
-
-       hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
-       kfree(ucase_buf);
-       kfree(unicode_buf);
-       return 0;
-}
-
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
                        char *lnm_session_key)
@@ -324,38 +306,52 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
 {
        int rc = 0;
        int len;
-       char nt_hash[16];
-       struct HMACMD5Context *pctxt;
+       char nt_hash[CIFS_NTHASH_SIZE];
        wchar_t *user;
        wchar_t *domain;
+       wchar_t *server;
 
-       pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
-
-       if (pctxt == NULL)
-               return -ENOMEM;
+       if (!ses->server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+               return -1;
+       }
 
        /* calculate md4 hash of password */
        E_md4hash(ses->password, nt_hash);
 
-       /* convert Domainname to unicode and uppercase */
-       hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
+       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5, nt_hash,
+                               CIFS_NTHASH_SIZE);
+
+       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
+               return rc;
+       }
 
        /* convert ses->userName to unicode and uppercase */
        len = strlen(ses->userName);
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
-       if (user == NULL)
+       if (user == NULL) {
+               cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
+               rc = -ENOMEM;
                goto calc_exit_2;
+       }
        len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
        UniStrupr(user);
-       hmac_md5_update((char *)user, 2*len, pctxt);
+
+       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                               (char *)user, 2 * len);
 
        /* convert ses->domainName to unicode and uppercase */
        if (ses->domainName) {
                len = strlen(ses->domainName);
 
                domain = kmalloc(2 + (len * 2), GFP_KERNEL);
-               if (domain == NULL)
+               if (domain == NULL) {
+                       cERROR(1, "calc_ntlmv2_hash: domain mem alloc failure");
+                       rc = -ENOMEM;
                        goto calc_exit_1;
+               }
                len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
                                        nls_cp);
                /* the following line was removed since it didn't work well
@@ -363,65 +359,292 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
                   Maybe converting the domain name earlier makes sense */
                /* UniStrupr(domain); */
 
-               hmac_md5_update((char *)domain, 2*len, pctxt);
+               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       (char *)domain, 2 * len);
 
                kfree(domain);
+       } else if (ses->serverName) {
+               len = strlen(ses->serverName);
+
+               server = kmalloc(2 + (len * 2), GFP_KERNEL);
+               if (server == NULL) {
+                       cERROR(1, "calc_ntlmv2_hash: server mem alloc failure");
+                       rc = -ENOMEM;
+                       goto calc_exit_1;
+               }
+               len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
+                                       nls_cp);
+               /* the following line was removed since it didn't work well
+                  with lower cased domain name that passed as an option.
+                  Maybe converting the domain name earlier makes sense */
+               /* UniStrupr(domain); */
+
+               crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       (char *)server, 2 * len);
+
+               kfree(server);
        }
+
+       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                                       ses->server->ntlmv2_hash);
+
 calc_exit_1:
        kfree(user);
 calc_exit_2:
        /* BB FIXME what about bytes 24 through 40 of the signing key?
           compare with the NTLM example */
-       hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
 
-       kfree(pctxt);
        return rc;
 }
 
-void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
-                     const struct nls_table *nls_cp)
+static int
+find_domain_name(struct cifsSesInfo *ses)
+{
+       int rc = 0;
+       unsigned int attrsize;
+       unsigned int type;
+       unsigned char *blobptr;
+       struct ntlmssp2_name *attrptr;
+
+       if (ses->server->tiblob) {
+               blobptr = ses->server->tiblob;
+               attrptr = (struct ntlmssp2_name *) blobptr;
+
+               while ((type = attrptr->type) != 0) {
+                       blobptr += 2; /* advance attr type */
+                       attrsize = attrptr->length;
+                       blobptr += 2; /* advance attr size */
+                       if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+                               if (!ses->domainName) {
+                                       ses->domainName =
+                                               kmalloc(attrptr->length + 1,
+                                                               GFP_KERNEL);
+                                       if (!ses->domainName)
+                                                       return -ENOMEM;
+                                       cifs_from_ucs2(ses->domainName,
+                                               (__le16 *)blobptr,
+                                               attrptr->length,
+                                               attrptr->length,
+                                               load_nls_default(), false);
+                               }
+                       }
+                       blobptr += attrsize; /* advance attr  value */
+                       attrptr = (struct ntlmssp2_name *) blobptr;
+               }
+       } else {
+               ses->server->tilen = 2 * sizeof(struct ntlmssp2_name);
+               ses->server->tiblob = kmalloc(ses->server->tilen, GFP_KERNEL);
+               if (!ses->server->tiblob) {
+                       ses->server->tilen = 0;
+                       cERROR(1, "Challenge target info allocation failure");
+                       return -ENOMEM;
+               }
+               memset(ses->server->tiblob, 0x0, ses->server->tilen);
+               attrptr = (struct ntlmssp2_name *) ses->server->tiblob;
+               attrptr->type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
+       }
+
+       return rc;
+}
+
+static int
+CalcNTLMv2_response(const struct TCP_Server_Info *server,
+                        char *v2_session_response)
 {
        int rc;
+
+       if (!server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+               return -1;
+       }
+
+       crypto_shash_setkey(server->ntlmssp.hmacmd5, server->ntlmv2_hash,
+               CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_init(&server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "CalcNTLMv2_response: could not init hmacmd5");
+               return rc;
+       }
+
+       memcpy(v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+               server->cryptKey, CIFS_SERVER_CHALLENGE_SIZE);
+       crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+               v2_session_response + CIFS_SERVER_CHALLENGE_SIZE,
+               sizeof(struct ntlmv2_resp) - CIFS_SERVER_CHALLENGE_SIZE);
+
+       if (server->tilen)
+               crypto_shash_update(&server->ntlmssp.sdeschmacmd5->shash,
+                                       server->tiblob, server->tilen);
+
+       rc = crypto_shash_final(&server->ntlmssp.sdeschmacmd5->shash,
+                                       v2_session_response);
+
+       return rc;
+}
+
+int
+setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+                     const struct nls_table *nls_cp)
+{
+       int rc = 0;
        struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
-       struct HMACMD5Context context;
 
        buf->blob_signature = cpu_to_le32(0x00000101);
        buf->reserved = 0;
        buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
        get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
        buf->reserved2 = 0;
-       buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
-       buf->names[0].length = 0;
-       buf->names[1].type = 0;
-       buf->names[1].length = 0;
+
+       if (!ses->domainName) {
+               rc = find_domain_name(ses);
+               if (rc) {
+                       cERROR(1, "could not get domain/server name rc %d", rc);
+                       return rc;
+               }
+       }
 
        /* calculate buf->ntlmv2_hash */
        rc = calc_ntlmv2_hash(ses, nls_cp);
-       if (rc)
+       if (rc) {
                cERROR(1, "could not get v2 hash rc %d", rc);
-       CalcNTLMv2_response(ses, resp_buf);
+               return rc;
+       }
+       rc = CalcNTLMv2_response(ses->server, resp_buf);
+       if (rc) {
+               cERROR(1, "could not get v2 hash rc %d", rc);
+               return rc;
+       }
 
-       /* now calculate the MAC key for NTLMv2 */
-       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
-       hmac_md5_update(resp_buf, 16, &context);
-       hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+       if (!ses->server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+               return -1;
+       }
 
-       memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
-              sizeof(struct ntlmv2_resp));
-       ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
+       crypto_shash_setkey(ses->server->ntlmssp.hmacmd5,
+                       ses->server->ntlmv2_hash, CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_init(&ses->server->ntlmssp.sdeschmacmd5->shash);
+       if (rc) {
+               cERROR(1, "setup_ntlmv2_rsp: could not init hmacmd5\n");
+               return rc;
+       }
+
+       crypto_shash_update(&ses->server->ntlmssp.sdeschmacmd5->shash,
+                               resp_buf, CIFS_HMAC_MD5_HASH_SIZE);
+
+       rc = crypto_shash_final(&ses->server->ntlmssp.sdeschmacmd5->shash,
+               ses->server->session_key.data.ntlmv2.key);
+
+       memcpy(&ses->server->session_key.data.ntlmv2.resp, resp_buf,
+                       sizeof(struct ntlmv2_resp));
+       ses->server->session_key.len = 16 + sizeof(struct ntlmv2_resp);
+
+       return rc;
 }
 
-void CalcNTLMv2_response(const struct cifsSesInfo *ses,
-                        char *v2_session_response)
+int
+calc_seckey(struct TCP_Server_Info *server)
 {
-       struct HMACMD5Context context;
-       /* rest of v2 struct already generated */
-       memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
-       hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+       int rc;
+       unsigned char sec_key[CIFS_NTLMV2_SESSKEY_SIZE];
+       struct crypto_blkcipher *tfm_arc4;
+       struct scatterlist sgin, sgout;
+       struct blkcipher_desc desc;
+
+       get_random_bytes(sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
+
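+       /*
+        * Key exchange: encrypt the freshly generated secondary key with the
+        * current NTLMv2 session key using RC4; the ciphertext is copied into
+        * the SessionKey field of the AUTHENTICATE_MESSAGE by the caller.
+        */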
+       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)",
+                                               0, CRYPTO_ALG_ASYNC);
+       if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+               cERROR(1, "could not allocate " "master crypto API arc4\n");
+               return 1;
+       }
+
+       desc.tfm = tfm_arc4;
+
+       crypto_blkcipher_setkey(tfm_arc4,
+               server->session_key.data.ntlmv2.key, CIFS_CPHTXT_SIZE);
+       sg_init_one(&sgin, sec_key, CIFS_CPHTXT_SIZE);
+       sg_init_one(&sgout, server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
+       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
 
-       hmac_md5_update(v2_session_response+8,
-                       sizeof(struct ntlmv2_resp) - 8, &context);
+       if (!rc)
+               memcpy(server->session_key.data.ntlmv2.key,
+                               sec_key, CIFS_NTLMV2_SESSKEY_SIZE);
+
+       crypto_free_blkcipher(tfm_arc4);
+
+       return rc;
+}
 
-       hmac_md5_final(v2_session_response, &context);
-/*     cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
+void
+cifs_crypto_shash_release(struct TCP_Server_Info *server)
+{
+       if (server->ntlmssp.md5)
+               crypto_free_shash(server->ntlmssp.md5);
+
+       if (server->ntlmssp.hmacmd5)
+               crypto_free_shash(server->ntlmssp.hmacmd5);
+
+       kfree(server->ntlmssp.sdeschmacmd5);
+
+       kfree(server->ntlmssp.sdescmd5);
+}
+
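+/*
+ * Allocate the md5 and hmac(md5) shash transforms and their descriptors
+ * used for NTLMv2 hashing and SMB signing on this connection.
+ */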
+int
+cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
+{
+       int rc;
+       unsigned int size;
+
+       server->ntlmssp.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
+       if (!server->ntlmssp.hmacmd5 ||
+                       IS_ERR(server->ntlmssp.hmacmd5)) {
+               cERROR(1, "could not allocate crypto hmacmd5\n");
+               return 1;
+       }
+
+       server->ntlmssp.md5 = crypto_alloc_shash("md5", 0, 0);
+       if (!server->ntlmssp.md5 || IS_ERR(server->ntlmssp.md5)) {
+               cERROR(1, "could not allocate crypto md5\n");
+               rc = 1;
+               goto cifs_crypto_shash_allocate_ret1;
+       }
+
+       size = sizeof(struct shash_desc) +
+                       crypto_shash_descsize(server->ntlmssp.hmacmd5);
+       server->ntlmssp.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
+       if (!server->ntlmssp.sdeschmacmd5) {
+               cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
+               rc = -ENOMEM;
+               goto cifs_crypto_shash_allocate_ret2;
+       }
+       server->ntlmssp.sdeschmacmd5->shash.tfm = server->ntlmssp.hmacmd5;
+       server->ntlmssp.sdeschmacmd5->shash.flags = 0x0;
+
+       size = sizeof(struct shash_desc) +
+                       crypto_shash_descsize(server->ntlmssp.md5);
+       server->ntlmssp.sdescmd5 = kmalloc(size, GFP_KERNEL);
+       if (!server->ntlmssp.sdescmd5) {
+               cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
+               rc = -ENOMEM;
+               goto cifs_crypto_shash_allocate_ret3;
+       }
+       server->ntlmssp.sdescmd5->shash.tfm = server->ntlmssp.md5;
+       server->ntlmssp.sdescmd5->shash.flags = 0x0;
+
+       return 0;
+
+cifs_crypto_shash_allocate_ret3:
+       kfree(server->ntlmssp.sdeschmacmd5);
+
+cifs_crypto_shash_allocate_ret2:
+       crypto_free_shash(server->ntlmssp.md5);
+
+cifs_crypto_shash_allocate_ret1:
+       crypto_free_shash(server->ntlmssp.hmacmd5);
+
+       return rc;
 }
index 0cdfb8c32ac68c34a98f5cdf412250c0eacdec2b..c9d0cfc086ebcb609b504afdec6431e89ac75db9 100644 (file)
@@ -25,6 +25,9 @@
 #include <linux/workqueue.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
+#include <crypto/internal/hash.h>
+#include <linux/scatterlist.h>
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -97,7 +100,7 @@ enum protocolEnum {
        /* Netbios frames protocol not supported at this time */
 };
 
-struct mac_key {
+struct session_key {
        unsigned int len;
        union {
                char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -120,6 +123,21 @@ struct cifs_cred {
        struct cifs_ace *aces;
 };
 
+struct sdesc {
+       struct shash_desc shash;
+       char ctx[];
+};
+
+struct ntlmssp_auth {
+       __u32 client_flags;
+       __u32 server_flags;
+       unsigned char ciphertext[CIFS_CPHTXT_SIZE];
+       struct crypto_shash *hmacmd5;
+       struct crypto_shash *md5;
+       struct sdesc *sdeschmacmd5;
+       struct sdesc *sdescmd5;
+};
+
 /*
  *****************************************************************
  * Except the CIFS PDUs themselves all the
@@ -182,11 +200,14 @@ struct TCP_Server_Info {
        /* 16th byte of RFC1001 workstation name is always null */
        char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
        __u32 sequence_number; /* needed for CIFS PDU signature */
-       struct mac_key mac_signing_key;
+       struct session_key session_key;
        char ntlmv2_hash[16];
        unsigned long lstrp; /* when we got last response from this server */
        u16 dialect; /* dialect index that server chose */
        /* extended security flavors that server supports */
+       unsigned int tilen; /* length of the target info blob */
+       unsigned char *tiblob; /* target info blob in challenge response */
+       struct ntlmssp_auth ntlmssp; /* various keys, ciphers, flags */
        bool    sec_kerberos;           /* supports plain Kerberos */
        bool    sec_mskerberos;         /* supports legacy MS Kerberos */
        bool    sec_kerberosu2u;        /* supports U2U Kerberos */
index 14d036d8db111f2719f9e50576e94024a105adfc..320e0fd0ba7b5f988b559e060173dc61c2aa064c 100644 (file)
  * Size of the session key (crypto key encrypted with the password
  */
 #define CIFS_SESS_KEY_SIZE (24)
+#define CIFS_CLIENT_CHALLENGE_SIZE (8)
+#define CIFS_SERVER_CHALLENGE_SIZE (8)
+#define CIFS_HMAC_MD5_HASH_SIZE (16)
+#define CIFS_CPHTXT_SIZE (16)
+#define CIFS_NTLMV2_SESSKEY_SIZE (16)
+#define CIFS_NTHASH_SIZE (16)
 
 /*
  * Maximum user name length
@@ -663,7 +669,6 @@ struct ntlmv2_resp {
        __le64  time;
        __u64  client_chal; /* random */
        __u32  reserved2;
-       struct ntlmssp2_name names[2];
        /* array of name entries could follow ending in minimum 4 byte struct */
 } __attribute__((packed));
 
index 1f5450814087bed34651f0c2ac4834d8251d63e6..1378d9133844f08a369057608ed9d312bebc732f 100644 (file)
@@ -361,15 +361,15 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
                          __u32 *);
 extern int cifs_verify_signature(struct smb_hdr *,
-                                const struct mac_key *mac_key,
+                                struct TCP_Server_Info *server,
                                __u32 expected_sequence_number);
-extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
                                 const char *pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
-                       const struct nls_table *);
-extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
-extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
                             const struct nls_table *);
+extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
+extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
+extern int calc_seckey(struct TCP_Server_Info *);
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern void calc_lanman_hash(const char *password, const char *cryptkey,
                                bool encrypt, char *lnm_session_key);
index c65c3419dd3703f12bb4994e9333c085c907ecfa..4bda920d1f754548ea705b94b13be9a1db321699 100644 (file)
@@ -604,11 +604,14 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        else
                                rc = -EINVAL;
 
-                       if (server->sec_kerberos || server->sec_mskerberos)
-                               server->secType = Kerberos;
-                       else if (server->sec_ntlmssp)
-                               server->secType = RawNTLMSSP;
-                       else
+                       if (server->secType == Kerberos) {
+                               if (!server->sec_kerberos &&
+                                               !server->sec_mskerberos)
+                                       rc = -EOPNOTSUPP;
+                       } else if (server->secType == RawNTLMSSP) {
+                               if (!server->sec_ntlmssp)
+                                       rc = -EOPNOTSUPP;
+                       } else
                                rc = -EOPNOTSUPP;
                }
        } else
index 95c2ea67edfb8f240d286885caddd51031aa82fc..ec0ea4a43bdb4efc0f3734f439b6af78af97a2f4 100644 (file)
@@ -1673,7 +1673,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
                                    MAX_USERNAME_SIZE))
                                continue;
                        if (strlen(vol->username) != 0 &&
-                           strncmp(ses->password, vol->password,
+                           ses->password != NULL &&
+                           strncmp(ses->password,
+                                   vol->password ? vol->password : "",
                                    MAX_PASSWORD_SIZE))
                                continue;
                }
@@ -1706,6 +1708,7 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
                CIFSSMBLogoff(xid, ses);
                _FreeXid(xid);
        }
+       cifs_crypto_shash_release(server);
        sesInfoFree(ses);
        cifs_put_tcp_session(server);
 }
@@ -1785,13 +1788,23 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
        ses->linux_uid = volume_info->linux_uid;
        ses->overrideSecFlg = volume_info->secFlg;
 
+       rc = cifs_crypto_shash_allocate(server);
+       if (rc) {
+               cERROR(1, "could not setup hash structures rc %d", rc);
+               goto get_ses_fail;
+       }
+       server->tilen = 0;
+       server->tiblob = NULL;
+
        mutex_lock(&ses->session_mutex);
        rc = cifs_negotiate_protocol(xid, ses);
        if (!rc)
                rc = cifs_setup_session(xid, ses, volume_info->local_nls);
        mutex_unlock(&ses->session_mutex);
-       if (rc)
+       if (rc) {
+               cifs_crypto_shash_release(ses->server);
                goto get_ses_fail;
+       }
 
        /* success, put it on the list */
        write_lock(&cifs_tcp_ses_lock);
index 578d88c5b46e7eae539fb04aede8f59b7c9dac53..f9ed0751cc12cb6f93e498b526c07e215bb91138 100644 (file)
@@ -305,8 +305,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
        full_path = build_path_from_dentry(direntry);
        if (full_path == NULL) {
                rc = -ENOMEM;
-               FreeXid(xid);
-               return rc;
+               goto cifs_create_out;
        }
 
        if (oplockEnabled)
@@ -365,9 +364,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 
        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (buf == NULL) {
-               kfree(full_path);
-               FreeXid(xid);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto cifs_create_out;
        }
 
        /*
@@ -496,6 +494,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        struct cifsTconInfo *pTcon;
        char *full_path = NULL;
        struct inode *newinode = NULL;
+       int oplock = 0;
+       u16 fileHandle;
+       FILE_ALL_INFO *buf = NULL;
+       unsigned int bytes_written;
+       struct win_dev *pdev;
 
        if (!old_valid_dev(device_number))
                return -EINVAL;
@@ -506,9 +509,12 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        pTcon = cifs_sb->tcon;
 
        full_path = build_path_from_dentry(direntry);
-       if (full_path == NULL)
+       if (full_path == NULL) {
                rc = -ENOMEM;
-       else if (pTcon->unix_ext) {
+               goto mknod_out;
+       }
+
+       if (pTcon->unix_ext) {
                struct cifs_unix_set_info_args args = {
                        .mode   = mode & ~current_umask(),
                        .ctime  = NO_CHANGE_64,
@@ -527,87 +533,78 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
                                            cifs_sb->local_nls,
                                            cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
+               if (rc)
+                       goto mknod_out;
 
-               if (!rc) {
-                       rc = cifs_get_inode_info_unix(&newinode, full_path,
+               rc = cifs_get_inode_info_unix(&newinode, full_path,
                                                inode->i_sb, xid);
-                       if (pTcon->nocase)
-                               direntry->d_op = &cifs_ci_dentry_ops;
-                       else
-                               direntry->d_op = &cifs_dentry_ops;
-                       if (rc == 0)
-                               d_instantiate(direntry, newinode);
-               }
-       } else {
-               if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
-                       int oplock = 0;
-                       u16 fileHandle;
-                       FILE_ALL_INFO *buf;
+               if (pTcon->nocase)
+                       direntry->d_op = &cifs_ci_dentry_ops;
+               else
+                       direntry->d_op = &cifs_dentry_ops;
 
-                       cFYI(1, "sfu compat create special file");
+               if (rc == 0)
+                       d_instantiate(direntry, newinode);
+               goto mknod_out;
+       }
 
-                       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-                       if (buf == NULL) {
-                               kfree(full_path);
-                               rc = -ENOMEM;
-                               FreeXid(xid);
-                               return rc;
-                       }
+       if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+               goto mknod_out;
 
-                       rc = CIFSSMBOpen(xid, pTcon, full_path,
-                                        FILE_CREATE, /* fail if exists */
-                                        GENERIC_WRITE /* BB would
-                                         WRITE_OWNER | WRITE_DAC be better? */,
-                                        /* Create a file and set the
-                                           file attribute to SYSTEM */
-                                        CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
-                                        &fileHandle, &oplock, buf,
-                                        cifs_sb->local_nls,
-                                        cifs_sb->mnt_cifs_flags &
-                                           CIFS_MOUNT_MAP_SPECIAL_CHR);
-
-                       /* BB FIXME - add handling for backlevel servers
-                          which need legacy open and check for all
-                          calls to SMBOpen for fallback to SMBLeagcyOpen */
-                       if (!rc) {
-                               /* BB Do not bother to decode buf since no
-                                  local inode yet to put timestamps in,
-                                  but we can reuse it safely */
-                               unsigned int bytes_written;
-                               struct win_dev *pdev;
-                               pdev = (struct win_dev *)buf;
-                               if (S_ISCHR(mode)) {
-                                       memcpy(pdev->type, "IntxCHR", 8);
-                                       pdev->major =
-                                             cpu_to_le64(MAJOR(device_number));
-                                       pdev->minor =
-                                             cpu_to_le64(MINOR(device_number));
-                                       rc = CIFSSMBWrite(xid, pTcon,
-                                               fileHandle,
-                                               sizeof(struct win_dev),
-                                               0, &bytes_written, (char *)pdev,
-                                               NULL, 0);
-                               } else if (S_ISBLK(mode)) {
-                                       memcpy(pdev->type, "IntxBLK", 8);
-                                       pdev->major =
-                                             cpu_to_le64(MAJOR(device_number));
-                                       pdev->minor =
-                                             cpu_to_le64(MINOR(device_number));
-                                       rc = CIFSSMBWrite(xid, pTcon,
-                                               fileHandle,
-                                               sizeof(struct win_dev),
-                                               0, &bytes_written, (char *)pdev,
-                                               NULL, 0);
-                               } /* else if(S_ISFIFO */
-                               CIFSSMBClose(xid, pTcon, fileHandle);
-                               d_drop(direntry);
-                       }
-                       kfree(buf);
-                       /* add code here to set EAs */
-               }
+
+       cFYI(1, "sfu compat create special file");
+
+       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+       if (buf == NULL) {
+               rc = -ENOMEM;
+               goto mknod_out;
        }
 
+       /* FIXME: would WRITE_OWNER | WRITE_DAC be better? */
+       rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
+                        GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
+                        &fileHandle, &oplock, buf, cifs_sb->local_nls,
+                        cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+       if (rc)
+               goto mknod_out;
+
+       /* BB Do not bother to decode buf since no local inode yet to put
+        * timestamps in, but we can reuse it safely */
+
+       pdev = (struct win_dev *)buf;
+       if (S_ISCHR(mode)) {
+               memcpy(pdev->type, "IntxCHR", 8);
+               pdev->major =
+                     cpu_to_le64(MAJOR(device_number));
+               pdev->minor =
+                     cpu_to_le64(MINOR(device_number));
+               rc = CIFSSMBWrite(xid, pTcon,
+                       fileHandle,
+                       sizeof(struct win_dev),
+                       0, &bytes_written, (char *)pdev,
+                       NULL, 0);
+       } else if (S_ISBLK(mode)) {
+               memcpy(pdev->type, "IntxBLK", 8);
+               pdev->major =
+                     cpu_to_le64(MAJOR(device_number));
+               pdev->minor =
+                     cpu_to_le64(MINOR(device_number));
+               rc = CIFSSMBWrite(xid, pTcon,
+                       fileHandle,
+                       sizeof(struct win_dev),
+                       0, &bytes_written, (char *)pdev,
+                       NULL, 0);
+       } /* else if (S_ISFIFO) */
+       CIFSSMBClose(xid, pTcon, fileHandle);
+       d_drop(direntry);
+
+       /* FIXME: add code here to set EAs */
+
+mknod_out:
        kfree(full_path);
+       kfree(buf);
        FreeXid(xid);
        return rc;
 }
index db11fdef0e92b1ca52a75fb033f03dda24e8f6a5..de748c652d11f4fca214b2aed62c8ad1639ae2dc 100644 (file)
@@ -242,8 +242,7 @@ int cifs_open(struct inode *inode, struct file *file)
        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
-               FreeXid(xid);
-               return rc;
+               goto out;
        }
 
        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
index 4bc47e5b5f29af38d601f699face0e1f16b31a9f..86a164f08a74a51399c2152799ec178c93902fa6 100644 (file)
@@ -834,7 +834,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
                                                xid, NULL);
 
        if (!inode)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(rc);
 
 #ifdef CONFIG_CIFS_FSCACHE
        /* populate tcon->resource_id */
index 49c9a4e7531979c3e65615dd277ad4b0815ed0ae..1db0f0746a5b4242f927e9203d8749711180918c 100644 (file)
 #define NTLMSSP_NEGOTIATE_KEY_XCH   0x40000000
 #define NTLMSSP_NEGOTIATE_56        0x80000000
 
+/* Define AV Pair Field IDs */
+#define NTLMSSP_AV_EOL                 0
+#define NTLMSSP_AV_NB_COMPUTER_NAME    1
+#define NTLMSSP_AV_NB_DOMAIN_NAME      2
+#define NTLMSSP_AV_DNS_COMPUTER_NAME   3
+#define NTLMSSP_AV_DNS_DOMAIN_NAME     4
+#define NTLMSSP_AV_DNS_TREE_NAME       5
+#define NTLMSSP_AV_FLAGS               6
+#define NTLMSSP_AV_TIMESTAMP           7
+#define NTLMSSP_AV_RESTRICTION         8
+#define NTLMSSP_AV_TARGET_NAME         9
+#define NTLMSSP_AV_CHANNEL_BINDINGS    10
+
 /* Although typedefs are not commonly used for structure definitions */
 /* in the Linux kernel, in this particular case they are useful      */
 /* to more closely match the standards document for NTLMSSP from     */
index 0a57cb7db5dd7554030e599cd111379e343083df..795095f4eac69ba204257a597522e465dafac371 100644 (file)
@@ -383,6 +383,9 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
 static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                                    struct cifsSesInfo *ses)
 {
+       unsigned int tioffset; /* challenge message target info area */
+       unsigned int tilen; /* challenge message target info area length */
+
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -405,6 +408,20 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
 
+       ses->server->ntlmssp.server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
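+       /* save the target info (AV pair) blob for use in the NTLMv2 response */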
+       tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+       tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
+       ses->server->tilen = tilen;
+       if (tilen) {
+               ses->server->tiblob = kmalloc(tilen, GFP_KERNEL);
+               if (!ses->server->tiblob) {
+                       cERROR(1, "Challenge target info allocation failure");
+                       return -ENOMEM;
+               }
+               memcpy(ses->server->tiblob,  bcc_ptr + tioffset, tilen);
+       }
+
        return 0;
 }
 
@@ -425,12 +442,13 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        /* BB is NTLMV2 session security format easier to use here? */
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
+               NTLMSSP_NEGOTIATE_NTLM;
        if (ses->server->secMode &
-          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
-               flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
+          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+               flags |= NTLMSSP_NEGOTIATE_SIGN |
+                       NTLMSSP_NEGOTIATE_KEY_XCH |
+                       NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+       }
 
        sec_blob->NegotiateFlags |= cpu_to_le32(flags);
 
@@ -451,10 +469,12 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                                   struct cifsSesInfo *ses,
                                   const struct nls_table *nls_cp, bool first)
 {
+       int rc;
+       unsigned int size;
        AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
        __u32 flags;
        unsigned char *tmp;
-       char ntlm_session_key[CIFS_SESS_KEY_SIZE];
+       struct ntlmv2_resp ntlmv2_response = {};
 
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
@@ -477,19 +497,25 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       /* calculate session key,  BB what about adding similar ntlmv2 path? */
-       SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
-       if (first)
-               cifs_calculate_mac_key(&ses->server->mac_signing_key,
-                                      ntlm_session_key, ses->password);
-
-       memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
        sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
-       sec_blob->NtChallengeResponse.MaximumLength =
-                               cpu_to_le16(CIFS_SESS_KEY_SIZE);
+       rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
+       if (rc) {
+               cERROR(1, "error rc: %d during ntlmssp ntlmv2 setup", rc);
+               goto setup_ntlmv2_ret;
+       }
+       size =  sizeof(struct ntlmv2_resp);
+       memcpy(tmp, (char *)&ntlmv2_response, size);
+       tmp += size;
+       if (ses->server->tilen > 0) {
+               memcpy(tmp, ses->server->tiblob, ses->server->tilen);
+               tmp += ses->server->tilen;
+       } else
+               ses->server->tilen = 0;
 
-       tmp += CIFS_SESS_KEY_SIZE;
+       sec_blob->NtChallengeResponse.Length = cpu_to_le16(size +
+                               ses->server->tilen);
+       sec_blob->NtChallengeResponse.MaximumLength =
+               cpu_to_le16(size + ses->server->tilen);
 
        if (ses->domainName == NULL) {
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -501,7 +527,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               len += 2; /* trailing null */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -518,7 +543,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
                                    MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               len += 2; /* trailing null */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -530,9 +554,26 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
 
-       sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
-       sec_blob->SessionKey.Length = 0;
-       sec_blob->SessionKey.MaximumLength = 0;
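+       /* when key exchange was negotiated, send the RC4-encrypted session key */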
+       if ((ses->server->ntlmssp.server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+                       !calc_seckey(ses->server)) {
+               memcpy(tmp, ses->server->ntlmssp.ciphertext, CIFS_CPHTXT_SIZE);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
+               sec_blob->SessionKey.MaximumLength =
+                       cpu_to_le16(CIFS_CPHTXT_SIZE);
+               tmp += CIFS_CPHTXT_SIZE;
+       } else {
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.Length = 0;
+               sec_blob->SessionKey.MaximumLength = 0;
+       }
+
+       ses->server->sequence_number = 0;
+
+setup_ntlmv2_ret:
+       if (ses->server->tilen > 0)
+               kfree(ses->server->tiblob);
+
        return tmp - pbuffer;
 }
 
@@ -546,15 +587,14 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
        return;
 }
 
-static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
+static int setup_ntlmssp_auth_req(char *ntlmsspblob,
                                  struct cifsSesInfo *ses,
                                  const struct nls_table *nls, bool first_time)
 {
        int bloblen;
 
-       bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
+       bloblen = build_ntlmssp_auth_blob(ntlmsspblob, ses, nls,
                                          first_time);
-       pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
 
        return bloblen;
 }
@@ -690,7 +730,7 @@ ssetup_ntlmssp_authenticate:
 
                if (first_time) /* should this be moved into common code
                                  with similar ntlmv2 path? */
-                       cifs_calculate_mac_key(&ses->server->mac_signing_key,
+                       cifs_calculate_session_key(&ses->server->session_key,
                                ntlm_session_key, ses->password);
                /* copy session key */
 
@@ -729,12 +769,21 @@ ssetup_ntlmssp_authenticate:
                        cpu_to_le16(sizeof(struct ntlmv2_resp));
 
                /* calculate session key */
-               setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+               rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+               if (rc) {
+                       kfree(v2_sess_key);
+                       goto ssetup_exit;
+               }
                /* FIXME: calculate MAC key */
                memcpy(bcc_ptr, (char *)v2_sess_key,
                       sizeof(struct ntlmv2_resp));
                bcc_ptr += sizeof(struct ntlmv2_resp);
                kfree(v2_sess_key);
+               if (ses->server->tilen > 0) {
+                       memcpy(bcc_ptr, ses->server->tiblob,
+                               ses->server->tilen);
+                       bcc_ptr += ses->server->tilen;
+               }
                if (ses->capabilities & CAP_UNICODE) {
                        if (iov[0].iov_len % 2) {
                                *bcc_ptr = 0;
@@ -765,15 +814,15 @@ ssetup_ntlmssp_authenticate:
                }
                /* bail out if key is too long */
                if (msg->sesskey_len >
-                   sizeof(ses->server->mac_signing_key.data.krb5)) {
+                   sizeof(ses->server->session_key.data.krb5)) {
                        cERROR(1, "Kerberos signing key too long (%u bytes)",
                                msg->sesskey_len);
                        rc = -EOVERFLOW;
                        goto ssetup_exit;
                }
                if (first_time) {
-                       ses->server->mac_signing_key.len = msg->sesskey_len;
-                       memcpy(ses->server->mac_signing_key.data.krb5,
+                       ses->server->session_key.len = msg->sesskey_len;
+                       memcpy(ses->server->session_key.data.krb5,
                                msg->data, msg->sesskey_len);
                }
                pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
@@ -815,12 +864,28 @@ ssetup_ntlmssp_authenticate:
                        if (phase == NtLmNegotiate) {
                                setup_ntlmssp_neg_req(pSMB, ses);
                                iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
+                               iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        } else if (phase == NtLmAuthenticate) {
                                int blob_len;
-                               blob_len = setup_ntlmssp_auth_req(pSMB, ses,
-                                                                 nls_cp,
-                                                                 first_time);
+                               char *ntlmsspblob;
+
+                               ntlmsspblob = kmalloc(5 *
+                                       sizeof(struct _AUTHENTICATE_MESSAGE),
+                                       GFP_KERNEL);
+                               if (!ntlmsspblob) {
+                                       cERROR(1, "Can't allocate NTLMSSP");
+                                       rc = -ENOMEM;
+                                       goto ssetup_exit;
+                               }
+
+                               blob_len = setup_ntlmssp_auth_req(ntlmsspblob,
+                                                               ses,
+                                                               nls_cp,
+                                                               first_time);
                                iov[1].iov_len = blob_len;
+                               iov[1].iov_base = ntlmsspblob;
+                               pSMB->req.SecurityBlobLength =
+                                       cpu_to_le16(blob_len);
                                /* Make sure that we tell the server that we
                                   are using the uid that it just gave us back
                                   on the response (challenge) */
@@ -830,7 +895,6 @@ ssetup_ntlmssp_authenticate:
                                rc = -ENOSYS;
                                goto ssetup_exit;
                        }
-                       iov[1].iov_base = &pSMB->req.SecurityBlob[0];
                        /* unicode strings must be word aligned */
                        if ((iov[0].iov_len + iov[1].iov_len) % 2) {
                                *bcc_ptr = 0;
index 82f78c4d6978ceafdab5789182193b899a435202..e0588cdf4cc5d5a1e8a73c1190c21f2d6cbe4986 100644 (file)
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(midQ->resp_buf,
-                                               &ses->server->mac_signing_key,
+                                               ses->server,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                    (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                             SECMODE_SIGN_ENABLED))) {
                        rc = cifs_verify_signature(out_buf,
-                                               &ses->server->mac_signing_key,
+                                               ses->server,
                                                midQ->sequence_number+1);
                        if (rc) {
                                cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
            (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                     SECMODE_SIGN_ENABLED))) {
                rc = cifs_verify_signature(out_buf,
-                                          &ses->server->mac_signing_key,
+                                          ses->server,
                                           midQ->sequence_number+1);
                if (rc) {
                        cERROR(1, "Unexpected SMB signature");
index a2e3b562e65d996f62d1ef15eaa9c6e5a7a2293f..cbadc1bee6e7ecceeefa5131143e08783d006297 100644 (file)
@@ -1793,7 +1793,7 @@ struct kmem_cache *ecryptfs_key_tfm_cache;
 static struct list_head key_tfm_list;
 struct mutex key_tfm_list_mutex;
 
-int ecryptfs_init_crypto(void)
+int __init ecryptfs_init_crypto(void)
 {
        mutex_init(&key_tfm_list_mutex);
        INIT_LIST_HEAD(&key_tfm_list);
@@ -2169,7 +2169,6 @@ int ecryptfs_encrypt_and_encode_filename(
                                (ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE
                                 + encoded_name_no_prefix_size);
                        (*encoded_name)[(*encoded_name_size)] = '\0';
-                       (*encoded_name_size)++;
                } else {
                        rc = -EOPNOTSUPP;
                }
index 6c55113e72222cf473f92b16277056561ebbf55d..3fbc94203380acf8e6095627ee610ff058df5f38 100644 (file)
@@ -349,7 +349,7 @@ out:
 
 /**
  * ecryptfs_new_lower_dentry
- * @ename: The name of the new dentry.
+ * @name: The name of the new dentry.
  * @lower_dir_dentry: Parent directory of the new dentry.
  * @nd: nameidata from last lookup.
  *
@@ -386,20 +386,19 @@ ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
  * ecryptfs_lookup_one_lower
  * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
  * @lower_dir_dentry: lower parent directory
+ * @name: lower file name
  *
  * Get the lower dentry from vfs. If lower dentry does not exist yet,
  * create it.
  */
 static struct dentry *
 ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
-                         struct dentry *lower_dir_dentry)
+                         struct dentry *lower_dir_dentry, struct qstr *name)
 {
        struct nameidata nd;
        struct vfsmount *lower_mnt;
-       struct qstr *name;
        int err;
 
-       name = &ecryptfs_dentry->d_name;
        lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
                                    ecryptfs_dentry->d_parent));
        err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
@@ -434,6 +433,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
        size_t encrypted_and_encoded_name_size;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
        struct dentry *lower_dir_dentry, *lower_dentry;
+       struct qstr lower_name;
        int rc = 0;
 
        ecryptfs_dentry->d_op = &ecryptfs_dops;
@@ -444,9 +444,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                goto out_d_drop;
        }
        lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
-
+       lower_name.name = ecryptfs_dentry->d_name.name;
+       lower_name.len = ecryptfs_dentry->d_name.len;
+       lower_name.hash = ecryptfs_dentry->d_name.hash;
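+       /* let the lower fs recompute the name hash if it provides ->d_hash() */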
+       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+                                                   &lower_name);
+               if (rc < 0)
+                       goto out_d_drop;
+       }
        lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry);
+                                                lower_dir_dentry, &lower_name);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
                ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
@@ -471,8 +479,17 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
                       "filename; rc = [%d]\n", __func__, rc);
                goto out_d_drop;
        }
+       lower_name.name = encrypted_and_encoded_name;
+       lower_name.len = encrypted_and_encoded_name_size;
+       lower_name.hash = full_name_hash(lower_name.name, lower_name.len);
+       if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) {
+               rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry,
+                                                   &lower_name);
+               if (rc < 0)
+                       goto out_d_drop;
+       }
        lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
-                                                lower_dir_dentry);
+                                                lower_dir_dentry, &lower_name);
        if (IS_ERR(lower_dentry)) {
                rc = PTR_ERR(lower_dentry);
                ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
index 89c5476506ef36c8c3de7520565b8360d70eb83b..73811cfa2ea4369766a99a251c614ddb091bc8f4 100644 (file)
@@ -515,6 +515,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+               rc = -ENOMEM;
                goto out;
        }
        s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -806,6 +807,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
+               rc = -ENOMEM;
                goto out;
        }
        s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
index d8c3a373aafa40547090427c0ae1e96b73f49dbe..0851ab6980f54038b8f0ed9239712fea573622c5 100644 (file)
@@ -86,7 +86,7 @@ out:
        return 0;
 }
 
-int ecryptfs_init_kthread(void)
+int __init ecryptfs_init_kthread(void)
 {
        int rc = 0;
 
index bcb68c0cb1f0fed8ac5300c804b304879b08ae22..ab2248090515534dfd47b940b83aef63c696b545 100644 (file)
@@ -473,7 +473,7 @@ sleep:
        return rc;
 }
 
-int ecryptfs_init_messaging(void)
+int __init ecryptfs_init_messaging(void)
 {
        int i;
        int rc = 0;
index 3745f612bcd438cb477f8bb245afeb5f47495f89..00208c3d7e926cf799974ef23d5bdaebd72151c8 100644 (file)
@@ -500,7 +500,7 @@ static struct miscdevice ecryptfs_miscdev = {
  *
  * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_init_ecryptfs_miscdev(void)
+int __init ecryptfs_init_ecryptfs_miscdev(void)
 {
        int rc;
 
index 2e7357104cfdf3208a6abe357c91e92536d8e815..3dfef062396845d2b45cc42a22064ec4402ee05f 100644 (file)
@@ -2450,14 +2450,13 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
 static __be32
 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_stateid *stp, struct nfsd4_open *open)
 {
-       u32 op_share_access, new_access;
+       u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
+       bool new_access;
        __be32 status;
 
-       set_access(&new_access, stp->st_access_bmap);
-       new_access = (~new_access) & open->op_share_access & ~NFS4_SHARE_WANT_MASK;
-
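+       /* true if this open requests an access mode not already in the bitmap */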
+       new_access = !test_bit(op_share_access, &stp->st_access_bmap);
        if (new_access) {
-               status = nfs4_get_vfs_file(rqstp, fp, cur_fh, new_access);
+               status = nfs4_get_vfs_file(rqstp, fp, cur_fh, op_share_access);
                if (status)
                        return status;
        }
@@ -2470,7 +2469,6 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
                return status;
        }
        /* remember the open */
-       op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
        __set_bit(op_share_access, &stp->st_access_bmap);
        __set_bit(open->op_share_deny, &stp->st_deny_bmap);
 
@@ -2983,7 +2981,6 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
                                *filpp = find_readable_file(stp->st_file);
                        else
                                *filpp = find_writeable_file(stp->st_file);
-                       BUG_ON(!*filpp); /* assured by check_openmode */
                }
        }
        status = nfs_ok;
@@ -3561,7 +3558,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_stateowner *open_sop = NULL;
        struct nfs4_stateowner *lock_sop = NULL;
        struct nfs4_stateid *lock_stp;
-       struct file *filp;
+       struct nfs4_file *fp;
+       struct file *filp = NULL;
        struct file_lock file_lock;
        struct file_lock conflock;
        __be32 status = 0;
@@ -3591,7 +3589,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                 * lock stateid.
                 */
                struct nfs4_stateid *open_stp = NULL;
-               struct nfs4_file *fp;
                
                status = nfserr_stale_clientid;
                if (!nfsd4_has_session(cstate) &&
@@ -3634,6 +3631,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                if (status)
                        goto out;
                lock_sop = lock->lk_replay_owner;
+               fp = lock_stp->st_file;
        }
        /* lock->lk_replay_owner and lock_stp have been created or found */
 
@@ -3648,13 +3646,19 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        switch (lock->lk_type) {
                case NFS4_READ_LT:
                case NFS4_READW_LT:
-                       filp = find_readable_file(lock_stp->st_file);
+                       if (find_readable_file(lock_stp->st_file)) {
+                               nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ);
+                               filp = find_readable_file(lock_stp->st_file);
+                       }
                        file_lock.fl_type = F_RDLCK;
                        cmd = F_SETLK;
                break;
                case NFS4_WRITE_LT:
                case NFS4_WRITEW_LT:
-                       filp = find_writeable_file(lock_stp->st_file);
+                       if (find_writeable_file(lock_stp->st_file)) {
+                               nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE);
+                               filp = find_writeable_file(lock_stp->st_file);
+                       }
                        file_lock.fl_type = F_WRLCK;
                        cmd = F_SETLK;
                break;
index 7731a75971ddf88347b7ed634604e72fcc0b027c..322518c88e4b09eee40473ed8efaece99e02410b 100644 (file)
@@ -363,23 +363,23 @@ struct nfs4_file {
  * at all? */
 static inline struct file *find_writeable_file(struct nfs4_file *f)
 {
-       if (f->fi_fds[O_RDWR])
-               return f->fi_fds[O_RDWR];
-       return f->fi_fds[O_WRONLY];
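+       /* prefer the write-only open; fall back to the read/write open */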
+       if (f->fi_fds[O_WRONLY])
+               return f->fi_fds[O_WRONLY];
+       return f->fi_fds[O_RDWR];
 }
 
 static inline struct file *find_readable_file(struct nfs4_file *f)
 {
-       if (f->fi_fds[O_RDWR])
-               return f->fi_fds[O_RDWR];
-       return f->fi_fds[O_RDONLY];
+       if (f->fi_fds[O_RDONLY])
+               return f->fi_fds[O_RDONLY];
+       return f->fi_fds[O_RDWR];
 }
 
 static inline struct file *find_any_file(struct nfs4_file *f)
 {
        if (f->fi_fds[O_RDWR])
                return f->fi_fds[O_RDWR];
-       else if (f->fi_fds[O_RDWR])
+       else if (f->fi_fds[O_WRONLY])
                return f->fi_fds[O_WRONLY];
        else
                return f->fi_fds[O_RDONLY];
index 96360a83cb91f670d7a0d5df012ebf75324f4d7e..661a6cf8e8265eaeca988e2aabc0ba1ede767717 100644 (file)
@@ -2033,15 +2033,17 @@ out:
 __be32
 nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
 {
-       struct path path = {
-               .mnt    = fhp->fh_export->ex_path.mnt,
-               .dentry = fhp->fh_dentry,
-       };
        __be32 err;
 
        err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
-       if (!err && vfs_statfs(&path, stat))
-               err = nfserr_io;
+       if (!err) {
+               struct path path = {
+                       .mnt    = fhp->fh_export->ex_path.mnt,
+                       .dentry = fhp->fh_dentry,
+               };
+               if (vfs_statfs(&path, stat))
+                       err = nfserr_io;
+       }
        return err;
 }
 
index 15412fe15c3a47f3cc744af36cecfe2b01e5a5a3..b552f816de15942095f82ad0302b8656f7db09ae 100644 (file)
@@ -852,8 +852,8 @@ xfs_convert_page(
                SetPageUptodate(page);
 
        if (count) {
-               wbc->nr_to_write--;
-               if (wbc->nr_to_write <= 0)
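+               /* only background (non-sync) writeback stops when out of budget */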
+               if (--wbc->nr_to_write <= 0 &&
+                   wbc->sync_mode == WB_SYNC_NONE)
                        done = 1;
        }
        xfs_start_page_writeback(page, !page_dirty, count);
@@ -1068,7 +1068,7 @@ xfs_vm_writepage(
         * by themselves.
         */
        if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
-               goto out_fail;
+               goto redirty;
 
        /*
         * We need a transaction if there are delalloc or unwritten buffers
@@ -1080,7 +1080,7 @@ xfs_vm_writepage(
         */
        xfs_count_page_state(page, &delalloc, &unwritten);
        if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
-               goto out_fail;
+               goto redirty;
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -1245,12 +1245,15 @@ error:
        if (iohead)
                xfs_cancel_ioend(iohead);
 
+       if (err == -EAGAIN)
+               goto redirty;
+
        xfs_aops_discard_page(page);
        ClearPageUptodate(page);
        unlock_page(page);
        return err;
 
-out_fail:
+redirty:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
index 15c35b62ff14ba46e0dd31d09cb5b0dfb54cf195..a4e07974955be3025ebbbe3839264b9bb1acba74 100644 (file)
@@ -1226,6 +1226,7 @@ xfs_fs_statfs(
        struct xfs_inode        *ip = XFS_I(dentry->d_inode);
        __uint64_t              fakeinos, id;
        xfs_extlen_t            lsize;
+       __int64_t               ffree;
 
        statp->f_type = XFS_SB_MAGIC;
        statp->f_namelen = MAXNAMELEN - 1;
@@ -1249,7 +1250,11 @@ xfs_fs_statfs(
                statp->f_files = min_t(typeof(statp->f_files),
                                        statp->f_files,
                                        mp->m_maxicount);
-       statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+
+       /* make sure statp->f_ffree does not underflow */
+       ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
+       statp->f_ffree = max_t(__int64_t, ffree, 0);
+
        spin_unlock(&mp->m_sb_lock);
 
        if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
@@ -1402,7 +1407,7 @@ xfs_fs_freeze(
 
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
-       return -xfs_fs_log_dummy(mp);
+       return -xfs_fs_log_dummy(mp, SYNC_WAIT);
 }
 
 STATIC int
index dfcbd98d15997e62e7d8a433fa71e5b8a9912609..d59c4a65d492c9b6b0713accaec1ab1c2ba7ea5f 100644 (file)
@@ -34,6 +34,7 @@
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
+#include "xfs_fsops.h"
 
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -340,38 +341,6 @@ xfs_sync_attr(
                                     XFS_ICI_NO_TAG, 0, NULL);
 }
 
-STATIC int
-xfs_commit_dummy_trans(
-       struct xfs_mount        *mp,
-       uint                    flags)
-{
-       struct xfs_inode        *ip = mp->m_rootip;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       /*
-        * Put a dummy transaction in the log to tell recovery
-        * that all others are OK.
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, ip);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       error = xfs_trans_commit(tp, 0);
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-       /* the log force ensures this transaction is pushed to disk */
-       xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
-       return error;
-}
-
 STATIC int
 xfs_sync_fsdata(
        struct xfs_mount        *mp)
@@ -432,7 +401,7 @@ xfs_quiesce_data(
 
        /* mark the log as covered if needed */
        if (xfs_log_need_covered(mp))
-               error2 = xfs_commit_dummy_trans(mp, SYNC_WAIT);
+               error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);
 
        /* flush data-only devices */
        if (mp->m_rtdev_targp)
@@ -563,7 +532,7 @@ xfs_flush_inodes(
 /*
  * Every sync period we need to unpin all items, reclaim inodes and sync
  * disk quotas.  We might need to cover the log to indicate that the
- * filesystem is idle.
+ * filesystem is idle and not frozen.
  */
 STATIC void
 xfs_sync_worker(
@@ -577,8 +546,9 @@ xfs_sync_worker(
                xfs_reclaim_inodes(mp, 0);
                /* dgc: errors ignored here */
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
-               if (xfs_log_need_covered(mp))
-                       error = xfs_commit_dummy_trans(mp, 0);
+               if (mp->m_super->s_frozen == SB_UNFROZEN &&
+                   xfs_log_need_covered(mp))
+                       error = xfs_fs_log_dummy(mp, 0);
        }
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
index dbca5f5c37bad18fc220ce21f551d17b58f0c13a..43b1d56993350ba3af58c53757be99dfa936c017 100644 (file)
@@ -604,31 +604,36 @@ out:
        return 0;
 }
 
+/*
+ * Dump a transaction into the log that contains no real change. This is needed
+ * to be able to make the log dirty or stamp the current tail LSN into the log
+ * during the covering operation.
+ *
+ * We cannot use an inode here for this - that will push dirty state back up
+ * into the VFS and then periodic inode flushing will prevent log covering from
+ * making progress. Hence we log a field in the superblock instead.
+ */
 int
 xfs_fs_log_dummy(
-       xfs_mount_t     *mp)
+       xfs_mount_t     *mp,
+       int             flags)
 {
        xfs_trans_t     *tp;
-       xfs_inode_t     *ip;
        int             error;
 
        tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
-       error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+       error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
+                                       XFS_DEFAULT_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }
 
-       ip = mp->m_rootip;
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-       xfs_trans_ijoin(tp, ip);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       xfs_trans_set_sync(tp);
-       error = xfs_trans_commit(tp, 0);
-
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return error;
+       /* log the UUID because it is an unchanging field */
+       xfs_mod_sb(tp, XFS_SB_UUID);
+       if (flags & SYNC_WAIT)
+               xfs_trans_set_sync(tp);
+       return xfs_trans_commit(tp, 0);
 }
 
 int
index 88435e0a77c9b2f67035344e8f3afc58532d4d5c..a786c5212c1e478677e46f2725105a010ebd1262 100644 (file)
@@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
                                xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(xfs_mount_t *mp);
+extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags);
 
 #endif /* __XFS_FSOPS_H__ */
index abf80ae1e95bed43db56f17dd8a7ffc06039a60a..5371d2dc360ebde33776d7f44d87e4c79afe210e 100644 (file)
@@ -1213,7 +1213,6 @@ xfs_imap_lookup(
        struct xfs_inobt_rec_incore rec;
        struct xfs_btree_cur    *cur;
        struct xfs_buf          *agbp;
-       xfs_agino_t             startino;
        int                     error;
        int                     i;
 
@@ -1227,13 +1226,13 @@ xfs_imap_lookup(
        }
 
        /*
-        * derive and lookup the exact inode record for the given agino. If the
-        * record cannot be found, then it's an invalid inode number and we
-        * should abort.
+        * Lookup the inode record for the given agino. If the record cannot be
+        * found, then it's an invalid inode number and we should abort. Once
+        * we have a record, we need to ensure it contains the inode number
+        * we are looking up.
         */
        cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
-       startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
-       error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
+       error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
        if (!error) {
                if (i)
                        error = xfs_inobt_get_rec(cur, &rec, &i);
@@ -1246,6 +1245,11 @@ xfs_imap_lookup(
        if (error)
                return error;
 
+       /* check that the returned record contains the required inode */
+       if (rec.ir_startino > agino ||
+           rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+               return EINVAL;
+
        /* for untrusted inodes check it is allocated first */
        if ((flags & XFS_IGET_UNTRUSTED) &&
            (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
index 68415cb4f23cab39861119c68838b1b099029df2..34798f391c49349018f04a47d625c6aafa035bea 100644 (file)
@@ -1914,6 +1914,11 @@ xfs_iunlink_remove(
        return 0;
 }
 
+/*
+ * A big issue when freeing the inode cluster is that we _cannot_ skip any
+ * inodes that are in memory - they all must be marked stale and attached to
+ * the cluster buffer.
+ */
 STATIC void
 xfs_ifree_cluster(
        xfs_inode_t     *free_ip,
@@ -1945,8 +1950,6 @@ xfs_ifree_cluster(
        }
 
        for (j = 0; j < nbufs; j++, inum += ninodes) {
-               int     found = 0;
-
                blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
                                         XFS_INO_TO_AGBNO(mp, inum));
 
@@ -1965,7 +1968,9 @@ xfs_ifree_cluster(
                /*
                 * Walk the inodes already attached to the buffer and mark them
                 * stale. These will all have the flush locks held, so an
-                * in-memory inode walk can't lock them.
+                * in-memory inode walk can't lock them. By marking them all
+                * stale first, we will not attempt to lock them in the loop
+                * below as the XFS_ISTALE flag will be set.
                 */
                lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
                while (lip) {
@@ -1977,11 +1982,11 @@ xfs_ifree_cluster(
                                                        &iip->ili_flush_lsn,
                                                        &iip->ili_item.li_lsn);
                                xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
-                               found++;
                        }
                        lip = lip->li_bio_list;
                }
 
+
                /*
                 * For each inode in memory attempt to add it to the inode
                 * buffer and set it up for being staled on buffer IO
@@ -1993,6 +1998,7 @@ xfs_ifree_cluster(
                 * even trying to lock them.
                 */
                for (i = 0; i < ninodes; i++) {
+retry:
                        read_lock(&pag->pag_ici_lock);
                        ip = radix_tree_lookup(&pag->pag_ici_root,
                                        XFS_INO_TO_AGINO(mp, (inum + i)));
@@ -2003,38 +2009,36 @@ xfs_ifree_cluster(
                                continue;
                        }
 
-                       /* don't try to lock/unlock the current inode */
+                       /*
+                        * Don't try to lock/unlock the current inode, but we
+                        * _cannot_ skip the other inodes that we did not find
+                        * in the list attached to the buffer and are not
+                        * already marked stale. If we can't lock it, back off
+                        * and retry.
+                        */
                        if (ip != free_ip &&
                            !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
                                read_unlock(&pag->pag_ici_lock);
-                               continue;
+                               delay(1);
+                               goto retry;
                        }
                        read_unlock(&pag->pag_ici_lock);
 
-                       if (!xfs_iflock_nowait(ip)) {
-                               if (ip != free_ip)
-                                       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-                               continue;
-                       }
-
+                       xfs_iflock(ip);
                        xfs_iflags_set(ip, XFS_ISTALE);
-                       if (xfs_inode_clean(ip)) {
-                               ASSERT(ip != free_ip);
-                               xfs_ifunlock(ip);
-                               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-                               continue;
-                       }
 
+                       /*
+                        * we don't need to attach clean inodes or those only
+                        * with unlogged changes (which we throw away, anyway).
+                        */
                        iip = ip->i_itemp;
-                       if (!iip) {
-                               /* inode with unlogged changes only */
+                       if (!iip || xfs_inode_clean(ip)) {
                                ASSERT(ip != free_ip);
                                ip->i_update_core = 0;
                                xfs_ifunlock(ip);
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                                continue;
                        }
-                       found++;
 
                        iip->ili_last_fields = iip->ili_format.ilf_fields;
                        iip->ili_format.ilf_fields = 0;
@@ -2049,8 +2053,7 @@ xfs_ifree_cluster(
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }
 
-               if (found)
-                       xfs_trans_stale_inode_buf(tp, bp);
+               xfs_trans_stale_inode_buf(tp, bp);
                xfs_trans_binval(tp, bp);
        }
 
index 925d572bf0f405e9a94be11b74549c5f760b3ec4..33f718f92a4849df234880dfe190069791d7bcb6 100644 (file)
@@ -3015,7 +3015,8 @@ _xfs_log_force(
 
        XFS_STATS_INC(xs_log_force);
 
-       xlog_cil_push(log, 1);
+       if (log->l_cilp)
+               xlog_cil_force(log);
 
        spin_lock(&log->l_icloglock);
 
@@ -3167,7 +3168,7 @@ _xfs_log_force_lsn(
        XFS_STATS_INC(xs_log_force);
 
        if (log->l_cilp) {
-               lsn = xlog_cil_push_lsn(log, lsn);
+               lsn = xlog_cil_force_lsn(log, lsn);
                if (lsn == NULLCOMMITLSN)
                        return 0;
        }
@@ -3724,7 +3725,7 @@ xfs_log_force_umount(
         * call below.
         */
        if (!logerror && (mp->m_flags & XFS_MOUNT_DELAYLOG))
-               xlog_cil_push(log, 1);
+               xlog_cil_force(log);
 
        /*
         * We must hold both the GRANT lock and the LOG lock,
index 31e4ea2d19acfc08f069813dffa367e6b2420973..ed575fb4b49597806200f676680ff787d5be9d12 100644 (file)
@@ -68,6 +68,7 @@ xlog_cil_init(
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
+       cil->xc_current_sequence = ctx->sequence;
 
        cil->xc_log = log;
        log->l_cilp = cil;
@@ -269,15 +270,10 @@ xlog_cil_insert(
 static void
 xlog_cil_format_items(
        struct log              *log,
-       struct xfs_log_vec      *log_vector,
-       struct xlog_ticket      *ticket,
-       xfs_lsn_t               *start_lsn)
+       struct xfs_log_vec      *log_vector)
 {
        struct xfs_log_vec *lv;
 
-       if (start_lsn)
-               *start_lsn = log->l_cilp->xc_ctx->sequence;
-
        ASSERT(log_vector);
        for (lv = log_vector; lv; lv = lv->lv_next) {
                void    *ptr;
@@ -301,9 +297,24 @@ xlog_cil_format_items(
                        ptr += vec->i_len;
                }
                ASSERT(ptr == lv->lv_buf + lv->lv_buf_len);
+       }
+}
+
+static void
+xlog_cil_insert_items(
+       struct log              *log,
+       struct xfs_log_vec      *log_vector,
+       struct xlog_ticket      *ticket,
+       xfs_lsn_t               *start_lsn)
+{
+       struct xfs_log_vec *lv;
+
+       if (start_lsn)
+               *start_lsn = log->l_cilp->xc_ctx->sequence;
 
+       ASSERT(log_vector);
+       for (lv = log_vector; lv; lv = lv->lv_next)
                xlog_cil_insert(log, ticket, lv->lv_item, lv);
-       }
 }
 
 static void
@@ -320,80 +331,6 @@ xlog_cil_free_logvec(
        }
 }
 
-/*
- * Commit a transaction with the given vector to the Committed Item List.
- *
- * To do this, we need to format the item, pin it in memory if required and
- * account for the space used by the transaction. Once we have done that we
- * need to release the unused reservation for the transaction, attach the
- * transaction to the checkpoint context so we carry the busy extents through
- * to checkpoint completion, and then unlock all the items in the transaction.
- *
- * For more specific information about the order of operations in
- * xfs_log_commit_cil() please refer to the comments in
- * xfs_trans_commit_iclog().
- *
- * Called with the context lock already held in read mode to lock out
- * background commit, returns without it held once background commits are
- * allowed again.
- */
-int
-xfs_log_commit_cil(
-       struct xfs_mount        *mp,
-       struct xfs_trans        *tp,
-       struct xfs_log_vec      *log_vector,
-       xfs_lsn_t               *commit_lsn,
-       int                     flags)
-{
-       struct log              *log = mp->m_log;
-       int                     log_flags = 0;
-       int                     push = 0;
-
-       if (flags & XFS_TRANS_RELEASE_LOG_RES)
-               log_flags = XFS_LOG_REL_PERM_RESERV;
-
-       if (XLOG_FORCED_SHUTDOWN(log)) {
-               xlog_cil_free_logvec(log_vector);
-               return XFS_ERROR(EIO);
-       }
-
-       /* lock out background commit */
-       down_read(&log->l_cilp->xc_ctx_lock);
-       xlog_cil_format_items(log, log_vector, tp->t_ticket, commit_lsn);
-
-       /* check we didn't blow the reservation */
-       if (tp->t_ticket->t_curr_res < 0)
-               xlog_print_tic_res(log->l_mp, tp->t_ticket);
-
-       /* attach the transaction to the CIL if it has any busy extents */
-       if (!list_empty(&tp->t_busy)) {
-               spin_lock(&log->l_cilp->xc_cil_lock);
-               list_splice_init(&tp->t_busy,
-                                       &log->l_cilp->xc_ctx->busy_extents);
-               spin_unlock(&log->l_cilp->xc_cil_lock);
-       }
-
-       tp->t_commit_lsn = *commit_lsn;
-       xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
-       xfs_trans_unreserve_and_mod_sb(tp);
-
-       /* check for background commit before unlock */
-       if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
-               push = 1;
-       up_read(&log->l_cilp->xc_ctx_lock);
-
-       /*
-        * We need to push CIL every so often so we don't cache more than we
-        * can fit in the log. The limit really is that a checkpoint can't be
-        * more than half the log (the current checkpoint is not allowed to
-        * overwrite the previous checkpoint), but commit latency and memory
-        * usage limit this to a smaller size in most cases.
-        */
-       if (push)
-               xlog_cil_push(log, 0);
-       return 0;
-}
-
 /*
  * Mark all items committed and clear busy extents. We free the log vector
  * chains in a separate pass so that we unpin the log items as quickly as
@@ -427,13 +364,23 @@ xlog_cil_committed(
 }
 
 /*
- * Push the Committed Item List to the log. If the push_now flag is not set,
- * then it is a background flush and so we can chose to ignore it.
+ * Push the Committed Item List to the log. If @push_seq is zero, then it
+ * is a background flush and so we can choose to ignore it. Otherwise, if the
+ * current sequence is the same as @push_seq we need to do a flush. If
+ * @push_seq is less than the current sequence, then it has already been
+ * flushed and we don't need to do anything - the caller will wait for it to
+ * complete if necessary.
+ *
+ * @push_seq is a value rather than a flag because that allows us to do an
+ * unlocked check of the sequence number for a match. Hence we can allow log
+ * forces to run racily and not issue pushes for the same sequence twice. If we
+ * get a race between multiple pushes for the same sequence they will block on
+ * the first one and then abort, hence avoiding needless pushes.
  */
-int
+STATIC int
 xlog_cil_push(
        struct log              *log,
-       int                     push_now)
+       xfs_lsn_t               push_seq)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
@@ -453,12 +400,14 @@ xlog_cil_push(
        if (!cil)
                return 0;
 
+       ASSERT(!push_seq || push_seq <= cil->xc_ctx->sequence);
+
        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);
 
        /* lock out transaction commit, but don't block on background push */
        if (!down_write_trylock(&cil->xc_ctx_lock)) {
-               if (!push_now)
+               if (!push_seq)
                        goto out_free_ticket;
                down_write(&cil->xc_ctx_lock);
        }
@@ -469,7 +418,11 @@ xlog_cil_push(
                goto out_skip;
 
        /* check for spurious background flush */
-       if (!push_now && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+       if (!push_seq && cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
+               goto out_skip;
+
+       /* check for a previously pushed sequence */
+       if (push_seq < cil->xc_ctx->sequence)
                goto out_skip;
 
        /*
@@ -514,6 +467,13 @@ xlog_cil_push(
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
 
+       /*
+        * mirror the new sequence into the cil structure so that we can do
+        * unlocked checks against the current sequence in log forces without
+        * risking dereferencing a freed context pointer.
+        */
+       cil->xc_current_sequence = new_ctx->sequence;
+
        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
@@ -625,6 +585,102 @@ out_abort:
        return XFS_ERROR(EIO);
 }
 
+/*
+ * Commit a transaction with the given vector to the Committed Item List.
+ *
+ * To do this, we need to format the item, pin it in memory if required and
+ * account for the space used by the transaction. Once we have done that we
+ * need to release the unused reservation for the transaction, attach the
+ * transaction to the checkpoint context so we carry the busy extents through
+ * to checkpoint completion, and then unlock all the items in the transaction.
+ *
+ * For more specific information about the order of operations in
+ * xfs_log_commit_cil() please refer to the comments in
+ * xfs_trans_commit_iclog().
+ *
+ * Called with the context lock already held in read mode to lock out
+ * background commit, returns without it held once background commits are
+ * allowed again.
+ */
+int
+xfs_log_commit_cil(
+       struct xfs_mount        *mp,
+       struct xfs_trans        *tp,
+       struct xfs_log_vec      *log_vector,
+       xfs_lsn_t               *commit_lsn,
+       int                     flags)
+{
+       struct log              *log = mp->m_log;
+       int                     log_flags = 0;
+       int                     push = 0;
+
+       if (flags & XFS_TRANS_RELEASE_LOG_RES)
+               log_flags = XFS_LOG_REL_PERM_RESERV;
+
+       if (XLOG_FORCED_SHUTDOWN(log)) {
+               xlog_cil_free_logvec(log_vector);
+               return XFS_ERROR(EIO);
+       }
+
+       /*
+        * do all the hard work of formatting items (including memory
+        * allocation) outside the CIL context lock. This prevents stalling CIL
+        * pushes when we are low on memory and a transaction commit spends a
+        * lot of time in memory reclaim.
+        */
+       xlog_cil_format_items(log, log_vector);
+
+       /* lock out background commit */
+       down_read(&log->l_cilp->xc_ctx_lock);
+       xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn);
+
+       /* check we didn't blow the reservation */
+       if (tp->t_ticket->t_curr_res < 0)
+               xlog_print_tic_res(log->l_mp, tp->t_ticket);
+
+       /* attach the transaction to the CIL if it has any busy extents */
+       if (!list_empty(&tp->t_busy)) {
+               spin_lock(&log->l_cilp->xc_cil_lock);
+               list_splice_init(&tp->t_busy,
+                                       &log->l_cilp->xc_ctx->busy_extents);
+               spin_unlock(&log->l_cilp->xc_cil_lock);
+       }
+
+       tp->t_commit_lsn = *commit_lsn;
+       xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
+       xfs_trans_unreserve_and_mod_sb(tp);
+
+       /*
+        * Once all the items of the transaction have been copied to the CIL,
+        * the items can be unlocked and freed.
+        *
+        * This needs to be done before we drop the CIL context lock because we
+        * have to update state in the log items and unlock them before they go
+        * to disk. If we don't, then the CIL checkpoint can race with us and
+        * we can run checkpoint completion before we've updated and unlocked
+        * the log items. This affects (at least) processing of stale buffers,
+        * inodes and EFIs.
+        */
+       xfs_trans_free_items(tp, *commit_lsn, 0);
+
+       /* check for background commit before unlock */
+       if (log->l_cilp->xc_ctx->space_used > XLOG_CIL_SPACE_LIMIT(log))
+               push = 1;
+
+       up_read(&log->l_cilp->xc_ctx_lock);
+
+       /*
+        * We need to push CIL every so often so we don't cache more than we
+        * can fit in the log. The limit really is that a checkpoint can't be
+        * more than half the log (the current checkpoint is not allowed to
+        * overwrite the previous checkpoint), but commit latency and memory
+        * usage limit this to a smaller size in most cases.
+        */
+       if (push)
+               xlog_cil_push(log, 0);
+       return 0;
+}
+
 /*
  * Conditionally push the CIL based on the sequence passed in.
  *
@@ -639,39 +695,34 @@ out_abort:
  * commit lsn is there. It'll be empty, so this is broken for now.
  */
 xfs_lsn_t
-xlog_cil_push_lsn(
+xlog_cil_force_lsn(
        struct log      *log,
-       xfs_lsn_t       push_seq)
+       xfs_lsn_t       sequence)
 {
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;
 
-restart:
-       down_write(&cil->xc_ctx_lock);
-       ASSERT(push_seq <= cil->xc_ctx->sequence);
-
-       /* check to see if we need to force out the current context */
-       if (push_seq == cil->xc_ctx->sequence) {
-               up_write(&cil->xc_ctx_lock);
-               xlog_cil_push(log, 1);
-               goto restart;
-       }
+       ASSERT(sequence <= cil->xc_current_sequence);
+
+       /*
+        * check to see if we need to force out the current context.
+        * xlog_cil_push() handles racing pushes for the same sequence,
+        * so no need to deal with it here.
+        */
+       if (sequence == cil->xc_current_sequence)
+               xlog_cil_push(log, sequence);
 
        /*
         * See if we can find a previous sequence still committing.
-        * We can drop the flush lock as soon as we have the cil lock
-        * because we are now only comparing contexts protected by
-        * the cil lock.
-        *
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
+restart:
        spin_lock(&cil->xc_cil_lock);
-       up_write(&cil->xc_ctx_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
-               if (ctx->sequence > push_seq)
+               if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
@@ -681,7 +732,7 @@ restart:
                        sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
                        goto restart;
                }
-               if (ctx->sequence != push_seq)
+               if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
index 8c072618965caf80476a562e7e38cae026089f88..ced52b98b322e3eb1be0e0c7dfc70f6096d0cd80 100644 (file)
@@ -422,6 +422,7 @@ struct xfs_cil {
        struct rw_semaphore     xc_ctx_lock;
        struct list_head        xc_committing;
        sv_t                    xc_commit_wait;
+       xfs_lsn_t               xc_current_sequence;
 };
 
 /*
@@ -562,8 +563,16 @@ int        xlog_cil_init(struct log *log);
 void   xlog_cil_init_post_recovery(struct log *log);
 void   xlog_cil_destroy(struct log *log);
 
-int    xlog_cil_push(struct log *log, int push_now);
-xfs_lsn_t xlog_cil_push_lsn(struct log *log, xfs_lsn_t push_sequence);
+/*
+ * CIL force routines
+ */
+xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence);
+
+static inline void
+xlog_cil_force(struct log *log)
+{
+       xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+}
 
 /*
  * Unmount record type is used as a pseudo transaction type for the ticket.
index fdca7416c754636a26dae9a312003932f7306d9f..1c47edaea0d28f4def851e87f664bd9339ac19b7 100644 (file)
@@ -1167,7 +1167,7 @@ xfs_trans_del_item(
  * Unlock all of the items of a transaction and free all the descriptors
  * of that transaction.
  */
-STATIC void
+void
 xfs_trans_free_items(
        struct xfs_trans        *tp,
        xfs_lsn_t               commit_lsn,
@@ -1653,9 +1653,6 @@ xfs_trans_commit_cil(
                return error;
 
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
-
-       /* xfs_trans_free_items() unlocks them first */
-       xfs_trans_free_items(tp, *commit_lsn, 0);
        xfs_trans_free(tp);
        return 0;
 }
index e2d93d8ead7b68b9b6ab74b1869a3f8e10721981..62da86c90de53bb36b3fd2928d539ae9fc339478 100644 (file)
@@ -25,7 +25,8 @@ struct xfs_trans;
 
 void   xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void   xfs_trans_del_item(struct xfs_log_item *);
-
+void   xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
+                               int flags);
 void   xfs_trans_item_committed(struct xfs_log_item *lip,
                                xfs_lsn_t commit_lsn, int aborted);
 void   xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
index 2a512bc0d4ab74f7c56bc9460b7df3e4e3c0f8d9..7809d230adee3f90c9537f6c00ec66bb53c1bd27 100644 (file)
@@ -305,14 +305,16 @@ struct drm_ioctl_desc {
        unsigned int cmd;
        int flags;
        drm_ioctl_t *func;
+       unsigned int cmd_drv;
 };
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
  * ioctl, for use by drm_ioctl().
  */
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-       [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)                        \
+       [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
 
 struct drm_magic_entry {
        struct list_head head;
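The new cmd_drv field records the full driver ioctl number so that drm_ioctl() can cross-check a request against its table slot, and DRM_IOCTL_DEF_DRV builds both the command offset (DRM_##ioctl) and the full number (DRM_IOCTL_##ioctl) from a single name, which is why the per-driver DRM_IOCTL_* definitions are added or corrected in the headers that follow. As a rough sketch only, with a made-up "foo" driver whose DRM_FOO_DUMMY and DRM_IOCTL_FOO_DUMMY definitions are assumed to exist in its own header, a driver table entry would be written as:

#include "drmP.h"	/* drm_device, drm_file, drm_ioctl_desc */

static int foo_dummy_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

static struct drm_ioctl_desc foo_ioctls[] = {
	/* expands to: [DRM_IOCTL_NR(DRM_FOO_DUMMY)] = { .cmd = DRM_FOO_DUMMY,
	 *   .func = foo_dummy_ioctl, .flags = DRM_AUTH,
	 *   .cmd_drv = DRM_IOCTL_FOO_DUMMY } */
	DRM_IOCTL_DEF_DRV(FOO_DUMMY, foo_dummy_ioctl, DRM_AUTH),
};

Because the macro now references DRM_IOCTL_##ioctl, every driver converted to it needs the corresponding DRM_IOCTL_* constant to exist, hence the header fixes below.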
index 4b00d2dd4f684f7dfde4256c84f09bf08844e2b6..61315c29b8f3d53dcb903487e13825bd507e48bf 100644 (file)
@@ -264,20 +264,20 @@ typedef struct _drm_i830_sarea {
 #define DRM_I830_GETPARAM      0x0c
 #define DRM_I830_SETPARAM      0x0d
 
-#define DRM_IOCTL_I830_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX          DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR           DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE          DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF          DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP            DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP)
-#define DRM_IOCTL_I830_COPY            DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY          DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP            DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t)
+#define DRM_IOCTL_I830_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX          DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR           DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH           DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE          DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF          DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP            DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY            DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY          DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP            DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
 
 typedef struct _drm_i830_clear {
        int clear_color;
index 8f8b072c4c7b6013e12c10e35bbf1da24f6a9d79..e41c74facb6a3e41e84e3c66df6395d1a94add76 100644 (file)
@@ -215,6 +215,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_INIT                DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
index 3ffbc4798afacaec3f521eac11b607269cfd7c20..c16097f99be090f557d21405f8eeee1446be0324 100644 (file)
@@ -248,7 +248,7 @@ typedef struct _drm_mga_sarea {
 #define DRM_MGA_DMA_BOOTSTRAP  0x0c
 
 #define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
 #define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
 #define DRM_IOCTL_MGA_SWAP     DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
 #define DRM_IOCTL_MGA_CLEAR    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
index fe917dee723a7357f44c285626189ae7acd5e863..01a7141195068c8e0fd384c0dde661adfb9601b6 100644 (file)
@@ -197,4 +197,17 @@ struct drm_nouveau_sarea {
 #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
 #define DRM_NOUVEAU_GEM_INFO           0x44
 
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
 #endif /* __NOUVEAU_DRM_H__ */
index 0acaf8f9143751791f54b3a2da0be0c40653ad19..10f8b53bdd404d47c80cd8d9ee876c57b1c2b8a5 100644 (file)
@@ -547,8 +547,8 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
 #define DRM_IOCTL_RADEON_CS            DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
 #define DRM_IOCTL_RADEON_INFO          DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-#define DRM_IOCTL_RADEON_SET_TILING    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
-#define DRM_IOCTL_RADEON_GET_TILING    DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING        DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY      DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 
 typedef struct drm_radeon_init {
index 8a576ef01821715099d7f16ef991571f290749ce..4863cf6bf96fb23c96d3eab9d7cde5d794fe89bc 100644 (file)
@@ -63,10 +63,10 @@ typedef struct _drm_savage_sarea {
 #define DRM_SAVAGE_BCI_EVENT_EMIT      0x02
 #define DRM_SAVAGE_BCI_EVENT_WAIT      0x03
 
-#define DRM_IOCTL_SAVAGE_INIT          DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
-#define DRM_IOCTL_SAVAGE_CMDBUF                DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
-#define DRM_IOCTL_SAVAGE_EVENT_EMIT    DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
-#define DRM_IOCTL_SAVAGE_EVENT_WAIT    DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+#define DRM_IOCTL_SAVAGE_BCI_INIT              DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_BCI_CMDBUF            DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT        DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT        DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
 
 #define SAVAGE_DMA_PCI 1
 #define SAVAGE_DMA_AGP 3
index c831467774d0e71963ccf774d310bd4f6dd91b8e..bed7a4682b90734935e3dd6680d5e646b0b895c0 100644 (file)
@@ -119,7 +119,7 @@ struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
        unsigned char   h_source[ETH_ALEN];     /* source ether addr    */
        __be16          h_proto;                /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/skbuff.h>
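The change from __packed back to the long-hand attribute in this and the following headers matters for userspace: these headers are exported, and the kernel-only __packed shorthand is not defined outside the kernel. As a quick illustrative check, not part of the patch itself, a plain userspace program can now include the header directly:

#include <stdio.h>
#include <linux/if_ether.h>

int main(void)
{
	/* the packed attribute keeps struct ethhdr at its on-the-wire
	 * size of ETH_HLEN (14) bytes */
	printf("sizeof(struct ethhdr) = %zu, ETH_HLEN = %d\n",
	       sizeof(struct ethhdr), ETH_HLEN);
	return 0;
}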
index 9947c39e62f6fa49ea95505d0afc8053a27fbebb..e6dc11e7f9a54613f3700af8fe9d6557d8bd6135 100644 (file)
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
        __u8    dsap;                                   /* destination service access point */
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl;                                   /* control byte #1 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 Type 2 header */
 struct fddi_8022_2_hdr {
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
        __u8    ssap;                                   /* source service access point */
        __u8    ctrl_1;                                 /* control byte #1 */
        __u8    ctrl_2;                                 /* control byte #2 */
-} __packed;
+} __attribute__((packed));
 
 /* Define 802.2 SNAP header */
 #define FDDI_K_OUI_LEN 3
@@ -85,7 +85,7 @@ struct fddi_snap_hdr {
        __u8    ctrl;                                   /* always 0x03 */
        __u8    oui[FDDI_K_OUI_LEN];    /* organizational universal id */
        __be16  ethertype;                              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 /* Define FDDI LLC frame header */
 struct fddihdr {
@@ -98,7 +98,7 @@ struct fddihdr {
                struct fddi_8022_2_hdr          llc_8022_2;
                struct fddi_snap_hdr            llc_snap;
                } hdr;
-} __packed;
+} __attribute__((packed));
 
 #ifdef __KERNEL__
 #include <linux/netdevice.h>
index 5fe5f307c6f560f424f16252ad61a1a9bf4b7b45..cdc049f1829a8ca6ccc9aabb2103b1aacd3fb990 100644 (file)
@@ -104,7 +104,7 @@ struct hippi_fp_hdr {
        __be32          fixed;
 #endif
        __be32          d2_size;
-} __packed;
+} __attribute__((packed));
 
 struct hippi_le_hdr {
 #if defined (__BIG_ENDIAN_BITFIELD)
@@ -129,7 +129,7 @@ struct hippi_le_hdr {
        __u8            daddr[HIPPI_ALEN];
        __u16           locally_administered;
        __u8            saddr[HIPPI_ALEN];
-} __packed;
+} __attribute__((packed));
 
 #define HIPPI_OUI_LEN  3
 /*
@@ -142,12 +142,12 @@ struct hippi_snap_hdr {
        __u8    ctrl;                   /* always 0x03 */
        __u8    oui[HIPPI_OUI_LEN];     /* organizational universal id (zero)*/
        __be16  ethertype;              /* packet type ID field */
-} __packed;
+} __attribute__((packed));
 
 struct hippi_hdr {
        struct hippi_fp_hdr     fp;
        struct hippi_le_hdr     le;
        struct hippi_snap_hdr   snap;
-} __packed;
+} __attribute__((packed));
 
 #endif /* _LINUX_IF_HIPPI_H */
index 1925e0c3f1623e0d515a22ac6cbc01074fd83e68..27741e05446f97dfad3e5f35e83f328d02520588 100644 (file)
@@ -59,7 +59,7 @@ struct sockaddr_pppox {
        union{ 
                struct pppoe_addr       pppoe; 
        }sa_addr; 
-} __packed;
+} __attribute__((packed));
 
 /* The use of the above union isn't viable because the size of this
  * struct must stay fixed over time -- applications use sizeof(struct
@@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tp_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
  * bits. So we need a different sockaddr structure.
@@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
        sa_family_t     sa_family;      /* address family, AF_PPPOX */
        unsigned int    sa_protocol;    /* protocol identifier */
        struct pppol2tpv3_addr pppol2tp;
-} __packed;
+} __attribute__((packed));
 
 /*********************************************************************
  *
@@ -101,7 +101,7 @@ struct pppoe_tag {
        __be16 tag_type;
        __be16 tag_len;
        char tag_data[0];
-} __attribute ((packed));
+} __attribute__ ((packed));
 
 /* Tag identifiers */
 #define PTT_EOL                __cpu_to_be16(0x0000)
@@ -129,7 +129,7 @@ struct pppoe_hdr {
        __be16 sid;
        __be16 length;
        struct pppoe_tag tag[0];
-} __packed;
+} __attribute__((packed));
 
 /* Length of entire PPPoE + PPP header */
 #define PPPOE_SES_HLEN 8
index ab9e9e89e4074318405a595c1147642e344255c0..e62683ba88e6824e72b3e8c81c7315998868f606 100644 (file)
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr {
        /* 
         * TLV encoded option data follows.
         */
-} __packed;    /* required for some archs */
+} __attribute__((packed));     /* required for some archs */
 
 #define ipv6_destopt_hdr ipv6_opt_hdr
 #define ipv6_hopopt_hdr  ipv6_opt_hdr
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao {
        __u8                    type;
        __u8                    length;
        struct in6_addr         addr;
-} __packed;
+} __attribute__((packed));
 
 /*
  *     IPv6 fixed header
index cf343a852534bee2f3ed02ffd9cce21210caac8f..7950a37a71466b773ac72d1880e834d20b1d0c14 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/kref.h>
+#include <linux/kobject_ns.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
 #include <asm/atomic.h>
@@ -136,42 +137,8 @@ struct kobj_attribute {
 
 extern const struct sysfs_ops kobj_sysfs_ops;
 
-/*
- * Namespace types which are used to tag kobjects and sysfs entries.
- * Network namespace will likely be the first.
- */
-enum kobj_ns_type {
-       KOBJ_NS_TYPE_NONE = 0,
-       KOBJ_NS_TYPE_NET,
-       KOBJ_NS_TYPES
-};
-
 struct sock;
 
-/*
- * Callbacks so sysfs can determine namespaces
- *   @current_ns: return calling task's namespace
- *   @netlink_ns: return namespace to which a sock belongs (right?)
- *   @initial_ns: return the initial namespace (i.e. init_net_ns)
- */
-struct kobj_ns_type_operations {
-       enum kobj_ns_type type;
-       const void *(*current_ns)(void);
-       const void *(*netlink_ns)(struct sock *sk);
-       const void *(*initial_ns)(void);
-};
-
-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
-int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
-
-const void *kobj_ns_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
-void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
-
-
 /**
  * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
  *
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
new file mode 100644 (file)
index 0000000..82cb5bf
--- /dev/null
@@ -0,0 +1,56 @@
+/* Kernel object name space definitions
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * Split from kobject.h by David Howells (dhowells@redhat.com)
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _LINUX_KOBJECT_NS_H
+#define _LINUX_KOBJECT_NS_H
+
+struct sock;
+struct kobject;
+
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+       KOBJ_NS_TYPE_NONE = 0,
+       KOBJ_NS_TYPE_NET,
+       KOBJ_NS_TYPES
+};
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ *   @current_ns: return calling task's namespace
+ *   @netlink_ns: return namespace to which a sock belongs (right?)
+ *   @initial_ns: return the initial namespace (i.e. init_net_ns)
+ */
+struct kobj_ns_type_operations {
+       enum kobj_ns_type type;
+       const void *(*current_ns)(void);
+       const void *(*netlink_ns)(struct sock *sk);
+       const void *(*initial_ns)(void);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+const void *kobj_ns_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
+
+#endif /* _LINUX_KOBJECT_NS_H */
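The new header carries only the namespace-tagging types and prototypes, so low-level headers such as sysfs.h (further down in this series) can include it without pulling in all of kobject.h. As a sketch of how the pieces fit together (the callback bodies and names here are invented; the real user is the network namespace code), a subsystem would register a set of operations like this:

#include <linux/kobject_ns.h>

static const void *example_current_ns(void)
{
	return NULL;	/* would return the calling task's namespace */
}

static const void *example_netlink_ns(struct sock *sk)
{
	return NULL;	/* would return the namespace the socket belongs to */
}

static const void *example_initial_ns(void)
{
	return NULL;	/* would return the initial namespace */
}

static const struct kobj_ns_type_operations example_ns_ops = {
	.type		= KOBJ_NS_TYPE_NET,
	.current_ns	= example_current_ns,
	.netlink_ns	= example_netlink_ns,
	.initial_ns	= example_initial_ns,
};

/* registered once at subsystem init: kobj_ns_type_register(&example_ns_ops); */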
index bafffc737903971934037fc334f7d4158d9eda48..18fd13028ba1aab8207a1baa9c5b1527435d3a01 100644 (file)
@@ -33,6 +33,7 @@
 #define MWAVE_MINOR            219     /* ACP/Mwave Modem */
 #define MPT_MINOR              220
 #define MPT2SAS_MINOR          221
+#define UINPUT_MINOR           223
 #define HPET_MINOR             228
 #define FUSE_MINOR             229
 #define KVM_MINOR              232
index 709f6728fc90e223c9f41092d626b7b7fc4458bd..e6b1210772ceace3fc70817a32e1a411096a6b22 100644 (file)
@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_MAYSHARE    0x00000080
 
 #define VM_GROWSDOWN   0x00000100      /* general info on the segment */
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
 #define VM_GROWSUP     0x00000200
+#else
+#define VM_GROWSUP     0x00000000
+#endif
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
@@ -1330,8 +1334,10 @@ unsigned long ra_submit(struct file_ra_state *ra,
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-#ifdef CONFIG_IA64
+#if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+  #define expand_upwards(vma, address) do { } while (0)
 #endif
 extern int expand_stack_downwards(struct vm_area_struct *vma,
                                  unsigned long address);
@@ -1357,7 +1363,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
+#ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       return __pgprot(0);
+}
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
index bb58854a806196d8dea6ab9599daa244012ef5d8..d146ca10c0f52449dd528e9fec61be47e0c85221 100644 (file)
@@ -88,7 +88,7 @@ struct nbd_request {
        char handle[8];
        __be64 from;
        __be32 len;
-} __packed;
+} __attribute__((packed));
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
index 3ace8370e61e9855cfda5d7237362c1538c701a6..99f0adeeb3f348e58c65312133217055a6ccf3c0 100644 (file)
@@ -27,7 +27,7 @@ struct ncp_request_header {
        __u8 conn_high;
        __u8 function;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_REPLY                (0x3333)
 #define NCP_WATCHDOG            (0x3E3E)
@@ -42,7 +42,7 @@ struct ncp_reply_header {
        __u8 completion_code;
        __u8 connection_state;
        __u8 data[0];
-} __packed;
+} __attribute__((packed));
 
 #define NCP_VOLNAME_LEN (16)
 #define NCP_NUMBER_OF_VOLUMES (256)
@@ -158,7 +158,7 @@ struct nw_info_struct {
 #ifdef __KERNEL__
        struct nw_nfs_info nfs;
 #endif
-} __packed;
+} __attribute__((packed));
 
 /* modify mask - use with MODIFY_DOS_INFO structure */
 #define DM_ATTRIBUTES            (cpu_to_le32(0x02))
@@ -190,12 +190,12 @@ struct nw_modify_dos_info {
        __u16 inheritanceGrantMask;
        __u16 inheritanceRevokeMask;
        __u32 maximumSpace;
-} __packed;
+} __attribute__((packed));
 
 struct nw_search_sequence {
        __u8 volNumber;
        __u32 dirBase;
        __u32 sequence;
-} __packed;
+} __attribute__((packed));
 
 #endif                         /* _LINUX_NCP_H */
index 3e1aa1be942ef2297637d0bf4a778eec605d7040..208ae938733143ce0ba2117378423d443a3d8312 100644 (file)
@@ -39,7 +39,7 @@ struct idletimer_tg_info {
        char label[MAX_IDLETIMER_LABEL_SIZE];
 
        /* for kernel module internal use only */
-       struct idletimer_tg *timer __attribute((aligned(8)));
+       struct idletimer_tg *timer __attribute__((aligned(8)));
 };
 
 #endif
index 1167aeb7a34793ff288aff2b7190a966378e5080..eff34ac1880883f70d88282a8ac881a1e8bff8a5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_IPVS_H
 #define _XT_IPVS_H
 
+#include <linux/types.h>
+
 enum {
        XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */
        XT_IPVS_PROTO =         1 << 1,
index 24426c3d6b5ab34a463cf3ee650b497d9e545522..76edadf046d3d3b68215c23ef2a30ce18a86992a 100644 (file)
@@ -56,7 +56,7 @@ struct phonethdr {
        __be16  pn_length;
        __u8    pn_robj;
        __u8    pn_sobj;
-} __packed;
+} __attribute__((packed));
 
 /* Common Phonet payload header */
 struct phonetmsg {
@@ -98,7 +98,7 @@ struct sockaddr_pn {
        __u8 spn_dev;
        __u8 spn_resource;
        __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
-} __packed;
+} __attribute__((packed));
 
 /* Well known address */
 #define PN_DEV_PC      0x10
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
new file mode 100644 (file)
index 0000000..18d75e7
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+struct pxa168_eth_platform_data {
+       int     port_number;
+       int     phy_addr;
+
+       /*
+        * If speed is 0, then speed and duplex are autonegotiated.
+        */
+       int     speed;          /* 0, SPEED_10, SPEED_100 */
+       int     duplex;         /* DUPLEX_HALF or DUPLEX_FULL */
+
+       /*
+        * Override default RX/TX queue sizes if nonzero.
+        */
+       int     rx_queue_size;
+       int     tx_queue_size;
+
+       /*
+        * init callback is used for board-specific initialization,
+        * e.g. on Aspenite it's used to initialize the PHY transceiver.
+        */
+       int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
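A board file is expected to attach this structure as the platform data of the Ethernet platform device before registering it. The sketch below is illustrative only; the PHY address, queue defaults and init hook are assumptions, not taken from any board in this patch:

#include <linux/pxa168_eth.h>

static int example_phy_init(void)
{
	/* board-specific PHY transceiver setup would go here */
	return 0;
}

static struct pxa168_eth_platform_data example_eth_pdata = {
	.port_number	= 0,
	.phy_addr	= 0,	/* assumed PHY address for this sketch */
	.speed		= 0,	/* 0 means autonegotiate speed and duplex */
	.init		= example_phy_init,
};

The structure is then pointed to by the platform device's dev.platform_data so the MAC driver can pick it up at probe time.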
index 4f82326eb2945f2cd275bbcff64151a06ca6ccb4..08c32e4f261aca004ac06d895a1a8bbd73cfd887 100644 (file)
@@ -81,7 +81,7 @@ struct rfkill_event {
        __u8  type;
        __u8  op;
        __u8  soft, hard;
-} __packed;
+} __attribute__((packed));
 
 /*
  * We are planning to be backward and forward compatible with changes
index 3c2ad99fed347e282d913ea2ad43082b6606b248..64458a9a893809af26a6036b3aee1f62209d5107 100644 (file)
@@ -465,7 +465,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
 #ifdef SUPPORT_SYSRQ
        if (port->sysrq) {
                if (ch && time_before(jiffies, port->sysrq)) {
-                       handle_sysrq(ch, port->state->port.tty);
+                       handle_sysrq(ch);
                        port->sysrq = 0;
                        return 1;
                }
index 3c92121ba9afb3f75700e540e24c52f3aae8b974..96eb576d82fdadbd6a3cce0253325c733f6603f3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/kobject_ns.h>
 #include <asm/atomic.h>
 
 struct kobject;
index 609e8ca5f53429bddfc2c29aee5e0411cfe464af..387fa7d05c982b758942f83395e328949324fc1a 100644 (file)
@@ -15,9 +15,7 @@
 #define _LINUX_SYSRQ_H
 
 #include <linux/errno.h>
-
-struct pt_regs;
-struct tty_struct;
+#include <linux/types.h>
 
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
@@ -31,7 +29,7 @@ struct tty_struct;
 #define SYSRQ_ENABLE_RTNICE    0x0100
 
 struct sysrq_key_op {
-       void (*handler)(int, struct tty_struct *);
+       void (*handler)(int);
        char *help_msg;
        char *action_msg;
        int enable_mask;
@@ -44,8 +42,8 @@ struct sysrq_key_op {
  * are available -- else NULL's).
  */
 
-void handle_sysrq(int key, struct tty_struct *tty);
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask);
+void handle_sysrq(int key);
+void __handle_sysrq(int key, bool check_mask);
 int register_sysrq_key(int key, struct sysrq_key_op *op);
 int unregister_sysrq_key(int key, struct sysrq_key_op *op);
 struct sysrq_key_op *__sysrq_get_key_op(int key);
@@ -54,7 +52,11 @@ int sysrq_toggle_support(int enable_mask);
 
 #else
 
-static inline void handle_sysrq(int key, struct tty_struct *tty)
+static inline void handle_sysrq(int key)
+{
+}
+
+static inline void __handle_sysrq(int key, bool check_mask)
 {
 }
 
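With the tty argument gone, a handler now receives only the key that triggered it. A minimal registration against the new prototype reduces to the following sketch (the key choice and messages are hypothetical):

#include <linux/kernel.h>
#include <linux/sysrq.h>

static void sysrq_handle_example(int key)
{
	printk(KERN_INFO "example sysrq handler fired for key '%c'\n", key);
}

static struct sysrq_key_op sysrq_example_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(x)",
	.action_msg	= "Example action",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};

/* register_sysrq_key('x', &sysrq_example_op) at init,
 * unregister_sysrq_key('x', &sysrq_example_op) on teardown */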
index 60c81da77f0f36b94bde7d83562db55dfa707d92..05f7fed2b173eb8205b0754ee3a00de730215e8e 100644 (file)
@@ -37,7 +37,6 @@
 #define UINPUT_VERSION         3
 
 #ifdef __KERNEL__
-#define UINPUT_MINOR           223
 #define UINPUT_NAME            "uinput"
 #define UINPUT_BUFFER_SIZE     16
 #define UINPUT_NUM_REQUESTS    16
index 890bc1472190f2189026e17792750f0f14706ff3..617068134ae8a91cadc1f7ccf42761cf212b5adf 100644 (file)
@@ -247,6 +247,7 @@ int usb_add_config(struct usb_composite_dev *,
  *     value; it should return zero on successful initialization.
  * @unbind: Reverses @bind(); called as a side effect of unregistering
  *     this driver.
+ * @disconnect: optional driver disconnect method
  * @suspend: Notifies when the host stops sending USB traffic,
  *     after function notifications
  * @resume: Notifies configuration when the host restarts USB traffic,
index 84a4c44c208b78a7eb9602091bc5a1401afdab8d..55675b1efb28659b37ff7b165fc6aa6ab0835fd7 100644 (file)
@@ -342,8 +342,7 @@ extern int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
 extern void usb_serial_generic_process_read_urb(struct urb *urb);
 extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
                                                void *dest, size_t size);
-extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
-                                       struct usb_serial_port *port,
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
                                        unsigned int ch);
 extern int usb_serial_handle_break(struct usb_serial_port *port);
 
index 6228b5b77d35860019adcf84747819e0e9e43ea6..e9e1524b582cbde9b1988107d9c50a86a68f08ae 100644 (file)
@@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev,
  *     Nested calls are supported (a per-resource counter is maintained)
  */
 
-extern int vga_get(struct pci_dev *pdev, unsigned int rsrc,
-                                                                                       int interruptible);
+#if defined(CONFIG_VGA_ARB)
+extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+#else
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
+#endif
 
 /**
  *     vga_get_interruptible
@@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev,
  *     are already locked by another card. It can be called in any context
  */
 
+#if defined(CONFIG_VGA_ARB)
 extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
+#else
+static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
+#endif
 
 /**
  *     vga_put         - release lock on legacy VGA resources
@@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
  *     released if the counter reaches 0.
  */
 
+#if defined(CONFIG_VGA_ARB)
 extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+#else
+#define vga_put(pdev, rsrc)
+#endif
 
 
 /**
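With stubs now provided for the !CONFIG_VGA_ARB case, callers can bracket legacy VGA accesses unconditionally instead of wrapping them in their own ifdefs. A rough usage sketch (the function and the resource choice are illustrative, not from this patch):

#include <linux/pci.h>
#include <linux/vgaarb.h>

static void example_touch_legacy_vga(struct pci_dev *pdev)
{
	/* the stub vga_get() returns 0, so this succeeds immediately
	 * when the arbiter is compiled out */
	if (vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO))
		return;

	/* ... access the legacy 0x3c0-0x3df VGA I/O range here ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO);
}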
index c624126a9c8a6fb889fe596ea261f649236d51a7..425bcfe56c620211678d94f6d3923a9451305db2 100644 (file)
@@ -81,14 +81,16 @@ TRACE_EVENT(timer_expire_entry,
        TP_STRUCT__entry(
                __field( void *,        timer   )
                __field( unsigned long, now     )
+               __field( void *,        function)
        ),
 
        TP_fast_assign(
                __entry->timer          = timer;
                __entry->now            = jiffies;
+               __entry->function       = timer->function;
        ),
 
-       TP_printk("timer=%p now=%lu", __entry->timer, __entry->now)
+       TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
 );
 
 /**
@@ -200,14 +202,16 @@ TRACE_EVENT(hrtimer_expire_entry,
        TP_STRUCT__entry(
                __field( void *,        hrtimer )
                __field( s64,           now     )
+               __field( void *,        function)
        ),
 
        TP_fast_assign(
                __entry->hrtimer        = hrtimer;
                __entry->now            = now->tv64;
+               __entry->function       = hrtimer->function;
        ),
 
-       TP_printk("hrtimer=%p now=%llu", __entry->hrtimer,
+       TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
                  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
  );
 
index ce9d671c636c85499d7077361710baab3d50b519..a785a3b0c8c7d89d9a46e1a1d90300cb91da5308 100644 (file)
 #define XEN_IOPORT_PROTOVER    (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */
 #define XEN_IOPORT_PRODNUM     (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */
 
-#define XEN_UNPLUG_ALL_IDE_DISKS 1
-#define XEN_UNPLUG_ALL_NICS 2
-#define XEN_UNPLUG_AUX_IDE_DISKS 4
-#define XEN_UNPLUG_ALL 7
-#define XEN_UNPLUG_IGNORE 8
+#define XEN_UNPLUG_ALL_IDE_DISKS       (1<<0)
+#define XEN_UNPLUG_ALL_NICS            (1<<1)
+#define XEN_UNPLUG_AUX_IDE_DISKS       (1<<2)
+#define XEN_UNPLUG_ALL                 (XEN_UNPLUG_ALL_IDE_DISKS|\
+                                        XEN_UNPLUG_ALL_NICS|\
+                                        XEN_UNPLUG_AUX_IDE_DISKS)
+
+#define XEN_UNPLUG_UNNECESSARY                 (1<<16)
+#define XEN_UNPLUG_NEVER                       (1<<17)
 
 static inline int xen_must_unplug_nics(void) {
 #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \
index 3c2d4972d2352ad1bff0fb31a676f467420b6ddd..de407c78178d9014332255f2d8ae412f9fbd5e5a 100644 (file)
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
 };
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
                printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
index 28b844118bbd9d27f1e6bede2fd2502c6fd4f1b8..caf057a3de0e8457cb35e92a4b371f4f4597df3a 100644 (file)
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
        if (argc != 1)
                return KDB_ARGCOUNT;
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], NULL, 0);
+       __handle_sysrq(*argv[1], false);
        kdb_trap_printk--;
 
        return 0;
index 996a4dec5f968406aae7ef9b79240768da1ffddb..b7e4c362361bcf46fe34992e9bba1852dd478b71 100644 (file)
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal.  Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
                        int pm_qos_class, s32 value)
 {
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
        pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
        if (pm_qos_class >= 0) {
-               struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+               struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        return -ENOMEM;
 
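The pm_qos hunk above also fixes a swapped-argument bug: kzalloc() takes the size first and the GFP flags second, so the old kzalloc(GFP_KERNEL, sizeof(*req)) allocated a buffer whose size was GFP_KERNEL's numeric value and passed a meaningless flag. A hedged reminder sketch with an invented struct name:

#include <linux/slab.h>

struct example_req {
	int value;
};

static struct example_req *example_alloc(void)
{
	/* size first, flags second; the returned memory is zeroed */
	return kzalloc(sizeof(struct example_req), GFP_KERNEL);
}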
index e8b337006276e71e782876db74cb94a4bbcb41a8..d52359374e8501e8c1f4e8a88427468249509fb3 100644 (file)
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
 
 static DECLARE_WORK(poweroff_work, do_poweroff);
 
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
        /* run sysrq poweroff on boot cpu */
        schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
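Several hunks in this merge drop the unused struct tty_struct * argument from sysrq handlers (and the matching parameter from __handle_sysrq()). A hedged sketch of what a handler and its key op look like after the change, assuming the sysrq_key_op layout of this kernel series; the handler, key and messages are invented:

#include <linux/sysrq.h>
#include <linux/kernel.h>

static void sysrq_handle_example(int key)	/* new single-argument signature */
{
	printk(KERN_INFO "sysrq: example handler invoked via '%c'\n", key);
}

static struct sysrq_key_op sysrq_example_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(x)",
	.action_msg	= "Running example handler",
};

/* registration is unchanged: register_sysrq_key('x', &sysrq_example_op); */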
index 41541d79e3c85ac7d367f3ddffc4e51a0f63e755..09b574e7f4df7c14615d104c3fe736f7c1be0847 100644 (file)
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
                /*
                 * Owner changed, break to re-assess state.
                 */
-               if (lock->owner != owner)
+               if (lock->owner != owner) {
+                       /*
+                        * If the lock has switched to a different owner,
+                        * we likely have heavy contention. Return 0 to quit
+                        * optimistic spinning and not contend further:
+                        */
+                       if (lock->owner)
+                               return 0;
                        break;
+               }
 
                /*
                 * Is that owner really running on that cpu?
index 806d1b227a21060aac100994a8992266c00b59b5..ab661ebc4895a8471ecc808825477cf0c3558444 100644 (file)
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)
 
        raw_spin_lock_irqsave(&rq->lock, flags);
 
+       update_rq_clock(rq);
+
        if (unlikely(task_cpu(p) != this_cpu))
                __set_task_cpu(p, this_cpu);
 
index 056468eae7cfce7a76bf5c2364f71c722adeb325..a6b7e0e0f3eb092aac06e8bc017dcf5ea80105af 100644 (file)
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
        unsigned long addr = stack_dump_trace[i];
 
-       return seq_printf(m, "%pF\n", (void *)addr);
+       return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
index 613bc1f046108ec52b8f7372fe2e7e66aa8fe4c7..0d53c8e853b12450cf0c74665d13a22e91a47543 100644 (file)
@@ -206,6 +206,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
 {
+       /* Ensure the watchdog never gets throttled */
+       event->hw.interrupts = 0;
+
        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
index b93579504dfaaafee092156c113b36130b0758a7..70af0a7f97c0eb4801e177458d182ab6baad2767 100644 (file)
@@ -123,7 +123,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
  * @kobj: struct kobject that the action is happening to
  * @envp_ext: pointer to environmental data
  *
- * Returns 0 if kobject_uevent() is completed with success or the
+ * Returns 0 if kobject_uevent_env() is completed with success or the
  * corresponding error when it fails.
  */
 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
@@ -317,7 +317,7 @@ exit:
 EXPORT_SYMBOL_GPL(kobject_uevent_env);
 
 /**
- * kobject_uevent - notify userspace by ending an uevent
+ * kobject_uevent - notify userspace by sending an uevent
  *
  * @action: action that is happening
  * @kobj: struct kobject that the action is happening to
index 5b7d4623f0b70aee189deda3bc8318a590476160..efd16fa80b1cfd55f2e1f1295f1cd45765925abf 100644 (file)
@@ -174,14 +174,16 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
        struct radix_tree_node *node =
                        container_of(head, struct radix_tree_node, rcu_head);
+       int i;
 
        /*
         * must only free zeroed nodes into the slab. radix_tree_shrink
         * can leave us with a non-NULL entry in the first slot, so clear
         * that here to make sure.
         */
-       tag_clear(node, 0, 0);
-       tag_clear(node, 1, 0);
+       for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+               tag_clear(node, i, 0);
+
        node->slots[0] = NULL;
        node->count = 0;
 
@@ -623,6 +625,13 @@ EXPORT_SYMBOL(radix_tree_tag_get);
  * also settag. The function stops either after tagging nr_to_tag items or
  * after reaching last_index.
  *
+ * The tags must be set from the leaf level only and propagated back up the
+ * path to the root. We must do this so that we resolve the full path before
+ * setting any tags on intermediate nodes. If we set tags as we descend, then
+ * we can get to the leaf node and find that the index that has the iftag
+ * set is outside the range we are scanning. This results in dangling tags and
+ * can lead to problems with later tag operations (e.g. livelocks on lookups).
+ *
  * The function returns number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
  * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
@@ -633,9 +642,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                unsigned long nr_to_tag,
                unsigned int iftag, unsigned int settag)
 {
-       unsigned int height = root->height, shift;
-       unsigned long tagged = 0, index = *first_indexp;
-       struct radix_tree_node *open_slots[height], *slot;
+       unsigned int height = root->height;
+       struct radix_tree_path path[height];
+       struct radix_tree_path *pathp = path;
+       struct radix_tree_node *slot;
+       unsigned int shift;
+       unsigned long tagged = 0;
+       unsigned long index = *first_indexp;
 
        last_index = min(last_index, radix_tree_maxindex(height));
        if (index > last_index)
@@ -655,6 +668,13 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
        slot = radix_tree_indirect_to_ptr(root->rnode);
 
+       /*
+        * we fill the path from (root->height - 2) to 0, leaving the index at
+        * (root->height - 1) as a terminator. Zero the node in the terminator
+        * so that we can use this to end walk loops back up the path.
+        */
+       path[height - 1].node = NULL;
+
        for (;;) {
                int offset;
 
@@ -663,17 +683,30 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                        goto next;
                if (!tag_get(slot, iftag, offset))
                        goto next;
+               if (height > 1) {
+                       /* Go down one level */
+                       height--;
+                       shift -= RADIX_TREE_MAP_SHIFT;
+                       path[height - 1].node = slot;
+                       path[height - 1].offset = offset;
+                       slot = slot->slots[offset];
+                       continue;
+               }
+
+               /* tag the leaf */
+               tagged++;
                tag_set(slot, settag, offset);
-               if (height == 1) {
-                       tagged++;
-                       goto next;
+
+               /* walk back up the path tagging interior nodes */
+               pathp = &path[0];
+               while (pathp->node) {
+                       /* stop if we find a node with the tag already set */
+                       if (tag_get(pathp->node, settag, pathp->offset))
+                               break;
+                       tag_set(pathp->node, settag, pathp->offset);
+                       pathp++;
                }
-               /* Go down one level */
-               height--;
-               shift -= RADIX_TREE_MAP_SHIFT;
-               open_slots[height] = slot;
-               slot = slot->slots[offset];
-               continue;
+
 next:
                /* Go to next item at level determined by 'shift' */
                index = ((index >> shift) + 1) << shift;
@@ -688,7 +721,7 @@ next:
                         * last_index is guaranteed to be in the tree, what
                         * we do below cannot wander astray.
                         */
-                       slot = open_slots[height];
+                       slot = path[height - 1].node;
                        height++;
                        shift += RADIX_TREE_MAP_SHIFT;
                }
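For context, the function being reworked copies one tag onto another over an index range in batches, without allocating; its main in-tree user is page-cache writeback. A hedged usage sketch for this kernel series (tag numbers, batch size and helper name are assumptions, whatever lock protects the tree is deliberately elided, and end is assumed to be below ULONG_MAX, per the WARNING in the comment above):

#include <linux/radix-tree.h>
#include <linux/sched.h>

#define EXAMPLE_TAG_BATCH 128UL

/* copy tag 0 onto tag 1 for every slot in [start, end] that has tag 0 set */
static void example_retag_range(struct radix_tree_root *root,
				unsigned long start, unsigned long end)
{
	unsigned long index = start;
	unsigned long tagged;

	do {
		/* callers normally hold the tree lock around each batch */
		tagged = radix_tree_range_tag_if_tagged(root, &index, end,
							EXAMPLE_TAG_BATCH,
							0 /* iftag */,
							1 /* settag */);
		cond_resched();
	} while (tagged >= EXAMPLE_TAG_BATCH && index <= end);
}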
index 2ed2267439df0f894011ab62adef6d7efe68db9a..6b2ab10518512052c895dd5db7ff0f20fd1df2f3 100644 (file)
@@ -2760,11 +2760,9 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
  * doesn't hit another vma.
- *
- * The "find_vma()" will do the right thing even if we wrap
  */
 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
 {
@@ -2783,6 +2781,15 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
 
                expand_stack(vma, address - PAGE_SIZE);
        }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+               struct vm_area_struct *next = vma->vm_next;
+
+               /* As VM_GROWSDOWN but s/below/above/ */
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+               expand_upwards(vma, address + PAGE_SIZE);
+       }
        return 0;
 }
 
index 331e51af38c9c950c8295fa125c67d00ba540025..6128dc8e5ede709cada129438fbac101895aa09d 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1716,9 +1716,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-#ifndef CONFIG_IA64
-static
-#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
index c09ef5219cbe36f267a37f55d6fc670815082522..e3bccac1f0255bb78373e6268aab787a370ee520 100644 (file)
@@ -985,22 +985,16 @@ continue_unlock:
                                }
                        }
 
-                       if (wbc->nr_to_write > 0) {
-                               if (--wbc->nr_to_write == 0 &&
-                                   wbc->sync_mode == WB_SYNC_NONE) {
-                                       /*
-                                        * We stop writing back only if we are
-                                        * not doing integrity sync. In case of
-                                        * integrity sync we have to keep going
-                                        * because someone may be concurrently
-                                        * dirtying pages, and we might have
-                                        * synced a lot of newly appeared dirty
-                                        * pages, but have not synced all of the
-                                        * old dirty pages.
-                                        */
-                                       done = 1;
-                                       break;
-                               }
+                       /*
+                        * We stop writing back only if we are not doing
+                        * integrity sync. In case of integrity sync we have to
+                        * keep going until we have written all the pages
+                        * we tagged for writeback prior to entering this loop.
+                        */
+                       if (--wbc->nr_to_write <= 0 &&
+                           wbc->sync_mode == WB_SYNC_NONE) {
+                               done = 1;
+                               break;
                        }
                }
                pagevec_release(&pvec);
@@ -1132,6 +1126,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
                task_io_account_write(PAGE_CACHE_SIZE);
        }
 }
+EXPORT_SYMBOL(account_page_dirtied);
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
index 87b9e8ad450962afa1159b763f0aa0a977ab9a88..f6f0d2dda2eae8480860cf57f5a9cfce69820716 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -316,7 +316,7 @@ void __init anon_vma_init(void)
  */
 struct anon_vma *page_lock_anon_vma(struct page *page)
 {
-       struct anon_vma *anon_vma;
+       struct anon_vma *anon_vma, *root_anon_vma;
        unsigned long anon_mapping;
 
        rcu_read_lock();
@@ -327,8 +327,21 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
                goto out;
 
        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-       anon_vma_lock(anon_vma);
-       return anon_vma;
+       root_anon_vma = ACCESS_ONCE(anon_vma->root);
+       spin_lock(&root_anon_vma->lock);
+
+       /*
+        * If this page is still mapped, then its anon_vma cannot have been
+        * freed.  But if it has been unmapped, we have no security against
+        * the anon_vma structure being freed and reused (for another anon_vma:
+        * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
+        * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
+        * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+        */
+       if (page_mapped(page))
+               return anon_vma;
+
+       spin_unlock(&root_anon_vma->lock);
 out:
        rcu_read_unlock();
        return NULL;
index 3d59c9bf8febf5064150ff50d22db9d569453131..3bccdd12a2642a06e1c5078b5c4065419a57428a 100644 (file)
@@ -510,7 +510,8 @@ static int vlan_dev_open(struct net_device *dev)
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_join(dev);
 
-       netif_carrier_on(dev);
+       if (netif_carrier_ok(real_dev))
+               netif_carrier_on(dev);
        return 0;
 
 clear_allmulti:
index 51d6c31679757f58e62368a42edba16d9bfc4d09..e8f4f9a57f1258fb1a589c87a956797019c67504 100644 (file)
@@ -1420,6 +1420,9 @@ static int translate_compat_table(const char *name,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(arpt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 97b64b22c41214858d6ddb1fb9cf8e292468e23d..d163f2e3b2e99e5f18ae9997d3c74867b3e79354 100644 (file)
@@ -1751,6 +1751,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ipt_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 29a7bca29e3fddec24a55ea2e6ca228bc7a452ba..8e754be92c2450e7142a958466b493dc077f5772 100644 (file)
@@ -1766,6 +1766,9 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
+               if (strcmp(ip6t_get_target(iter1)->u.user.name,
+                   XT_ERROR_TARGET) == 0)
+                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 9616c32d1076dda982fff4c6da5c6e1057cf39d1..5bb8353105cca7c761647b94c346cc1da82c42e9 100644 (file)
@@ -169,6 +169,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 {
        struct irlan_cb *self = netdev_priv(dev);
        int ret;
+       unsigned int len;
 
        /* skb headroom large enough to contain all IrDA-headers? */
        if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +189,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 
        dev->trans_start = jiffies;
 
+       len = skb->len;
        /* Now queue the packet in the transport layer */
        if (self->use_udata)
                ret = irttp_udata_request(self->tsap_data, skb);
@@ -209,7 +211,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
                self->stats.tx_dropped++;
        } else {
                self->stats.tx_packets++;
-               self->stats.tx_bytes += skb->len;
+               self->stats.tx_bytes += len;
        }
 
        return NETDEV_TX_OK;
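The irlan hunk fixes a use-after-free in the statistics path: once the skb has been handed to irttp_*_request() it may already have been consumed, so its length must be captured beforehand. The same pattern in a hedged, generic form (driver, lower-layer helper and its return convention are invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

int example_lower_tx(struct sk_buff *skb);	/* assumed to consume the skb */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;	/* capture before ownership passes */
	int ret;

	ret = example_lower_tx(skb);	/* skb may already be freed here */
	if (ret) {
		dev->stats.tx_dropped++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;	/* not skb->len */
	}
	return NETDEV_TX_OK;
}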
index 8648a9922aabace0231de8eec18b88e30239fd5f..980fe4ad0016c52ed260735c955b0600181c7720 100644 (file)
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
-       struct sk_buff *skb, *frag __maybe_unused = NULL;
+       struct sk_buff *skb, *data_skb;
        int err;
 
        if (flags&MSG_OOB)
@@ -1418,45 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        if (skb == NULL)
                goto out;
 
+       data_skb = skb;
+
 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES
        if (unlikely(skb_shinfo(skb)->frag_list)) {
-               bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
                /*
-                * If this skb has a frag_list, then here that means that
-                * we will have to use the frag_list skb for compat tasks
-                * and the regular skb for non-compat tasks.
+                * If this skb has a frag_list, then that means that we
+                * will have to use the frag_list skb's data for compat tasks
+                * and the regular skb's data for normal (non-compat) tasks.
                 *
-                * The skb might (and likely will) be cloned, so we can't
-                * just reset frag_list and go on with things -- we need to
-                * keep that. For the compat case that's easy -- simply get
-                * a reference to the compat skb and free the regular one
-                * including the frag. For the non-compat case, we need to
-                * avoid sending the frag to the user -- so assign NULL but
-                * restore it below before freeing the skb.
+                * If we need to send the compat skb, assign it to the
+                * 'data_skb' variable so that it will be used below for data
+                * copying. We keep 'skb' for everything else, including
+                * freeing both later.
                 */
-               if (need_compat) {
-                       struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
-                       skb_get(compskb);
-                       kfree_skb(skb);
-                       skb = compskb;
-               } else {
-                       frag = skb_shinfo(skb)->frag_list;
-                       skb_shinfo(skb)->frag_list = NULL;
-               }
+               if (flags & MSG_CMSG_COMPAT)
+                       data_skb = skb_shinfo(skb)->frag_list;
        }
 #endif
 
        msg->msg_namelen = 0;
 
-       copied = skb->len;
+       copied = data_skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }
 
-       skb_reset_transport_header(skb);
-       err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       skb_reset_transport_header(data_skb);
+       err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
 
        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1476,11 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
-               copied = skb->len;
-
-#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
-       skb_shinfo(skb)->frag_list = frag;
-#endif
+               copied = data_skb->len;
 
        skb_free_datagram(sk, skb);
 
index 795a00b7f2cb7aa6a539adb3c86a2cad180575f1..c93588c2d553cf6b162ab500cf1fd72dbbc9c26c 100644 (file)
@@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
        struct rds_notifier *notifier;
-       struct rds_rdma_notify cmsg;
+       struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
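The "= { 0 }" initializer closes a kernel-stack infoleak: compiler-inserted padding inside a struct that is later copied to user space would otherwise carry stale stack bytes. A hedged generic sketch (struct and function names are invented); memset() is the belt-and-braces alternative, since a braced initializer is not strictly guaranteed by the C standard to clear padding:

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_notify {
	u64 cookie;
	s32 status;		/* 4 bytes of padding follow on 64-bit */
};

static int example_put_notify(void __user *uptr, u64 cookie, s32 status)
{
	struct example_notify n = { 0 };	/* fill holes with zero */

	n.cookie = cookie;
	n.status = status;
	return copy_to_user(uptr, &n, sizeof(n)) ? -EFAULT : 0;
}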
index c39327e60ea499919937fae08a8d5bc5dc9c4fd9..515253fe46cfdb3d034f983090abfeff7c8c0aa2 100644 (file)
@@ -497,7 +497,9 @@ int conf_write_defconfig(const char *filename)
                        /*
                         * If symbol is a choice value and equals to the
                         * default for a choice - skip.
-                        * But only if value is bool and equal to "y" .
+                        * But only if value is bool and equal to "y" and
+                        * choice is not "optional".
+                        * (If choice is "optional" then all values can be "n")
                         */
                        if (sym_is_choice_value(sym)) {
                                struct symbol *cs;
@@ -505,7 +507,7 @@ int conf_write_defconfig(const char *filename)
 
                                cs = prop_get_symbol(sym_get_choice_prop(sym));
                                ds = sym_choice_default(cs);
-                               if (sym == ds) {
+                               if (!sym_is_optional(cs) && sym == ds) {
                                        if ((sym->type == S_BOOLEAN) &&
                                            sym_get_tristate_value(sym) == yes)
                                                goto next_menu;
index e95718fea3555de1b8db125b16add3cf9a350719..943712ca6c0a6a0eb5bbf97dbba2a2d20e495b47 100644 (file)
@@ -937,6 +937,8 @@ static void sym_check_print_recursive(struct symbol *last_sym)
                sym = stack->sym;
                next_sym = stack->next ? stack->next->sym : last_sym;
                prop = stack->prop;
+               if (prop == NULL)
+                       prop = stack->sym->prop;
 
                /* for choice values find the menu entry (used below) */
                if (sym_is_choice(sym) || sym_is_choice_value(sym)) {
index 67d59c7a18dc57b1580a833ed37e832eff227c85..5325423ceab483833c8f09efb37a336aaa8f9588 100644 (file)
@@ -44,7 +44,9 @@ all:
 
 Makefile:;
 
-\$(all) %/: all
+\$(all): all
        @:
 
+%/: all
+       @:
 EOF
index e90a91cc5185709d31a9a128b4790b676a35599e..057b6b3c5dfb4cf0677add9a7c6f04c5029d2e15 100755 (executable)
@@ -43,7 +43,7 @@ scm_version()
        fi
 
        # Check for git and a git repo.
-       if head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+       if test -d .git && head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
 
                # If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
                # it, because this version is defined in the top level Makefile.
@@ -85,7 +85,7 @@ scm_version()
        fi
 
        # Check for mercurial and a mercurial repo.
-       if hgid=`hg id 2>/dev/null`; then
+       if test -d .hg && hgid=`hg id 2>/dev/null`; then
                tag=`printf '%s' "$hgid" | cut -s -d' ' -f2`
 
                # Do we have an untagged version?
index dd8fb86c842b85feef9a3018f2dcf42b2a06dba4..3827092cc1d2802e0902a2c809032cac6f6ef9da 100644 (file)
@@ -589,6 +589,7 @@ int /*__devinit*/ snd_hda_bus_new(struct snd_card *card,
        bus->ops = temp->ops;
 
        mutex_init(&bus->cmd_mutex);
+       mutex_init(&bus->prepare_mutex);
        INIT_LIST_HEAD(&bus->codec_list);
 
        snprintf(bus->workq_name, sizeof(bus->workq_name),
@@ -1068,7 +1069,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
        codec->addr = codec_addr;
        mutex_init(&codec->spdif_mutex);
        mutex_init(&codec->control_mutex);
-       mutex_init(&codec->prepare_mutex);
        init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
        init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
        snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
@@ -1213,6 +1213,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                                u32 stream_tag,
                                int channel_id, int format)
 {
+       struct hda_codec *c;
        struct hda_cvt_setup *p;
        unsigned int oldval, newval;
        int i;
@@ -1253,10 +1254,12 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
        p->dirty = 0;
 
        /* make other inactive cvts with the same stream-tag dirty */
-       for (i = 0; i < codec->cvt_setups.used; i++) {
-               p = snd_array_elem(&codec->cvt_setups, i);
-               if (!p->active && p->stream_tag == stream_tag)
-                       p->dirty = 1;
+       list_for_each_entry(c, &codec->bus->codec_list, list) {
+               for (i = 0; i < c->cvt_setups.used; i++) {
+                       p = snd_array_elem(&c->cvt_setups, i);
+                       if (!p->active && p->stream_tag == stream_tag)
+                               p->dirty = 1;
+               }
        }
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_setup_stream);
@@ -1306,12 +1309,16 @@ static void really_cleanup_stream(struct hda_codec *codec,
 /* clean up the all conflicting obsolete streams */
 static void purify_inactive_streams(struct hda_codec *codec)
 {
+       struct hda_codec *c;
        int i;
 
-       for (i = 0; i < codec->cvt_setups.used; i++) {
-               struct hda_cvt_setup *p = snd_array_elem(&codec->cvt_setups, i);
-               if (p->dirty)
-                       really_cleanup_stream(codec, p);
+       list_for_each_entry(c, &codec->bus->codec_list, list) {
+               for (i = 0; i < c->cvt_setups.used; i++) {
+                       struct hda_cvt_setup *p;
+                       p = snd_array_elem(&c->cvt_setups, i);
+                       if (p->dirty)
+                               really_cleanup_stream(c, p);
+               }
        }
 }
 
@@ -3502,11 +3509,11 @@ int snd_hda_codec_prepare(struct hda_codec *codec,
                          struct snd_pcm_substream *substream)
 {
        int ret;
-       mutex_lock(&codec->prepare_mutex);
+       mutex_lock(&codec->bus->prepare_mutex);
        ret = hinfo->ops.prepare(hinfo, codec, stream, format, substream);
        if (ret >= 0)
                purify_inactive_streams(codec);
-       mutex_unlock(&codec->prepare_mutex);
+       mutex_unlock(&codec->bus->prepare_mutex);
        return ret;
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_prepare);
@@ -3515,9 +3522,9 @@ void snd_hda_codec_cleanup(struct hda_codec *codec,
                           struct hda_pcm_stream *hinfo,
                           struct snd_pcm_substream *substream)
 {
-       mutex_lock(&codec->prepare_mutex);
+       mutex_lock(&codec->bus->prepare_mutex);
        hinfo->ops.cleanup(hinfo, codec, substream);
-       mutex_unlock(&codec->prepare_mutex);
+       mutex_unlock(&codec->bus->prepare_mutex);
 }
 EXPORT_SYMBOL_HDA(snd_hda_codec_cleanup);
 
index 4303353feda99af99b63b63ed0a9bb6a404f8fdc..62c70224010808dca1b7a8330e28dfa791bbe61e 100644 (file)
@@ -648,6 +648,7 @@ struct hda_bus {
        struct hda_codec *caddr_tbl[HDA_MAX_CODEC_ADDRESS + 1];
 
        struct mutex cmd_mutex;
+       struct mutex prepare_mutex;
 
        /* unsolicited event queue */
        struct hda_bus_unsolicited *unsol;
@@ -826,7 +827,6 @@ struct hda_codec {
 
        struct mutex spdif_mutex;
        struct mutex control_mutex;
-       struct mutex prepare_mutex;
        unsigned int spdif_status;      /* IEC958 status bits */
        unsigned short spdif_ctls;      /* SPDIF control bits */
        unsigned int spdif_in_enable;   /* SPDIF input enable? */
index 803b298f741101065b5966f17a84cddf01028ef6..26c3ade735838624c9f6d2ca889b4ca724df96db 100644 (file)
@@ -596,6 +596,8 @@ void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
 }
 EXPORT_SYMBOL_HDA(snd_hda_eld_proc_free);
 
+#endif /* CONFIG_PROC_FS */
+
 /* update PCM info based on ELD */
 void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
                              struct hda_pcm_stream *codec_pars)
@@ -644,5 +646,3 @@ void hdmi_eld_update_pcm_info(struct hdmi_eld *eld, struct hda_pcm_stream *pcm,
        pcm->maxbps = min(pcm->maxbps, codec_pars->maxbps);
 }
 EXPORT_SYMBOL_HDA(hdmi_eld_update_pcm_info);
-
-#endif /* CONFIG_PROC_FS */
index c424952a734e0e48546518fe6b47ccbf959e9aee..5cdb80edbd7f06d0cc00db6dd43cc325a1ef7779 100644 (file)
@@ -3059,6 +3059,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
        {}
index 2bc0f07cf33fe4f67720fc3c9487a817b3e77823..afd6022a96a75bda3b24b652973dccbe591c625f 100644 (file)
@@ -707,8 +707,6 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                              u32 stream_tag, int format)
 {
        struct hdmi_spec *spec = codec->spec;
-       int tag;
-       int fmt;
        int pinctl;
        int new_pinctl = 0;
        int i;
@@ -745,24 +743,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                return -EINVAL;
        }
 
-       tag = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0) >> 4;
-       fmt = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0);
-
-       snd_printdd("hdmi_setup_stream: "
-                   "NID=0x%x, %sstream=0x%x, %sformat=0x%x\n",
-                   nid,
-                   tag == stream_tag ? "" : "new-",
-                   stream_tag,
-                   fmt == format ? "" : "new-",
-                   format);
-
-       if (tag != stream_tag)
-               snd_hda_codec_write(codec, nid, 0,
-                                   AC_VERB_SET_CHANNEL_STREAMID,
-                                   stream_tag << 4);
-       if (fmt != format)
-               snd_hda_codec_write(codec, nid, 0,
-                                   AC_VERB_SET_STREAM_FORMAT, format);
+       snd_hda_codec_setup_stream(codec, nid, stream_tag, 0, format);
        return 0;
 }
 
index d382d3c81c0fc5bb0f320fcad17b96d67f6ef473..36a9b83a6174d93f703bf76795e7ba140901c739 100644 (file)
@@ -69,20 +69,12 @@ static int intel_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        return hdmi_setup_stream(codec, hinfo->nid, stream_tag, format);
 }
 
-static int intel_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          struct snd_pcm_substream *substream)
-{
-       return 0;
-}
-
 static struct hda_pcm_stream intel_hdmi_pcm_playback = {
        .substreams = 1,
        .channels_min = 2,
        .ops = {
                .open = hdmi_pcm_open,
                .prepare = intel_hdmi_playback_pcm_prepare,
-               .cleanup = intel_hdmi_playback_pcm_cleanup,
        },
 };
 
index f636870dc718d4445ed400924f3075b11b186159..69b950d527c31d966846a3c6e8f0ff30bdbe886c 100644 (file)
@@ -326,13 +326,6 @@ static int nvhdmi_dig_playback_pcm_prepare_8ch(struct hda_pcm_stream *hinfo,
        return 0;
 }
 
-static int nvhdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
-                                          struct hda_codec *codec,
-                                          struct snd_pcm_substream *substream)
-{
-       return 0;
-}
-
 static int nvhdmi_dig_playback_pcm_prepare_2ch(struct hda_pcm_stream *hinfo,
                                        struct hda_codec *codec,
                                        unsigned int stream_tag,
@@ -350,7 +343,6 @@ static struct hda_pcm_stream nvhdmi_pcm_digital_playback_8ch_89 = {
        .ops = {
                .open = hdmi_pcm_open,
                .prepare = nvhdmi_dig_playback_pcm_prepare_8ch_89,
-               .cleanup = nvhdmi_playback_pcm_cleanup,
        },
 };
 
index f3f861bd1bf880f650d4361df30092f2b034d09c..95148e58026cfb045793d3ba26d570f26108c284 100644 (file)
@@ -6303,6 +6303,21 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
        { .id = 0x111d76b5, .name = "92HD71B6X", .patch = patch_stac92hd71bxx },
        { .id = 0x111d76b6, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
        { .id = 0x111d76b7, .name = "92HD71B5X", .patch = patch_stac92hd71bxx },
+       { .id = 0x111d76c0, .name = "92HD89C3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c1, .name = "92HD89C2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c2, .name = "92HD89C1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c3, .name = "92HD89B3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c4, .name = "92HD89B2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c5, .name = "92HD89B1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c6, .name = "92HD89E3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c7, .name = "92HD89E2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c8, .name = "92HD89E1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76c9, .name = "92HD89D3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76ca, .name = "92HD89D2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cb, .name = "92HD89D1", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cc, .name = "92HD89F3", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
+       { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
        {} /* terminator */
 };
 
index 6433e65c9507d20a65be6c0b14af1275c30e0c40..46774924957643874171c1c4e0dfd9a20672d2f3 100644 (file)
@@ -1774,6 +1774,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
                .name = "HP/Compaq nx7010",
                .type = AC97_TUNE_MUTE_LED
         },
+       {
+               .subvendor = 0x1014,
+               .subdevice = 0x0534,
+               .name = "ThinkPad X31",
+               .type = AC97_TUNE_INV_EAPD
+       },
        {
                .subvendor = 0x1014,
                .subdevice = 0x1f00,
index a11daa1e905b3552ed2c1a8dc178e29e5491cca6..c81da05a4f11ee0ada1ad170dea17aea207eb969 100644 (file)
@@ -254,6 +254,9 @@ static int imx_ssi_hw_params(struct snd_pcm_substream *substream,
                dma_data = &ssi->dma_params_rx;
        }
 
+       if (ssi->flags & IMX_SSI_SYN)
+               reg = SSI_STCCR;
+
        snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
 
        sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK;