Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Nov 2010 16:40:23 +0000 (08:40 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 12 Nov 2010 16:40:23 +0000 (08:40 -0800)
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, pvclock: Remove leftover scale_delta() function
  x86, apic: Remove double #include
  x86: Adjust section annotations in AMD Fam10 MMCONF enabling code
  x86, UV: Update node controller MMRs
  x86: Remove unnecessary casts of void ptr returning alloc function return values
  x86: Address gcc4.6 "set but not used" warnings in apic.h
  x86, mm: Fix section mismatch in tlb.c

481 files changed:
Documentation/ABI/obsolete/proc-pid-oom_adj [new file with mode: 0644]
Documentation/filesystems/Locking
Documentation/filesystems/xfs-delayed-logging-design.txt
Documentation/i2c/busses/i2c-i801
Documentation/leds-class.txt
Documentation/leds/leds-lp5521.txt [new file with mode: 0644]
Documentation/leds/leds-lp5523.txt [new file with mode: 0644]
Documentation/scsi/ChangeLog.megaraid_sas
Documentation/sysctl/kernel.txt
MAINTAINERS
Makefile
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-shmobile/board-ap4evb.c
arch/arm/mach-shmobile/clock-sh7372.c
arch/arm/mach-shmobile/include/mach/gpio.h
arch/arm/mach-shmobile/include/mach/sh7372.h
arch/arm/mach-u300/spi.c
arch/m68k/include/asm/irqflags.h
arch/m68k/include/asm/machdep.h
arch/powerpc/kernel/kvm.c
arch/powerpc/kvm/booke_interrupts.S
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/kvm/timing.c
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/boards/Kconfig
arch/sh/boards/Makefile
arch/sh/boards/board-edosk7705.c [new file with mode: 0644]
arch/sh/boards/board-secureedge5410.c [moved from arch/sh/boards/mach-snapgear/setup.c with 70% similarity]
arch/sh/boards/mach-edosk7705/Makefile [deleted file]
arch/sh/boards/mach-edosk7705/io.c [deleted file]
arch/sh/boards/mach-edosk7705/setup.c [deleted file]
arch/sh/boards/mach-microdev/io.c
arch/sh/boards/mach-microdev/setup.c
arch/sh/boards/mach-se/7206/Makefile
arch/sh/boards/mach-se/7206/io.c [deleted file]
arch/sh/boards/mach-se/7206/irq.c
arch/sh/boards/mach-se/7206/setup.c
arch/sh/boards/mach-se/770x/Makefile
arch/sh/boards/mach-se/770x/io.c [deleted file]
arch/sh/boards/mach-se/770x/setup.c
arch/sh/boards/mach-se/7751/Makefile
arch/sh/boards/mach-se/7751/io.c [deleted file]
arch/sh/boards/mach-se/7751/setup.c
arch/sh/boards/mach-snapgear/Makefile [deleted file]
arch/sh/boards/mach-snapgear/io.c [deleted file]
arch/sh/boards/mach-systemh/Makefile [deleted file]
arch/sh/boards/mach-systemh/io.c [deleted file]
arch/sh/boards/mach-systemh/irq.c [deleted file]
arch/sh/boards/mach-systemh/setup.c [deleted file]
arch/sh/configs/secureedge5410_defconfig [moved from arch/sh/configs/snapgear_defconfig with 100% similarity]
arch/sh/configs/systemh_defconfig [deleted file]
arch/sh/include/asm/addrspace.h
arch/sh/include/asm/pgtable.h
arch/sh/include/asm/system.h
arch/sh/include/asm/system_32.h
arch/sh/include/asm/system_64.h
arch/sh/include/asm/uncached.h
arch/sh/include/mach-common/mach/edosk7705.h [deleted file]
arch/sh/include/mach-common/mach/microdev.h
arch/sh/include/mach-common/mach/secureedge5410.h [moved from arch/sh/include/mach-common/mach/snapgear.h with 79% similarity]
arch/sh/include/mach-common/mach/systemh7751.h [deleted file]
arch/sh/kernel/cpu/sh4a/clock-sh7724.c
arch/sh/mm/Kconfig
arch/sh/mm/consistent.c
arch/sh/mm/uncached.c
arch/sh/tools/mach-types
arch/tile/include/asm/highmem.h
arch/tile/include/asm/kmap_types.h
arch/tile/include/asm/pgtable.h
arch/tile/include/asm/stat.h
arch/tile/include/asm/unistd.h
arch/tile/kernel/compat.c
arch/tile/kernel/early_printk.c
arch/tile/kernel/hardwall.c
arch/tile/kernel/irq.c
arch/tile/kernel/machine_kexec.c
arch/tile/kernel/messaging.c
arch/tile/kernel/ptrace.c
arch/tile/kernel/reboot.c
arch/tile/kernel/setup.c
arch/tile/kernel/signal.c
arch/tile/kernel/smp.c
arch/tile/kernel/time.c
arch/tile/lib/memcpy_tile64.c
arch/tile/mm/highmem.c
arch/tile/mm/init.c
arch/tile/mm/pgtable.c
arch/um/include/asm/ptrace-generic.h
arch/um/kernel/ptrace.c
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
crypto/pcrypt.c
drivers/Makefile
drivers/block/floppy.c
drivers/char/Makefile
drivers/char/agp/intel-gtt.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/sh_mtu2.c
drivers/clocksource/sh_tmu.c
drivers/firewire/ohci.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_manager.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/via/via_dmablit.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/stub/Kconfig
drivers/hwmon/ltc4261.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/input/mouse/appletouch.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/bu21013_ts.c
drivers/isdn/hisax/isar.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/led-triggers.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lp5521.c [new file with mode: 0644]
drivers/leds/leds-lp5523.c [new file with mode: 0644]
drivers/leds/leds-net5501.c
drivers/leds/ledtrig-timer.c
drivers/macintosh/adb-iop.c
drivers/media/IR/ir-keytable.c
drivers/misc/apds9802als.c
drivers/misc/bh1770glc.c
drivers/misc/isl29020.c
drivers/net/atlx/atl1.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_hsi.h
drivers/net/bnx2x/bnx2x_link.c
drivers/net/caif/caif_spi.c
drivers/net/caif/caif_spi_slave.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb4/cxgb4_main.c
drivers/net/cxgb4vf/cxgb4vf_main.c
drivers/net/ibm_newemac/core.c
drivers/net/jme.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/qlcnic/qlcnic_main.c
drivers/net/smsc911x.h
drivers/net/tulip/de2104x.c
drivers/net/usb/usbnet.c
drivers/net/wireless/ipw2x00/libipw_module.c
drivers/net/wireless/rt2x00/Kconfig
drivers/rapidio/rio.c
drivers/rtc/rtc-ds1302.c
drivers/s390/scsi/zfcp_fc.h
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_unit.c
drivers/scsi/bfa/bfa.h
drivers/scsi/bfa/bfa_cb_ioim.h
drivers/scsi/bfa/bfa_core.c
drivers/scsi/bfa/bfa_cs.h
drivers/scsi/bfa/bfa_defs.h
drivers/scsi/bfa/bfa_defs_fcs.h
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_drv.c
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcbuild.c
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfa_fcs_fcpim.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/bfa/bfa_fcs_rport.c
drivers/scsi/bfa/bfa_hw_cb.c
drivers/scsi/bfa/bfa_hw_ct.c
drivers/scsi/bfa/bfa_ioc.c
drivers/scsi/bfa/bfa_ioc.h
drivers/scsi/bfa/bfa_ioc_cb.c
drivers/scsi/bfa/bfa_ioc_ct.c
drivers/scsi/bfa/bfa_modules.h
drivers/scsi/bfa/bfa_os_inc.h
drivers/scsi/bfa/bfa_port.c
drivers/scsi/bfa/bfa_svc.c
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfi.h
drivers/scsi/bfa/bfi_ms.h
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/libfcoe.c
drivers/scsi/gdth.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libfc/fc_disc.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/osd/osd_initiator.c
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_dbg.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sr_ioctl.c
drivers/serial/crisv10.c
drivers/sh/clk/core.c
drivers/sh/intc/core.c
drivers/sh/intc/dynamic.c
drivers/spi/spi.c
drivers/spi/spi_bfin5xx.c
drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
drivers/staging/ath6kl/os/linux/include/athendpack_linux.h [deleted file]
drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h [deleted file]
drivers/staging/brcm80211/brcmfmac/dhd_linux.c
drivers/staging/brcm80211/sys/wl_mac80211.c
drivers/staging/comedi/drivers/dt9812.c
drivers/staging/comedi/drivers/usbdux.c
drivers/staging/comedi/drivers/usbduxfast.c
drivers/staging/msm/msm_fb.c
drivers/staging/rtl8712/osdep_service.h
drivers/staging/smbfs/inode.c
drivers/staging/solo6x10/solo6010-v4l2-enc.c
drivers/staging/solo6x10/solo6010-v4l2.c
drivers/staging/tm6000/tm6000-i2c.c
drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
drivers/tty/Makefile [new file with mode: 0644]
drivers/tty/n_gsm.c [moved from drivers/char/n_gsm.c with 100% similarity]
drivers/tty/n_hdlc.c [moved from drivers/char/n_hdlc.c with 100% similarity]
drivers/tty/n_r3964.c [moved from drivers/char/n_r3964.c with 100% similarity]
drivers/tty/n_tty.c [moved from drivers/char/n_tty.c with 100% similarity]
drivers/tty/pty.c [moved from drivers/char/pty.c with 100% similarity]
drivers/tty/sysrq.c [moved from drivers/char/sysrq.c with 100% similarity]
drivers/tty/tty_audit.c [moved from drivers/char/tty_audit.c with 100% similarity]
drivers/tty/tty_buffer.c [moved from drivers/char/tty_buffer.c with 100% similarity]
drivers/tty/tty_io.c [moved from drivers/char/tty_io.c with 100% similarity]
drivers/tty/tty_ioctl.c [moved from drivers/char/tty_ioctl.c with 100% similarity]
drivers/tty/tty_ldisc.c [moved from drivers/char/tty_ldisc.c with 100% similarity]
drivers/tty/tty_mutex.c [moved from drivers/char/tty_mutex.c with 100% similarity]
drivers/tty/tty_port.c [moved from drivers/char/tty_port.c with 100% similarity]
drivers/tty/vt/.gitignore [moved from drivers/char/.gitignore with 100% similarity]
drivers/tty/vt/Makefile [new file with mode: 0644]
drivers/tty/vt/consolemap.c [moved from drivers/char/consolemap.c with 100% similarity]
drivers/tty/vt/cp437.uni [moved from drivers/char/cp437.uni with 100% similarity]
drivers/tty/vt/defkeymap.c_shipped [moved from drivers/char/defkeymap.c_shipped with 100% similarity]
drivers/tty/vt/defkeymap.map [moved from drivers/char/defkeymap.map with 100% similarity]
drivers/tty/vt/keyboard.c [moved from drivers/char/keyboard.c with 100% similarity]
drivers/tty/vt/selection.c [moved from drivers/char/selection.c with 100% similarity]
drivers/tty/vt/vc_screen.c [moved from drivers/char/vc_screen.c with 100% similarity]
drivers/tty/vt/vt.c [moved from drivers/char/vt.c with 100% similarity]
drivers/tty/vt/vt_ioctl.c [moved from drivers/char/vt_ioctl.c with 100% similarity]
drivers/usb/gadget/u_ether.c
drivers/video/backlight/adp8860_bl.c
drivers/video/backlight/l4f00242t03.c
drivers/video/backlight/lms283gf05.c
drivers/video/backlight/mbp_nvidia_bl.c
drivers/video/backlight/pwm_bl.c
drivers/video/backlight/s6e63m0.c
fs/cifs/TODO
fs/cifs/cifs_fs_sb.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/misc.c
fs/ext4/ext4.h
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/gfs2/file.c
fs/hpfs/buffer.c
fs/hpfs/hpfs_fn.h
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/jbd2/journal.c
fs/locks.c
fs/logfs/logfs.h
fs/nfs/file.c
fs/nfsd/nfs4state.c
fs/openpromfs/inode.c
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_quota.h
include/asm-generic/stat.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/linux/atomic.h [new file with mode: 0644]
include/linux/fs.h
include/linux/hardirq.h
include/linux/highmem.h
include/linux/i2c.h
include/linux/irq.h
include/linux/irqnr.h
include/linux/kernel.h
include/linux/leds-lp5521.h [new file with mode: 0644]
include/linux/leds-lp5523.h [new file with mode: 0644]
include/linux/leds.h
include/linux/mmc/sh_mmcif.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/pwm_backlight.h
include/linux/radix-tree.h
include/linux/resource.h
include/linux/semaphore.h
include/linux/sh_clk.h
include/linux/sh_timer.h
include/linux/spi/spi.h
include/linux/sunrpc/svc_xprt.h
include/net/caif/caif_dev.h
include/net/caif/caif_spi.h
include/net/caif/cfcnfg.h
include/net/netlink.h
include/scsi/libfc.h
include/scsi/osd_initiator.h
include/scsi/osd_protocol.h
include/scsi/osd_types.h
include/trace/events/ext4.h
kernel/exit.c
kernel/irq/manage.c
kernel/latencytop.c
kernel/perf_event.c
kernel/printk.c
kernel/range.c
kernel/relay.c
kernel/sysctl.c
kernel/watchdog.c
lib/radix-tree.c
mm/filemap.c
mm/memcontrol.c
mm/mprotect.c
mm/vmscan.c
mm/vmstat.c
net/caif/caif_config_util.c
net/caif/caif_dev.c
net/caif/caif_socket.c
net/caif/cfcnfg.c
net/caif/cfctrl.c
net/caif/cfdbgl.c
net/caif/cfrfml.c
net/core/dev.c
net/ipv4/fib_lookup.h
net/ipv4/inet_diag.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_nat_core.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/route.c
net/l2tp/l2tp_debugfs.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto.c
net/rds/loop.c
net/rds/tcp.c
net/sched/cls_cgroup.c
net/sched/em_text.c
net/x25/x25_facilities.c
net/x25/x25_in.c
scripts/checkpatch.pl
scripts/kconfig/symbol.c
security/Kconfig
security/apparmor/lsm.c
security/apparmor/policy.c
security/commoncap.c
sound/pci/asihpi/hpi6000.c
sound/pci/asihpi/hpi6205.c
sound/pci/asihpi/hpicmn.c
sound/pci/cs46xx/dsp_spos.c
sound/pci/hda/patch_cirrus.c
sound/pci/lx6464es/lx6464es.c
sound/pci/lx6464es/lx6464es.h
sound/pci/lx6464es/lx_core.c
sound/soc/codecs/Kconfig
sound/soc/codecs/tlv320dac33.c
sound/soc/codecs/tpa6130a2.c
sound/soc/codecs/wm8900.c
sound/soc/codecs/wm_hubs.c
sound/soc/pxa/tosa.c
sound/soc/soc-core.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
tools/perf/Documentation/perf-trace.txt
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/scripts/perl/bin/failed-syscalls-record
tools/perf/scripts/perl/bin/rw-by-file-record
tools/perf/scripts/perl/bin/rw-by-pid-record
tools/perf/scripts/perl/bin/rwtop-record
tools/perf/scripts/perl/bin/wakeup-latency-record
tools/perf/scripts/perl/bin/workqueue-stats-record
tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
tools/perf/scripts/python/bin/futex-contention-record
tools/perf/scripts/python/bin/netdev-times-record
tools/perf/scripts/python/bin/sched-migration-record
tools/perf/scripts/python/bin/sctop-record
tools/perf/scripts/python/bin/syscall-counts-by-pid-record
tools/perf/scripts/python/bin/syscall-counts-record
tools/perf/util/ui/util.c
usr/initramfs_data.S

diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644 (file)
index 0000000..cf63f26
--- /dev/null
@@ -0,0 +1,22 @@
+What:  /proc/<pid>/oom_adj
+When:  August 2012
+Why:   /proc/<pid>/oom_adj allows userspace to influence the oom killer's
+       badness heuristic used to determine which task to kill when the kernel
+       is out of memory.
+
+       The badness heuristic has been rewritten since the introduction of
+       this tunable, so its meaning is deprecated.  The value was
+       implemented as a bitshift on a score generated by the badness()
+       function that did not have any precise units of measure.  With the
+       rewrite, the score is given as a proportion of available memory to the
+       task allocating pages, so a bitshift which grows the score
+       exponentially cannot be tuned with fine granularity.
+
+       A much more powerful interface, /proc/<pid>/oom_score_adj, was
+       introduced with the oom killer rewrite that allows users to increase or
+       decrease the badness() score linearly.  This interface will replace
+       /proc/<pid>/oom_adj.
+
+       A warning will be emitted to the kernel log if an application uses this
+       deprecated interface.  After it is printed once, future warnings will be
+       suppressed until the kernel is rebooted.
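For illustration only (the sketch below is not part of the patch): a process can adjust its own standing under the rewritten heuristic by writing to the new file. This is a minimal userspace sketch assuming the documented /proc/<pid>/oom_score_adj range of -1000 (never kill) to 1000 (kill first); the value 500 is arbitrary.

/* Minimal sketch: make the current process a preferred OOM-kill victim. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/oom_score_adj", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "%d\n", 500);        /* 0 = unchanged, 1000 = most preferred */
        fclose(f);
        return 0;
}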
index 8a817f656f0a808dffb344c6a46bab960cb8a0ae..a91f30890011ddf69ba9b859484966c943db7fb5 100644 (file)
@@ -322,7 +322,6 @@ fl_release_private: yes     yes
 prototypes:
        int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
        void (*fl_notify)(struct file_lock *);  /* unblock callback */
-       void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);
        void (*fl_break)(struct file_lock *); /* break_lease callback */
 
@@ -330,7 +329,6 @@ locking rules:
                        BKL     may block
 fl_compare_owner:      yes     no
 fl_notify:             yes     no
-fl_copy_lock:          yes     no
 fl_release_private:    yes     yes
 fl_break:              yes     no
 
index 96d0df28bed323d5596fc051b0ffb96ed8e3c8df..7445bf335dae7eeba4bd6640a82fa3987f48d2cc 100644 (file)
@@ -794,17 +794,6 @@ designed.
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-       => should be roughly 6 months after initial merge
-       => enough time to:
-               => gain confidence and fix problems reported by early
-                  adopters (a.k.a. guinea pigs)
-               => address worst performance regressions and undesired
-                  behaviours
-               => start tuning/optimising code for parallelism
-               => start tuning/optimising algorithms consuming
-                  excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
        => should be roughly 12 months after initial merge
        => enough time to shake out remaining problems before next round of
index e307914a3edafc84c2019911d48b9922133e7812..93fe76e56522a199a0ab5044544673418be64ec0 100644 (file)
@@ -15,10 +15,14 @@ Supported adapters:
   * Intel 82801I (ICH9)
   * Intel EP80579 (Tolapai)
   * Intel 82801JI (ICH10)
-  * Intel 3400/5 Series (PCH)
+  * Intel 5/3400 Series (PCH)
   * Intel Cougar Point (PCH)
+  * Intel Patsburg (PCH)
    Datasheets: Publicly available at the Intel website
 
+On Intel Patsburg and later chipsets, both the normal host SMBus controller
+and the additional 'Integrated Device Function' controllers are supported.
+
 Authors: 
        Mark Studebaker <mdsxyz123@yahoo.com>
        Jean Delvare <khali@linux-fr.org>
index 8fd5ca2ae32dde4d9eb27722942a5d099f4afbc3..58b266bd1846f1d0560bdae575f6e764a95fd4b3 100644 (file)
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
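As a rough illustration of the interface described above (not part of the patch itself), a trigger or board file would now go through the LED core rather than calling a driver's blink_set() op directly. A minimal sketch, assuming the led_blink_set() and led_brightness_set() prototypes from <linux/leds.h>:

#include <linux/leds.h>

/* Start blinking; the core falls back to a software timer if the
 * driver's blink_set() op cannot handle the requested period. */
static void example_start_blink(struct led_classdev *led)
{
        unsigned long delay_on = 500;   /* ms; 0/0 lets the driver choose */
        unsigned long delay_off = 500;

        led_blink_set(led, &delay_on, &delay_off);
}

/* Stop blinking: setting the brightness also cancels any software
 * blink timer the core may have set up. */
static void example_stop_blink(struct led_classdev *led)
{
        led_brightness_set(led, LED_OFF);
}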
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644 (file)
index 0000000..c4d8d15
--- /dev/null
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found from the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd   /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+               .max_current    = 130,
+        }, {
+                .chan_nr        = 1,
+                .led_current    = 0,
+               .max_current    = 130,
+        }, {
+                .chan_nr        = 2,
+                .led_current    = 0,
+               .max_current    = 130,
+        }
+};
+
+static int lp5521_setup(void)
+{
+       /* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+       /* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+       /* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+        .led_config     = lp5521_led_config,
+        .num_channels   = ARRAY_SIZE(lp5521_led_config),
+        .clock_mode     = LP5521_CLOCK_EXT,
+        .setup_resources   = lp5521_setup,
+        .release_resources = lp5521_release,
+        .enable            = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644 (file)
index 0000000..fad2feb
--- /dev/null
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+from the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest always uses the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+               .max_current    = 130,
+        },
+...
+        }, {
+                .chan_nr        = 8,
+                .led_current    = 50,
+               .max_current    = 130,
+        }
+};
+
+static int lp5523_setup(void)
+{
+       /* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+       /* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+       /* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+        .led_config     = lp5523_led_config,
+        .num_channels   = ARRAY_SIZE(lp5523_led_config),
+        .clock_mode     = LP5523_CLOCK_EXT,
+        .setup_resources   = lp5523_setup,
+        .release_resources = lp5523_release,
+        .enable            = lp5523_enable,
+};
index 30023568805e2a56077fa4cfbd088ee666522ab0..00301ed9c3715596b35411f46ff31243412444dd 100644 (file)
@@ -1,3 +1,50 @@
+1 Release Date    : Thur.  May 03, 2010 09:12:45 PST 2009 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Bo Yang
+
+2 Current Version : 00.00.04.31-rc1
+3 Older Version   : 00.00.04.17.1-rc1
+
+1.     Add Online Controller Reset (OCR) support to the driver.
+       OCR is a new feature of the megaraid_sas driver which
+       allows the FW to reset the chip without affecting
+       OS behaviour.
+
+       To add OCR support, the driver needs to:
+               a). reset the controller chips (Xscale and Gen2), which
+               changes the function calls and adds the reset functions
+               related to these two chips.
+
+               b). during the reset, store the pending cmds that were
+               not returned by the FW in the driver's pending queue, and
+               re-issue those pending cmds to the FW after the OCR
+               has finished.
+
+               c). in the driver's timeout routine, report a reset to the
+               OS.  The driver's queue routine will also block the
+               cmds until the OCR has finished.
+
+               d). in the driver's ISR routine, if the driver sees the FW state
+               change to Failure and the FW supports online controller
+               reset (OCR), start the controller reset.
+
+               e). in the driver's IOCTL routine, application cmds will wait
+               for the OCR to finish before being issued to the FW.
+
+               f). before the driver kills the adapter, attempt one last
+               OCR to see if the FW can be brought back.
+
+2.     Add a support-update flag to the driver to tell the LSI megaraid_sas
+       application that the driver supports device update, so the application
+       does not need to perform a device update after a device is added to or
+       removed from the system.
+3.     In the driver's timeout routine, reset up to three times if the FW is in
+       the failed state.  The driver will kill the adapter if the FW cannot be
+       brought back after these three resets.
+4.     Add the input parameter max_sectors for 1MB I/O support on the GEN2
+       controller.  Customers can use the max_sectors parameter to enable
+       1MB support on the GEN2 controller.
+
 1 Release Date    : Thur.  Oct 29, 2009 09:12:45 PST 2009 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Bo Yang
index 3894eaa23486f951ace787893740ac2850f7d6d9..209e1584c3dc25e8e1af61a3061c779a28f5d11e 100644 (file)
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
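Referring back to the dmesg_restrict toggle documented in the hunk above (the sketch below is not part of the patch): with the toggle set to 1, an unprivileged process lacking CAP_SYS_ADMIN should get EPERM from syslog(2), the call dmesg(8) uses to read the log buffer. A minimal check, assuming glibc's klogctl() wrapper:

#include <errno.h>
#include <stdio.h>
#include <sys/klog.h>

int main(void)
{
        char buf[4096];
        /* 3 == SYSLOG_ACTION_READ_ALL, i.e. what "dmesg" does */
        int n = klogctl(3, buf, sizeof(buf));

        if (n < 0 && errno == EPERM)
                printf("dmesg_restrict is in effect for this user\n");
        else if (n < 0)
                perror("klogctl");
        else
                printf("read %d bytes of kernel log\n", n);
        return 0;
}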
index cb8b580203524baa077ee0bea390efb3d559c4c4..0094224ca79b018ced32c21fb67506d07aae93f8 100644 (file)
@@ -1757,6 +1757,7 @@ L:        linux-cris-kernel@axis.com
 W:     http://developer.axis.com
 S:     Maintained
 F:     arch/cris/
+F:     drivers/serial/crisv10.*
 
 CRYPTO API
 M:     Herbert Xu <herbert@gondor.apana.org.au>
index 519db43052a047f175501cca984d8058cf382b1c..6619720f50dd667abf746f1dd892b35c67a9742c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 36
-EXTRAVERSION =
+SUBLEVEL = 37
+EXTRAVERSION = -rc1
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 54b479c35ee01d2db0f2f5c75f9786f329293146..51dcd59eda6a1aeb965d02b5b94bf66e8737e2ff 100644 (file)
@@ -116,4 +116,6 @@ endmenu
 config SH_CLK_CPG
        bool
 
+source "drivers/sh/Kconfig"
+
 endif
index 46ca4d4abf910aad98652306a7d41bfa7ccdcfe8..32d9e2816e569a98a9a6525e9aab6c757053388f 100644 (file)
@@ -565,12 +565,50 @@ static struct platform_device *qhd_devices[] __initdata = {
 
 /* FSI */
 #define IRQ_FSI                evt2irq(0x1840)
+
+static int fsi_set_rate(int is_porta, int rate)
+{
+       struct clk *fsib_clk;
+       struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+       int ret;
+
+       /* set_rate is not needed if port A */
+       if (is_porta)
+               return 0;
+
+       fsib_clk = clk_get(NULL, "fsib_clk");
+       if (IS_ERR(fsib_clk))
+               return -EINVAL;
+
+       switch (rate) {
+       case 48000:
+               clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
+               clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
+               ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+               break;
+       default:
+               pr_err("unsupported rate in FSI2 port B\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       clk_put(fsib_clk);
+
+       return ret;
+}
+
 static struct sh_fsi_platform_info fsi_info = {
        .porta_flags = SH_FSI_BRS_INV |
                       SH_FSI_OUT_SLAVE_MODE |
                       SH_FSI_IN_SLAVE_MODE |
                       SH_FSI_OFMT(PCM) |
                       SH_FSI_IFMT(PCM),
+
+       .portb_flags = SH_FSI_BRS_INV |
+                      SH_FSI_BRM_INV |
+                      SH_FSI_LRS_INV |
+                      SH_FSI_OFMT(SPDIF),
+       .set_rate = fsi_set_rate,
 };
 
 static struct resource fsi_resources[] = {
@@ -634,6 +672,7 @@ static struct platform_device lcdc1_device = {
 static struct sh_mobile_hdmi_info hdmi_info = {
        .lcd_chan = &sh_mobile_lcdc1_info.ch[0],
        .lcd_dev = &lcdc1_device.dev,
+       .flags = HDMI_SND_SRC_SPDIF,
 };
 
 static struct resource hdmi_resources[] = {
@@ -992,6 +1031,7 @@ static void __init ap4evb_map_io(void)
 
 #define GPIO_PORT9CR   0xE6051009
 #define GPIO_PORT10CR  0xE605100A
+#define USCCR1         0xE6058144
 static void __init ap4evb_init(void)
 {
        u32 srcr4;
@@ -1062,7 +1102,7 @@ static void __init ap4evb_init(void)
        /* setup USB phy */
        __raw_writew(0x8a0a, 0xE6058130);       /* USBCR2 */
 
-       /* enable FSI2 */
+       /* enable FSI2 port A (ak4643) */
        gpio_request(GPIO_FN_FSIAIBT,   NULL);
        gpio_request(GPIO_FN_FSIAILR,   NULL);
        gpio_request(GPIO_FN_FSIAISLD,  NULL);
@@ -1079,6 +1119,10 @@ static void __init ap4evb_init(void)
        gpio_request(GPIO_PORT41, NULL);
        gpio_direction_input(GPIO_PORT41);
 
+       /* setup FSI2 port B (HDMI) */
+       gpio_request(GPIO_FN_FSIBCK, NULL);
+       __raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
        /* set SPU2 clock to 119.6 MHz */
        clk = clk_get(NULL, "spu_clk");
        if (!IS_ERR(clk)) {
index 8565aefa21fd3c2e62877c3de33a43d8390dda76..7db31e6c6bf2085908cb66ce669238488c5aa444 100644 (file)
@@ -50,6 +50,9 @@
 #define SMSTPCR3       0xe615013c
 #define SMSTPCR4       0xe6150140
 
+#define FSIDIVA                0xFE1F8000
+#define FSIDIVB                0xFE1F8008
+
 /* Platforms must set frequency on their DV_CLKI pin */
 struct clk sh7372_dv_clki_clk = {
 };
@@ -288,6 +291,7 @@ struct clk sh7372_pllc2_clk = {
        .ops            = &pllc2_clk_ops,
        .parent         = &extal1_div2_clk,
        .freq_table     = pllc2_freq_table,
+       .nr_freqs       = ARRAY_SIZE(pllc2_freq_table) - 1,
        .parent_table   = pllc2_parent,
        .parent_num     = ARRAY_SIZE(pllc2_parent),
 };
@@ -417,6 +421,101 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
                                      fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
 };
 
+/* FSI DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+       unsigned long value;
+
+       value = __raw_readl(clk->mapping->base);
+
+       if ((value & 0x3) != 0x3)
+               return 0;
+
+       value >>= 16;
+       if (value < 2)
+               return 0;
+
+       return clk->parent->rate / value;
+}
+
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+       return clk_rate_div_range_round(clk, 2, 0xffff, rate);
+}
+
+static void fsidiv_disable(struct clk *clk)
+{
+       __raw_writel(0, clk->mapping->base);
+}
+
+static int fsidiv_enable(struct clk *clk)
+{
+       unsigned long value;
+
+       value  = __raw_readl(clk->mapping->base) >> 16;
+       if (value < 2) {
+               fsidiv_disable(clk);
+               return -ENOENT;
+       }
+
+       __raw_writel((value << 16) | 0x3, clk->mapping->base);
+
+       return 0;
+}
+
+static int fsidiv_set_rate(struct clk *clk,
+                          unsigned long rate, int algo_id)
+{
+       int idx;
+
+       if (clk->parent->rate == rate) {
+               fsidiv_disable(clk);
+               return 0;
+       }
+
+       idx = (clk->parent->rate / rate) & 0xffff;
+       if (idx < 2)
+               return -ENOENT;
+
+       __raw_writel(idx << 16, clk->mapping->base);
+       return fsidiv_enable(clk);
+}
+
+static struct clk_ops fsidiv_clk_ops = {
+       .recalc         = fsidiv_recalc,
+       .round_rate     = fsidiv_round_rate,
+       .set_rate       = fsidiv_set_rate,
+       .enable         = fsidiv_enable,
+       .disable        = fsidiv_disable,
+};
+
+static struct clk_mapping sh7372_fsidiva_clk_mapping = {
+       .phys   = FSIDIVA,
+       .len    = 8,
+};
+
+struct clk sh7372_fsidiva_clk = {
+       .ops            = &fsidiv_clk_ops,
+       .parent         = &div6_reparent_clks[DIV6_FSIA], /* late install */
+       .mapping        = &sh7372_fsidiva_clk_mapping,
+};
+
+static struct clk_mapping sh7372_fsidivb_clk_mapping = {
+       .phys   = FSIDIVB,
+       .len    = 8,
+};
+
+struct clk sh7372_fsidivb_clk = {
+       .ops            = &fsidiv_clk_ops,
+       .parent         = &div6_reparent_clks[DIV6_FSIB],  /* late install */
+       .mapping        = &sh7372_fsidivb_clk_mapping,
+};
+
+static struct clk *late_main_clks[] = {
+       &sh7372_fsidiva_clk,
+       &sh7372_fsidivb_clk,
+};
+
 enum { MSTP001,
        MSTP131, MSTP130,
        MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
@@ -585,6 +684,9 @@ void __init sh7372_clock_init(void)
        if (!ret)
                ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
 
+       for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+               ret = clk_register(late_main_clks[k]);
+
        clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
        if (!ret)
index 5bc6bd444d7236016eac8c004f764da16a481380..2b1bb9e43ddadc8b83505b021c5ca7ae4f4704c5 100644 (file)
@@ -35,12 +35,12 @@ static inline int gpio_cansleep(unsigned gpio)
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-       return -ENOSYS;
+       return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
 {
-       return -EINVAL;
+       return -ENOSYS;
 }
 
 #endif /* CONFIG_GPIOLIB */
index 147775a94bcefa528d662f8d49d0d93b6302cddf..e4f9004e710382574fcf5e23cb67339fa0f53148 100644 (file)
@@ -464,5 +464,7 @@ extern struct clk sh7372_dv_clki_div2_clk;
 extern struct clk sh7372_pllc2_clk;
 extern struct clk sh7372_fsiack_clk;
 extern struct clk sh7372_fsibck_clk;
+extern struct clk sh7372_fsidiva_clk;
+extern struct clk sh7372_fsidivb_clk;
 
 #endif /* __ASM_SH7372_H__ */
index edb2c0d255c2a1671cae12e6704da28e6b66cb8d..00869def54205c3291b44eafacf3afba7eb03d68 100644 (file)
@@ -67,7 +67,7 @@ static struct spi_board_info u300_spi_devices[] = {
                .bus_num        = 0, /* Only one bus on this chip */
                .chip_select    = 0,
                /* Means SPI_CS_HIGH, change if e.g low CS */
-               .mode           = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
+               .mode           = SPI_MODE_1 | SPI_LOOP,
        },
 #endif
 };
index 4a5b284a15500f4a1a8525dab1609d34310599c0..7ef4115b8c4a59f0248df2273184ab5e2ed8f053 100644 (file)
@@ -2,7 +2,9 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
+#ifdef CONFIG_MMU
 #include <linux/hardirq.h>
+#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
index 789f3b2de0e9bf2f63720bcc0c522c686e9c9d31..415d5484916c4d9e6ec1981e380cfe9a0f1ab4de 100644 (file)
@@ -40,5 +40,6 @@ extern unsigned long hw_timer_offset(void);
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
index 428d0e538aec50880f23b8b097698572a2d2a01a..b06bdae04064f59d2c4c4c98e76304dee6ca98c8 100644 (file)
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
        /* On relocatable kernels interrupts handlers and our code
           can be in different regions, so we don't patch them */
 
index 049846911ce4d18d8097507b9be0caa8d767733c..1cc471faac2dd4bac032a956860b4f10d29170cb 100644 (file)
@@ -416,7 +416,7 @@ lightweight_exit:
        lwz     r3, VCPU_PC(r4)
        mtsrr0  r3
        lwz     r3, VCPU_SHARED(r4)
-       lwz     r3, VCPU_SHARED_MSR(r3)
+       lwz     r3, (VCPU_SHARED_MSR + 4)(r3)
        oris    r3, r3, KVMPPC_MSR_MASK@h
        ori     r3, r3, KVMPPC_MSR_MASK@l
        mtsrr1  r3
index 71750f2dd5d34378c963acf7bf9d3a69a15863cc..e3768ee9b59537bf92eceb853fa9edf56d7d5b6c 100644 (file)
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
        free_page((unsigned long)vcpu->arch.shared);
-       kvmppc_e500_tlb_uninit(vcpu_e500);
        kvm_vcpu_uninit(vcpu);
+       kvmppc_e500_tlb_uninit(vcpu_e500);
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
index 2f87a1627f6cff35604f96e8e041cffd9af071ff..38f756f2505389ac5f59eb9ae3921ef72175eea1 100644 (file)
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
+               memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
index 46fa04f12a9b979c7cbe50c703d43e2c2705604e..a021f5827a336ce97b6c62ed5b8d0ca625d571bf 100644 (file)
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
        int i;
 
        /* pause guest execution to avoid concurrent updates */
-       local_irq_disable();
        mutex_lock(&vcpu->mutex);
 
        vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
        vcpu->arch.timing_last_enter.tv64 = 0;
 
        mutex_unlock(&vcpu->mutex);
-       local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
index 5c075f562ebae912476974f20faee1bfaeec707d..7f217b3a50a806b7347f991b588397011aa3388e 100644 (file)
@@ -193,6 +193,7 @@ config CPU_SH2
 config CPU_SH2A
        bool
        select CPU_SH2
+       select UNCACHED_MAPPING
 
 config CPU_SH3
        bool
index 307b3a4a790b98b61bd3cb893dfa155ff4983e0b..9c8c6e1a2a154d4dff50db8d0d532201c5f991be 100644 (file)
@@ -133,10 +133,7 @@ machdir-$(CONFIG_SOLUTION_ENGINE)          += mach-se
 machdir-$(CONFIG_SH_HP6XX)                     += mach-hp6xx
 machdir-$(CONFIG_SH_DREAMCAST)                 += mach-dreamcast
 machdir-$(CONFIG_SH_SH03)                      += mach-sh03
-machdir-$(CONFIG_SH_SECUREEDGE5410)            += mach-snapgear
 machdir-$(CONFIG_SH_RTS7751R2D)                        += mach-r2d
-machdir-$(CONFIG_SH_7751_SYSTEMH)              += mach-systemh
-machdir-$(CONFIG_SH_EDOSK7705)                 += mach-edosk7705
 machdir-$(CONFIG_SH_HIGHLANDER)                        += mach-highlander
 machdir-$(CONFIG_SH_MIGOR)                     += mach-migor
 machdir-$(CONFIG_SH_AP325RXA)                  += mach-ap325rxa
index 9c94711aa6caee785b152ba7be282b98a6b6d285..2018c7ea4c93f49d53d7f1ff82f32db2b85e85bc 100644 (file)
@@ -81,13 +81,6 @@ config SH_7343_SOLUTION_ENGINE
          Select 7343 SolutionEngine if configuring for a Hitachi
          SH7343 (SH-Mobile 3AS) evaluation board.
 
-config SH_7751_SYSTEMH
-       bool "SystemH7751R"
-       depends on CPU_SUBTYPE_SH7751R
-       help
-         Select SystemH if you are configuring for a Renesas SystemH
-         7751R evaluation board.
-
 config SH_HP6XX
        bool "HP6XX"
        select SYS_SUPPORTS_APM_EMULATION
index 38ef655cc0f08cfe0f881b053f421e9861412556..be7d11d04b26a3abcaeacde829e496f970903c33 100644 (file)
@@ -2,10 +2,12 @@
 # Specific board support, not covered by a mach group.
 #
 obj-$(CONFIG_SH_MAGIC_PANEL_R2)        += board-magicpanelr2.o
+obj-$(CONFIG_SH_SECUREEDGE5410)        += board-secureedge5410.o
 obj-$(CONFIG_SH_SH2007)                += board-sh2007.o
 obj-$(CONFIG_SH_SH7785LCR)     += board-sh7785lcr.o
 obj-$(CONFIG_SH_URQUELL)       += board-urquell.o
 obj-$(CONFIG_SH_SHMIN)         += board-shmin.o
+obj-$(CONFIG_SH_EDOSK7705)     += board-edosk7705.o
 obj-$(CONFIG_SH_EDOSK7760)     += board-edosk7760.o
 obj-$(CONFIG_SH_ESPT)          += board-espt.o
 obj-$(CONFIG_SH_POLARIS)       += board-polaris.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644 (file)
index 0000000..4cb3bb7
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * arch/sh/boards/renesas/edosk7705/setup.c
+ *
+ * Copyright (C) 2000  Kazumoto Kojima
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ * Modified for edosk7705 development
+ * board by S. Dunn, 2003.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/smc91x.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+#define SMC_IOBASE     0xA2000000
+#define SMC_IO_OFFSET  0x300
+#define SMC_IOADDR     (SMC_IOBASE + SMC_IO_OFFSET)
+
+#define ETHERNET_IRQ   0x09
+
+static void __init sh_edosk7705_init_irq(void)
+{
+       make_imask_irq(ETHERNET_IRQ);
+}
+
+/* eth initialization functions */
+static struct smc91x_platdata smc91x_info = {
+       .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
+};
+
+static struct resource smc91x_res[] = {
+       [0] = {
+               .start  = SMC_IOADDR,
+               .end    = SMC_IOADDR + SZ_32 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = ETHERNET_IRQ,
+               .end    = ETHERNET_IRQ,
+               .flags  = IORESOURCE_IRQ ,
+       }
+};
+
+static struct platform_device smc91x_dev = {
+       .name           = "smc91x",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(smc91x_res),
+       .resource       = smc91x_res,
+
+       .dev    = {
+               .platform_data  = &smc91x_info,
+       },
+};
+
+/* platform init code */
+static struct platform_device *edosk7705_devices[] __initdata = {
+       &smc91x_dev,
+};
+
+static int __init init_edosk7705_devices(void)
+{
+       return platform_add_devices(edosk7705_devices,
+                                   ARRAY_SIZE(edosk7705_devices));
+}
+__initcall(init_edosk7705_devices);
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_edosk7705 __initmv = {
+       .mv_name                = "EDOSK7705",
+       .mv_nr_irqs             = 80,
+       .mv_init_irq            = sh_edosk7705_init_irq,
+};
similarity index 70%
rename from arch/sh/boards/mach-snapgear/setup.c
rename to arch/sh/boards/board-secureedge5410.c
index 331745dee3798b2f9465c197827bd2ab90bb49fb..32f875e8493dede169b70e29ce9c19283264f5f9 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * linux/arch/sh/boards/snapgear/setup.c
- *
  * Copyright (C) 2002  David McCullough <davidm@snapgear.com>
  * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
  *
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <asm/machvec.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <cpu/timer.h>
 
+unsigned short secureedge5410_ioport;
+
 /*
  * EraseConfig handling functions
  */
-
 static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
 {
-       (void)__raw_readb(0xb8000000);  /* dummy read */
+       ctrl_delay();   /* dummy read */
 
        printk("SnapGear: erase switch interrupt!\n");
 
@@ -39,21 +38,22 @@ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
 
 static int __init eraseconfig_init(void)
 {
+       unsigned int irq = evt2irq(0x240);
+
        printk("SnapGear: EraseConfig init\n");
+
        /* Setup "EraseConfig" switch on external IRQ 0 */
-       if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED,
+       if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
                                "Erase Config", NULL))
                printk("SnapGear: failed to register IRQ%d for Reset witch\n",
-                               IRL0_IRQ);
+                               irq);
        else
                printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
-                               IRL0_IRQ);
-       return(0);
+                               irq);
+       return 0;
 }
-
 module_init(eraseconfig_init);
 
-/****************************************************************************/
 /*
  * Initialize IRQ setting
  *
@@ -62,7 +62,6 @@ module_init(eraseconfig_init);
  * IRL2 = eth1
  * IRL3 = crypto
  */
-
 static void __init init_snapgear_IRQ(void)
 {
        printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@ static void __init init_snapgear_IRQ(void)
 static struct sh_machine_vector mv_snapgear __initmv = {
        .mv_name                = "SnapGear SecureEdge5410",
        .mv_nr_irqs             = 72,
-
-       .mv_inb                 = snapgear_inb,
-       .mv_inw                 = snapgear_inw,
-       .mv_inl                 = snapgear_inl,
-       .mv_outb                = snapgear_outb,
-       .mv_outw                = snapgear_outw,
-       .mv_outl                = snapgear_outl,
-
-       .mv_inb_p               = snapgear_inb_p,
-       .mv_inw_p               = snapgear_inw,
-       .mv_inl_p               = snapgear_inl,
-       .mv_outb_p              = snapgear_outb_p,
-       .mv_outw_p              = snapgear_outw,
-       .mv_outl_p              = snapgear_outl,
-
        .mv_init_irq            = init_snapgear_IRQ,
 };
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644 (file)
index cd54acb..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the EDOSK7705 specific parts of the kernel
-#
-
-obj-y   := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644 (file)
index 5b9c57c..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/io.c
- *
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routines for Hitachi EDOSK7705 board.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/edosk7705.h>
-#include <asm/addrspace.h>
-
-#define SMC_IOADDR     0xA2000000
-
-/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
-static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
-{
-       /*
-        * SMC91C96 registers are 4 byte aligned rather than the
-        * usual 2 byte!
-        */
-       if (port >= 0x300 && port < 0x320)
-               return SMC_IOADDR + ((port - 0x300) * 2);
-
-       maybebadio(port);
-       return port;
-}
-
-/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
- * registers causes problems. So we bit-shift the value and read / write
- * in 2 byte chunks. Setting the low byte to 0 does not cause problems
- * now as odd byte writes are only made on the bit mask / interrupt
- * register. This may not be the case in future Mar-2003 SJD
- */
-unsigned char sh_edosk7705_inb(unsigned long port)
-{
-       if (port >= 0x300 && port < 0x320 && port & 0x01)
-               return __raw_readw(port - 1) >> 8;
-
-       return __raw_readb(sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_outb(unsigned char value, unsigned long port)
-{
-       if (port >= 0x300 && port < 0x320 && port & 0x01) {
-               __raw_writew(((unsigned short)value << 8), port - 1);
-               return;
-       }
-
-       __raw_writeb(value, sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
-{
-       unsigned char *p = addr;
-
-       while (count--)
-               *p++ = sh_edosk7705_inb(port);
-}
-
-void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-       unsigned char *p = (unsigned char *)addr;
-
-       while (count--)
-               sh_edosk7705_outb(*p++, port);
-}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644 (file)
index d59225e..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/setup.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SolutionEngine Support.
- *
- * Modified for edosk7705 development
- * board by S. Dunn, 2003.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/machvec.h>
-#include <mach/edosk7705.h>
-
-static void __init sh_edosk7705_init_irq(void)
-{
-       /* This is the Ethernet interrupt */
-       make_imask_irq(0x09);
-}
-
-/*
- * The Machine Vector
- */
-static struct sh_machine_vector mv_edosk7705 __initmv = {
-       .mv_name                = "EDOSK7705",
-       .mv_nr_irqs             = 80,
-
-       .mv_inb                 = sh_edosk7705_inb,
-       .mv_outb                = sh_edosk7705_outb,
-
-       .mv_insb                = sh_edosk7705_insb,
-       .mv_outsb               = sh_edosk7705_outsb,
-
-       .mv_init_irq            = sh_edosk7705_init_irq,
-};
index 2960c659020ee92e6c60c5c1d16d8d05ab73e267..acdafb0c6404a153cc482e46c0a17287f15ea26e 100644 (file)
@@ -54,7 +54,7 @@
 /*
  * map I/O ports to memory-mapped addresses
  */
-static unsigned long microdev_isa_port2addr(unsigned long offset)
+void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
 {
        unsigned long result;
 
@@ -72,16 +72,6 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
                         *      Configuration Registers
                         */
                result = IO_SUPERIO_PHYS + (offset << 1);
-#if 0
-       } else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
-                  offset == KBD_STATUS_REG) {
-                       /*
-                        *      SMSC FDC37C93xAPM SuperIO chip
-                        *
-                        *      PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
-                        */
-               result = IO_SUPERIO_PHYS + (offset << 1);
-#endif
        } else if (((offset >= IO_IDE1_BASE) &&
                    (offset <  IO_IDE1_BASE + IO_IDE_EXTENT)) ||
                    (offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
                result = PVR;
        }
 
-       return result;
-}
-
-#define PORT2ADDR(x) (microdev_isa_port2addr(x))
-
-static inline void delay(void)
-{
-#if defined(CONFIG_PCI)
-       /* System board present, just make a dummy SRAM access.  (CS0 will be
-          mapped to PCI memory, probably good to avoid it.) */
-       __raw_readw(0xa6800000);
-#else
-       /* CS0 will be mapped to flash, ROM etc so safe to access it. */
-       __raw_readw(0xa0000000);
-#endif
-}
-
-unsigned char microdev_inb(unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO)
-               return microdev_pci_inb(port);
-#endif
-       return *(volatile unsigned char*)PORT2ADDR(port);
-}
-
-unsigned short microdev_inw(unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO)
-               return microdev_pci_inw(port);
-#endif
-       return *(volatile unsigned short*)PORT2ADDR(port);
-}
-
-unsigned int microdev_inl(unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO)
-               return microdev_pci_inl(port);
-#endif
-       return *(volatile unsigned int*)PORT2ADDR(port);
-}
-
-void microdev_outw(unsigned short b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO) {
-               microdev_pci_outw(b, port);
-               return;
-       }
-#endif
-       *(volatile unsigned short*)PORT2ADDR(port) = b;
-}
-
-void microdev_outb(unsigned char b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO) {
-               microdev_pci_outb(b, port);
-               return;
-       }
-#endif
-
-       /*
-        *      There is a board feature with the current SH4-202 MicroDev in
-        *      that the 2 byte enables (nBE0 and nBE1) are tied together (and
-        *      to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
-        *      it is not possible to safely perform 8-bit writes to the
-        *      Ethernet registers, as 16-bits will be consumed from the Data
-        *      lines (corrupting the other byte).  Hence, this function is
-        *      written to implement 16-bit read/modify/write for all byte-wide
-        *      accesses.
-        *
-        *      Note: there is no problem with byte READS (even or odd).
-        *
-        *                      Sean McGoogan - 16th June 2003.
-        */
-       if ((port >= IO_LAN91C111_BASE) &&
-           (port <  IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
-                       /*
-                        * Then are trying to perform a byte-write to the
-                        * LAN91C111.  This needs special care.
-                        */
-               if (port % 2 == 1) {    /* is the port odd ? */
-                       /* unset bit-0, i.e. make even */
-                       const unsigned long evenPort = port-1;
-                       unsigned short word;
-
-                       /*
-                        * do a 16-bit read/write to write to 'port',
-                        * preserving even byte.
-                        *
-                        *      Even addresses are bits 0-7
-                        *      Odd  addresses are bits 8-15
-                        */
-                       word = microdev_inw(evenPort);
-                       word = (word & 0xffu) | (b << 8);
-                       microdev_outw(word, evenPort);
-               } else {
-                       /* else, we are trying to do an even byte write */
-                       unsigned short word;
-
-                       /*
-                        * do a 16-bit read/write to write to 'port',
-                        * preserving odd byte.
-                        *
-                        *      Even addresses are bits 0-7
-                        *      Odd  addresses are bits 8-15
-                        */
-                       word = microdev_inw(port);
-                       word = (word & 0xff00u) | (b);
-                       microdev_outw(word, port);
-               }
-       } else {
-               *(volatile unsigned char*)PORT2ADDR(port) = b;
-       }
-}
-
-void microdev_outl(unsigned int b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-       if (port >= PCIBIOS_MIN_IO) {
-               microdev_pci_outl(b, port);
-               return;
-       }
-#endif
-       *(volatile unsigned int*)PORT2ADDR(port) = b;
-}
-
-unsigned char microdev_inb_p(unsigned long port)
-{
-       unsigned char v = microdev_inb(port);
-       delay();
-       return v;
-}
-
-unsigned short microdev_inw_p(unsigned long port)
-{
-       unsigned short v = microdev_inw(port);
-       delay();
-       return v;
-}
-
-unsigned int microdev_inl_p(unsigned long port)
-{
-       unsigned int v = microdev_inl(port);
-       delay();
-       return v;
-}
-
-void microdev_outb_p(unsigned char b, unsigned long port)
-{
-       microdev_outb(b, port);
-       delay();
-}
-
-void microdev_outw_p(unsigned short b, unsigned long port)
-{
-       microdev_outw(b, port);
-       delay();
-}
-
-void microdev_outl_p(unsigned int b, unsigned long port)
-{
-       microdev_outl(b, port);
-       delay();
-}
-
-void microdev_insb(unsigned long port, void *buffer, unsigned long count)
-{
-       volatile unsigned char *port_addr;
-       unsigned char *buf = buffer;
-
-       port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
-       while (count--)
-               *buf++ = *port_addr;
-}
-
-void microdev_insw(unsigned long port, void *buffer, unsigned long count)
-{
-       volatile unsigned short *port_addr;
-       unsigned short *buf = buffer;
-
-       port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
-       while (count--)
-               *buf++ = *port_addr;
-}
-
-void microdev_insl(unsigned long port, void *buffer, unsigned long count)
-{
-       volatile unsigned long *port_addr;
-       unsigned int *buf = buffer;
-
-       port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
-       while (count--)
-               *buf++ = *port_addr;
-}
-
-void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
-{
-       volatile unsigned char *port_addr;
-       const unsigned char *buf = buffer;
-
-       port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
-       while (count--)
-               *port_addr = *buf++;
-}
-
-void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
-{
-       volatile unsigned short *port_addr;
-       const unsigned short *buf = buffer;
-
-       port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
-       while (count--)
-               *port_addr = *buf++;
-}
-
-void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
-{
-       volatile unsigned long *port_addr;
-       const unsigned int *buf = buffer;
-
-       port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
-       while (count--)
-               *port_addr = *buf++;
+       return (void __iomem *)result;
 }
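
The deleted microdev_outb() above explains why byte stores to the LAN91C111 have to be widened: the two byte enables are tied together, so every write consumes a full 16 bits. A stand-alone sketch of that 16-bit read/modify/write follows; the function name and pointer arithmetic are illustrative only, not the kernel's.

#include <stdint.h>

/* Write one byte to a device that only accepts 16-bit bus cycles.
 * Even offsets occupy bits 0-7 of the word, odd offsets bits 8-15. */
static void write8_via_word_rmw(volatile uint16_t *win, unsigned long off, uint8_t b)
{
        volatile uint16_t *cell = win + (off >> 1);   /* containing 16-bit cell */
        uint16_t word = *cell;                        /* read both bytes        */

        if (off & 1)
                word = (uint16_t)((word & 0x00ffu) | ((uint16_t)b << 8)); /* odd byte  */
        else
                word = (uint16_t)((word & 0xff00u) | b);                  /* even byte */

        *cell = word;                                 /* write the pair back    */
}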
index d1df2a4fb9b885913d38a90c49ff44f7bb0ace50..d8a747291e03ed82a829e20e4e779013881f1197 100644 (file)
@@ -195,27 +195,6 @@ device_initcall(microdev_devices_setup);
 static struct sh_machine_vector mv_sh4202_microdev __initmv = {
        .mv_name                = "SH4-202 MicroDev",
        .mv_nr_irqs             = 72,
-
-       .mv_inb                 = microdev_inb,
-       .mv_inw                 = microdev_inw,
-       .mv_inl                 = microdev_inl,
-       .mv_outb                = microdev_outb,
-       .mv_outw                = microdev_outw,
-       .mv_outl                = microdev_outl,
-
-       .mv_inb_p               = microdev_inb_p,
-       .mv_inw_p               = microdev_inw_p,
-       .mv_inl_p               = microdev_inl_p,
-       .mv_outb_p              = microdev_outb_p,
-       .mv_outw_p              = microdev_outw_p,
-       .mv_outl_p              = microdev_outl_p,
-
-       .mv_insb                = microdev_insb,
-       .mv_insw                = microdev_insw,
-       .mv_insl                = microdev_insl,
-       .mv_outsb               = microdev_outsb,
-       .mv_outsw               = microdev_outsw,
-       .mv_outsl               = microdev_outsl,
-
+       .mv_ioport_map          = microdev_ioport_map,
        .mv_init_irq            = init_microdev_irq,
 };
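
With mv_ioport_map() as the only hook, the eighteen per-board accessors above collapse into one translation routine plus the generic MMIO readers. A rough sketch of that split in plain C, with a placeholder window base rather than the MicroDev's real mapping:

#include <stdint.h>

typedef volatile uint8_t port_mmio;     /* stand-in for the kernel's void __iomem * */

/* Board hook: translate a legacy port number into a memory-mapped address.
 * 0xb0000000 is a placeholder base, not the MicroDev's real window. */
static port_mmio *board_ioport_map(unsigned long port, unsigned int len)
{
        (void)len;
        return (port_mmio *)(0xb0000000UL + (port << 1));
}

/* Generic byte read shared by every board that supplies the hook above. */
static uint8_t generic_inb(unsigned long port)
{
        return *board_ioport_map(port, 1);
}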
index 63e7ed699f39b3c41d8578daa8b57ce31807c689..5c9eaa0535b935c5f90bde958b0ca79724e1dcc2 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the 7206 SolutionEngine specific parts of the kernel
 #
 
-obj-y   := setup.o io.o irq.o
+obj-y   := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644 (file)
index adadc77..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
- *
- * linux/arch/sh/boards/se/7206/io.c
- *
- * Copyright (C) 2006 Yoshinori Sato
- *
- * I/O routine for Hitachi 7206 SolutionEngine.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7206.h>
-
-
-static inline void delay(void)
-{
-       __raw_readw(0x20000000);  /* P2 ROM Area */
-}
-
-/* MS7750 requires special versions of in*, out* routines, since
-   PC-like io ports are located at upper half byte of 16-bit word which
-   can be accessed only with 16-bit wide.  */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-       if (port >= 0x2000 && port < 0x2020)
-               return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-       else if (port >= 0x300 && port < 0x310)
-               return (volatile __u16 *) (PA_SMSC + (port - 0x300));
-
-       return (volatile __u16 *)port;
-}
-
-unsigned char se7206_inb(unsigned long port)
-{
-       return (*port2adr(port)) & 0xff;
-}
-
-unsigned char se7206_inb_p(unsigned long port)
-{
-       unsigned long v;
-
-       v = (*port2adr(port)) & 0xff;
-       delay();
-       return v;
-}
-
-unsigned short se7206_inw(unsigned long port)
-{
-       return *port2adr(port);
-}
-
-void se7206_outb(unsigned char value, unsigned long port)
-{
-       *(port2adr(port)) = value;
-}
-
-void se7206_outb_p(unsigned char value, unsigned long port)
-{
-       *(port2adr(port)) = value;
-       delay();
-}
-
-void se7206_outw(unsigned short value, unsigned long port)
-{
-       *port2adr(port) = value;
-}
-
-void se7206_insb(unsigned long port, void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       __u8 *ap = addr;
-
-       while (count--)
-               *ap++ = *p;
-}
-
-void se7206_insw(unsigned long port, void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       __u16 *ap = addr;
-       while (count--)
-               *ap++ = *p;
-}
-
-void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       const __u8 *ap = addr;
-
-       while (count--)
-               *p = *ap++;
-}
-
-void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       const __u16 *ap = addr;
-       while (count--)
-               *p = *ap++;
-}
index 883b21eacaa686d00efbf822911dac79e6e220d5..d961949600fd462199f3d9706e86371e815e7b1b 100644 (file)
@@ -139,11 +139,13 @@ void __init init_se7206_IRQ(void)
        make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
        make_se7206_irq(IRQ1_IRQ); /* ATA */
        make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
-       __raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+
+       __raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
 
        /* FPGA System register setup*/
        __raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
        __raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
+
        /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
        __raw_writew(0x0001,INTSEL);
 }
index 8f5c65d43d1d85a6e1db054cd5f065ee3aa72119..7f4871c71a01813dbd11882d9c55469d46359ce0 100644 (file)
@@ -86,20 +86,5 @@ __initcall(se7206_devices_setup);
 static struct sh_machine_vector mv_se __initmv = {
        .mv_name                = "SolutionEngine",
        .mv_nr_irqs             = 256,
-       .mv_inb                 = se7206_inb,
-       .mv_inw                 = se7206_inw,
-       .mv_outb                = se7206_outb,
-       .mv_outw                = se7206_outw,
-
-       .mv_inb_p               = se7206_inb_p,
-       .mv_inw_p               = se7206_inw,
-       .mv_outb_p              = se7206_outb_p,
-       .mv_outw_p              = se7206_outw,
-
-       .mv_insb                = se7206_insb,
-       .mv_insw                = se7206_insw,
-       .mv_outsb               = se7206_outsb,
-       .mv_outsw               = se7206_outsw,
-
        .mv_init_irq            = init_se7206_IRQ,
 };
index 8e624b06d5ea67efc746ecab0716485d9a4742bc..43ea14feef51094d032c5b7eec98b35deff925f8 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the 770x SolutionEngine specific parts of the kernel
 #
 
-obj-y   := setup.o io.o irq.o
+obj-y   := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644 (file)
index 28833c8..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * I/O routine for Hitachi SolutionEngine.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se.h>
-
-/* MS7750 requires special versions of in*, out* routines, since
-   PC-like io ports are located at upper half byte of 16-bit word which
-   can be accessed only with 16-bit wide.  */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-       if (port & 0xff000000)
-               return ( volatile __u16 *) port;
-       if (port >= 0x2000)
-               return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-       else if (port >= 0x1000)
-               return (volatile __u16 *) (PA_83902 + (port << 1));
-       else
-               return (volatile __u16 *) (PA_SUPERIO + (port << 1));
-}
-
-static inline int
-shifted_port(unsigned long port)
-{
-       /* For IDE registers, value is not shifted */
-       if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
-               return 0;
-       else
-               return 1;
-}
-
-unsigned char se_inb(unsigned long port)
-{
-       if (shifted_port(port))
-               return (*port2adr(port) >> 8);
-       else
-               return (*port2adr(port))&0xff;
-}
-
-unsigned char se_inb_p(unsigned long port)
-{
-       unsigned long v;
-
-       if (shifted_port(port))
-               v = (*port2adr(port) >> 8);
-       else
-               v = (*port2adr(port))&0xff;
-       ctrl_delay();
-       return v;
-}
-
-unsigned short se_inw(unsigned long port)
-{
-       if (port >= 0x2000)
-               return *port2adr(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-unsigned int se_inl(unsigned long port)
-{
-       maybebadio(port);
-       return 0;
-}
-
-void se_outb(unsigned char value, unsigned long port)
-{
-       if (shifted_port(port))
-               *(port2adr(port)) = value << 8;
-       else
-               *(port2adr(port)) = value;
-}
-
-void se_outb_p(unsigned char value, unsigned long port)
-{
-       if (shifted_port(port))
-               *(port2adr(port)) = value << 8;
-       else
-               *(port2adr(port)) = value;
-       ctrl_delay();
-}
-
-void se_outw(unsigned short value, unsigned long port)
-{
-       if (port >= 0x2000)
-               *port2adr(port) = value;
-       else
-               maybebadio(port);
-}
-
-void se_outl(unsigned int value, unsigned long port)
-{
-       maybebadio(port);
-}
-
-void se_insb(unsigned long port, void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       __u8 *ap = addr;
-
-       if (shifted_port(port)) {
-               while (count--)
-                       *ap++ = *p >> 8;
-       } else {
-               while (count--)
-                       *ap++ = *p;
-       }
-}
-
-void se_insw(unsigned long port, void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       __u16 *ap = addr;
-       while (count--)
-               *ap++ = *p;
-}
-
-void se_insl(unsigned long port, void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
-
-void se_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       const __u8 *ap = addr;
-
-       if (shifted_port(port)) {
-               while (count--)
-                       *p = *ap++ << 8;
-       } else {
-               while (count--)
-                       *p = *ap++;
-       }
-}
-
-void se_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-       volatile __u16 *p = port2adr(port);
-       const __u16 *ap = addr;
-
-       while (count--)
-               *p = *ap++;
-}
-
-void se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
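
The deleted helpers encode the MS7750 quirk noted in the header comment: PC-style ports live in the upper byte of a 16-bit word, except the IDE registers, which are wired straight. A small sketch of a byte read under that convention (port ranges copied from shifted_port(), the address mapping itself left abstract):

#include <stdint.h>

/* IDE command block (0x1f0-0x1f7) and control (0x3f6) are wired straight;
 * every other port is presented in the high byte of a 16-bit word. */
static int port_is_shifted(unsigned long port)
{
        if ((port >= 0x1f0 && port < 0x1f8) || port == 0x3f6)
                return 0;
        return 1;
}

static uint8_t read_port_byte(volatile uint16_t *cell, unsigned long port)
{
        uint16_t word = *cell;

        return port_is_shifted(port) ? (uint8_t)(word >> 8)
                                     : (uint8_t)(word & 0xff);
}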
index 66d39d1b0901de5a9b6da4c7fb7978223b18b502..31330c65c0cec9aa1b78abb69e5bcb95c9c4cd80 100644 (file)
@@ -195,27 +195,5 @@ static struct sh_machine_vector mv_se __initmv = {
 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
        .mv_nr_irqs             = 104,
 #endif
-
-       .mv_inb                 = se_inb,
-       .mv_inw                 = se_inw,
-       .mv_inl                 = se_inl,
-       .mv_outb                = se_outb,
-       .mv_outw                = se_outw,
-       .mv_outl                = se_outl,
-
-       .mv_inb_p               = se_inb_p,
-       .mv_inw_p               = se_inw,
-       .mv_inl_p               = se_inl,
-       .mv_outb_p              = se_outb_p,
-       .mv_outw_p              = se_outw,
-       .mv_outl_p              = se_outl,
-
-       .mv_insb                = se_insb,
-       .mv_insw                = se_insw,
-       .mv_insl                = se_insl,
-       .mv_outsb               = se_outsb,
-       .mv_outsw               = se_outsw,
-       .mv_outsl               = se_outsl,
-
        .mv_init_irq            = init_se_IRQ,
 };
index e6f4341bfe6eaa0467e60dfb56ae08688ea5a9c0..a338fd9d503944f84b4e84b5a3a3e7cccc4a5fcb 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the 7751 SolutionEngine specific parts of the kernel
 #
 
-obj-y   := setup.o io.o irq.o
+obj-y   := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644 (file)
index 6e75bd4..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7751.h>
-#include <asm/addrspace.h>
-
-static inline volatile u16 *port2adr(unsigned int port)
-{
-       if (port >= 0x2000)
-               return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-       maybebadio((unsigned long)port);
-       return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char sh7751se_inb(unsigned long port)
-{
-       if (PXSEG(port))
-               return *(volatile unsigned char *)port;
-       else
-               return (*port2adr(port)) & 0xff;
-}
-
-unsigned char sh7751se_inb_p(unsigned long port)
-{
-       unsigned char v;
-
-        if (PXSEG(port))
-                v = *(volatile unsigned char *)port;
-       else
-               v = (*port2adr(port)) & 0xff;
-       ctrl_delay();
-       return v;
-}
-
-unsigned short sh7751se_inw(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned short *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-unsigned int sh7751se_inl(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned long *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-void sh7751se_outb(unsigned char value, unsigned long port)
-{
-
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-       else
-               *(port2adr(port)) = value;
-}
-
-void sh7751se_outb_p(unsigned char value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-       else
-               *(port2adr(port)) = value;
-       ctrl_delay();
-}
-
-void sh7751se_outw(unsigned short value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned short *)port = value;
-       else if (port >= 0x2000)
-               *port2adr(port) = value;
-       else
-               maybebadio(port);
-}
-
-void sh7751se_outl(unsigned int value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned long *)port = value;
-       else
-               maybebadio(port);
-}
-
-void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
-
-void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
index 50572512e3e84a6db630114a57415153fd4e0506..9fbc51beb1812dfafd10a91cf7b6c02c1cabfdce 100644 (file)
@@ -56,23 +56,5 @@ __initcall(se7751_devices_setup);
 static struct sh_machine_vector mv_7751se __initmv = {
        .mv_name                = "7751 SolutionEngine",
        .mv_nr_irqs             = 72,
-
-       .mv_inb                 = sh7751se_inb,
-       .mv_inw                 = sh7751se_inw,
-       .mv_inl                 = sh7751se_inl,
-       .mv_outb                = sh7751se_outb,
-       .mv_outw                = sh7751se_outw,
-       .mv_outl                = sh7751se_outl,
-
-       .mv_inb_p               = sh7751se_inb_p,
-       .mv_inw_p               = sh7751se_inw,
-       .mv_inl_p               = sh7751se_inl,
-       .mv_outb_p              = sh7751se_outb_p,
-       .mv_outw_p              = sh7751se_outw,
-       .mv_outl_p              = sh7751se_outl,
-
-       .mv_insl                = sh7751se_insl,
-       .mv_outsl               = sh7751se_outsl,
-
        .mv_init_irq            = init_7751se_IRQ,
 };
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644 (file)
index d2d2f4b..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the SnapGear specific parts of the kernel
-#
-
-obj-y   := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644 (file)
index 476650e..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2002  David McCullough <davidm@snapgear.com>
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/addrspace.h>
-
-#ifdef CONFIG_SH_SECUREEDGE5410
-unsigned short secureedge5410_ioport;
-#endif
-
-static inline volatile __u16 *port2adr(unsigned int port)
-{
-       maybebadio((unsigned long)port);
-       return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char snapgear_inb(unsigned long port)
-{
-       if (PXSEG(port))
-               return *(volatile unsigned char *)port;
-       else
-               return (*port2adr(port)) & 0xff;
-}
-
-unsigned char snapgear_inb_p(unsigned long port)
-{
-       unsigned char v;
-
-       if (PXSEG(port))
-               v = *(volatile unsigned char *)port;
-       else
-               v = (*port2adr(port))&0xff;
-       ctrl_delay();
-       return v;
-}
-
-unsigned short snapgear_inw(unsigned long port)
-{
-       if (PXSEG(port))
-               return *(volatile unsigned short *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-unsigned int snapgear_inl(unsigned long port)
-{
-       if (PXSEG(port))
-               return *(volatile unsigned long *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-void snapgear_outb(unsigned char value, unsigned long port)
-{
-
-       if (PXSEG(port))
-               *(volatile unsigned char *)port = value;
-       else
-               *(port2adr(port)) = value;
-}
-
-void snapgear_outb_p(unsigned char value, unsigned long port)
-{
-       if (PXSEG(port))
-               *(volatile unsigned char *)port = value;
-       else
-               *(port2adr(port)) = value;
-       ctrl_delay();
-}
-
-void snapgear_outw(unsigned short value, unsigned long port)
-{
-       if (PXSEG(port))
-               *(volatile unsigned short *)port = value;
-       else if (port >= 0x2000)
-               *port2adr(port) = value;
-       else
-               maybebadio(port);
-}
-
-void snapgear_outl(unsigned int value, unsigned long port)
-{
-       if (PXSEG(port))
-               *(volatile unsigned long *)port = value;
-       else
-               maybebadio(port);
-}
-
-void snapgear_insl(unsigned long port, void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
-
-void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644 (file)
index 2cc6a23..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the SystemH specific parts of the kernel
-#
-
-obj-y   := setup.o irq.o io.o
-
-# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
-# importantly, with the generic sh7751_pcic_init() code. For now, we'll
-# just abuse the hell out of kbuild, because we can..
-
-obj-$(CONFIG_PCI) += pci.o
-pci-y := ../../se/7751/pci.o
-
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644 (file)
index 15577ff..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/io.c
- *
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 Systemh.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <mach/systemh7751.h>
-#include <asm/addrspace.h>
-#include <asm/io.h>
-
-#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
-                                                of smc lan chip*/
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-       if (port >= 0x2000)
-               return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-       maybebadio((unsigned long)port);
-       return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char sh7751systemh_inb(unsigned long port)
-{
-       if (PXSEG(port))
-               return *(volatile unsigned char *)port;
-       else if (port <= 0x3F1)
-               return *(volatile unsigned char *)ETHER_IOMAP(port);
-       else
-               return (*port2adr(port))&0xff;
-}
-
-unsigned char sh7751systemh_inb_p(unsigned long port)
-{
-       unsigned char v;
-
-        if (PXSEG(port))
-                v = *(volatile unsigned char *)port;
-       else if (port <= 0x3F1)
-               v = *(volatile unsigned char *)ETHER_IOMAP(port);
-       else
-               v = (*port2adr(port))&0xff;
-       ctrl_delay();
-       return v;
-}
-
-unsigned short sh7751systemh_inw(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned short *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else if (port <= 0x3F1)
-               return *(volatile unsigned int *)ETHER_IOMAP(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-unsigned int sh7751systemh_inl(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned long *)port;
-       else if (port >= 0x2000)
-               return *port2adr(port);
-       else if (port <= 0x3F1)
-               return *(volatile unsigned int *)ETHER_IOMAP(port);
-       else
-               maybebadio(port);
-       return 0;
-}
-
-void sh7751systemh_outb(unsigned char value, unsigned long port)
-{
-
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-       else if (port <= 0x3F1)
-               *(volatile unsigned char *)ETHER_IOMAP(port) = value;
-       else
-               *(port2adr(port)) = value;
-}
-
-void sh7751systemh_outb_p(unsigned char value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-       else if (port <= 0x3F1)
-               *(volatile unsigned char *)ETHER_IOMAP(port) = value;
-       else
-               *(port2adr(port)) = value;
-       ctrl_delay();
-}
-
-void sh7751systemh_outw(unsigned short value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned short *)port = value;
-       else if (port >= 0x2000)
-               *port2adr(port) = value;
-       else if (port <= 0x3F1)
-               *(volatile unsigned short *)ETHER_IOMAP(port) = value;
-       else
-               maybebadio(port);
-}
-
-void sh7751systemh_outl(unsigned int value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned long *)port = value;
-       else
-               maybebadio(port);
-}
-
-void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
-{
-       unsigned char *p = addr;
-       while (count--) *p++ = sh7751systemh_inb(port);
-}
-
-void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
-{
-       unsigned short *p = addr;
-       while (count--) *p++ = sh7751systemh_inw(port);
-}
-
-void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
-
-void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-       unsigned char *p = (unsigned char*)addr;
-       while (count--) sh7751systemh_outb(*p++, port);
-}
-
-void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-       unsigned short *p = (unsigned short*)addr;
-       while (count--) sh7751systemh_outw(*p++, port);
-}
-
-void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-       maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644 (file)
index e5ee13a..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/irq.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by
- * Jonathan Short.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <mach/systemh7751.h>
-#include <asm/smc37c93x.h>
-
-/* address of external interrupt mask register
- * address must be set prior to use these (maybe in init_XXX_irq())
- * XXX : is it better to use .config than specifying it in code? */
-static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
-static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
-
-static void disable_systemh_irq(struct irq_data *data)
-{
-       unsigned long val, mask = 0x01 << 1;
-
-       /* Clear the "irq"th bit in the mask and set it in the request */
-       val = __raw_readl((unsigned long)systemh_irq_mask_register);
-       val &= ~mask;
-       __raw_writel(val, (unsigned long)systemh_irq_mask_register);
-
-       val = __raw_readl((unsigned long)systemh_irq_request_register);
-       val |= mask;
-       __raw_writel(val, (unsigned long)systemh_irq_request_register);
-}
-
-static void enable_systemh_irq(struct irq_data *data)
-{
-       unsigned long val, mask = 0x01 << 1;
-
-       /* Set "irq"th bit in the mask register */
-       val = __raw_readl((unsigned long)systemh_irq_mask_register);
-       val |= mask;
-       __raw_writel(val, (unsigned long)systemh_irq_mask_register);
-}
-
-static struct irq_chip systemh_irq_type = {
-       .name           = "SystemH Register",
-       .irq_unmask     = enable_systemh_irq,
-       .irq_mask       = disable_systemh_irq,
-};
-
-void make_systemh_irq(unsigned int irq)
-{
-       disable_irq_nosync(irq);
-       set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
-       disable_systemh_irq(irq_get_irq_data(irq));
-}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644 (file)
index 219fd80..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/setup.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003  Paul Mundt
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by Jonathan Short.
- *
- * Rewritten for 2.6 by Paul Mundt.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <asm/machvec.h>
-#include <mach/systemh7751.h>
-
-extern void make_systemh_irq(unsigned int irq);
-
-/*
- * Initialize IRQ setting
- */
-static void __init sh7751systemh_init_irq(void)
-{
-       make_systemh_irq(0xb);  /* Ethernet interrupt */
-}
-
-static struct sh_machine_vector mv_7751systemh __initmv = {
-       .mv_name                = "7751 SystemH",
-       .mv_nr_irqs             = 72,
-
-       .mv_inb                 = sh7751systemh_inb,
-       .mv_inw                 = sh7751systemh_inw,
-       .mv_inl                 = sh7751systemh_inl,
-       .mv_outb                = sh7751systemh_outb,
-       .mv_outw                = sh7751systemh_outw,
-       .mv_outl                = sh7751systemh_outl,
-
-       .mv_inb_p               = sh7751systemh_inb_p,
-       .mv_inw_p               = sh7751systemh_inw,
-       .mv_inl_p               = sh7751systemh_inl,
-       .mv_outb_p              = sh7751systemh_outb_p,
-       .mv_outw_p              = sh7751systemh_outw,
-       .mv_outl_p              = sh7751systemh_outl,
-
-       .mv_insb                = sh7751systemh_insb,
-       .mv_insw                = sh7751systemh_insw,
-       .mv_insl                = sh7751systemh_insl,
-       .mv_outsb               = sh7751systemh_outsb,
-       .mv_outsw               = sh7751systemh_outsw,
-       .mv_outsl               = sh7751systemh_outsl,
-
-       .mv_init_irq            = sh7751systemh_init_irq,
-};
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644 (file)
index b58dfc5..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CPU_SUBTYPE_SH7751R=y
-CONFIG_MEMORY_START=0x0c000000
-CONFIG_MEMORY_SIZE=0x00400000
-CONFIG_FLATMEM_MANUAL=y
-CONFIG_SH_7751_SYSTEMH=y
-CONFIG_PREEMPT=y
-# CONFIG_STANDALONE is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=1024
-# CONFIG_INPUT is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
-CONFIG_HW_RANDOM=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
index 446b3831c2149380265476f47674d4ad4ea72b1e..3d1ae2bfaa6fd7056701e8ddc41538d94a4a1a71 100644 (file)
 /*
  * These will never work in 32-bit, don't even bother.
  */
-#define P1SEGADDR(a)   __futile_remapping_attempt
-#define P2SEGADDR(a)   __futile_remapping_attempt
-#define P3SEGADDR(a)   __futile_remapping_attempt
-#define P4SEGADDR(a)   __futile_remapping_attempt
+#define P1SEGADDR(a)   ({ (void)(a); BUG(); NULL; })
+#define P2SEGADDR(a)   ({ (void)(a); BUG(); NULL; })
+#define P3SEGADDR(a)   ({ (void)(a); BUG(); NULL; })
+#define P4SEGADDR(a)   ({ (void)(a); BUG(); NULL; })
 #endif
 #endif /* P1SEG */
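
The new 32-bit P*SEGADDR() stubs rely on a GCC statement expression: evaluate the argument, BUG(), and still yield a value so the macro can be used wherever an expression is expected. The same idiom outside the kernel, with abort() standing in for BUG():

#include <stdlib.h>

/* Evaluate the argument for its side effects, trap, and still "return" a
 * value (GCC statement-expression extension). */
#define NEVER_MAPPED(a)  ({ (void)(a); abort(); (void *)0; })

/* Usage: void *p = NEVER_MAPPED(addr); compiles anywhere, traps at run time. */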
 
index a15f1058bbf439b8895f1b3d277f7e1bcf05c6aa..083ea068e819e80a389d5610bc9e19243d4ed950 100644 (file)
@@ -66,7 +66,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
 #define PHYS_ADDR_MASK29               0x1fffffff
 #define PHYS_ADDR_MASK32               0xffffffff
 
-#ifdef CONFIG_PMB
 static inline unsigned long phys_addr_mask(void)
 {
        /* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@ static inline unsigned long phys_addr_mask(void)
 
        return PHYS_ADDR_MASK32;
 }
-#elif defined(CONFIG_32BIT)
-static inline unsigned long phys_addr_mask(void)
-{
-       return PHYS_ADDR_MASK32;
-}
-#else
-static inline unsigned long phys_addr_mask(void)
-{
-       return PHYS_ADDR_MASK29;
-}
-#endif
 
 #define PTE_PHYS_MASK          (phys_addr_mask() & PAGE_MASK)
 #define PTE_FLAGS_MASK         (~(PTE_PHYS_MASK) << PAGE_SHIFT)
index 1f1af5afff0374537f989911d4e98f51078a9c1e..10c8b1823a181ef6abd8f07766c801f7c2c6d3f1 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
+#include <asm/uncached.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn) (4)
 #endif
 
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
index c941b273940581bc4221458a8cabf639829e1500..a4ad1cd9bc4d02740bb8afcb1936a932b2ed3f35 100644 (file)
@@ -145,42 +145,6 @@ do {                                                               \
                __restore_dsp(prev);                            \
 } while (0)
 
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached()                     \
-do {                                           \
-       unsigned long __dummy;                  \
-                                               \
-       __asm__ __volatile__(                   \
-               "mova   1f, %0\n\t"             \
-               "add    %1, %0\n\t"             \
-               "jmp    @%0\n\t"                \
-               " nop\n\t"                      \
-               ".balign 4\n"                   \
-               "1:"                            \
-               : "=&z" (__dummy)               \
-               : "r" (cached_to_uncached));    \
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached()                               \
-do {                                                   \
-       unsigned long __dummy;                          \
-       ctrl_barrier();                                 \
-       __asm__ __volatile__(                           \
-               "mov.l  1f, %0\n\t"                     \
-               "jmp    @%0\n\t"                        \
-               " nop\n\t"                              \
-               ".balign 4\n"                           \
-               "1:     .long 2f\n"                     \
-               "2:"                                    \
-               : "=&r" (__dummy));                     \
-} while (0)
-
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector()      \
 ({                                     \
index 36338646dfc81832227e78d39ccf6cdc71df4963..8593bc8d1a4e74af89ace4cb2e767b9fd0567ea6 100644 (file)
@@ -34,9 +34,6 @@ do {                                                          \
                              &next->thread);                   \
 } while (0)
 
-#define jump_to_uncached()     do { } while (0)
-#define back_to_cached()       do { } while (0)
-
 #define __icbi(addr)   __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
 #define __ocbp(addr)   __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
 #define __ocbi(addr)   __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
index e3419f96626ad49719130f120bcb71bcbdb40c42..6f8816b79cf152091b1fc08e6cd4dc5d4390bfe2 100644 (file)
@@ -4,15 +4,55 @@
 #include <linux/bug.h>
 
 #ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 extern unsigned long uncached_start, uncached_end;
 
 extern int virt_addr_uncached(unsigned long kaddr);
 extern void uncached_init(void);
 extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached()                     \
+do {                                           \
+       unsigned long __dummy;                  \
+                                               \
+       __asm__ __volatile__(                   \
+               "mova   1f, %0\n\t"             \
+               "add    %1, %0\n\t"             \
+               "jmp    @%0\n\t"                \
+               " nop\n\t"                      \
+               ".balign 4\n"                   \
+               "1:"                            \
+               : "=&z" (__dummy)               \
+               : "r" (cached_to_uncached));    \
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached()                               \
+do {                                                   \
+       unsigned long __dummy;                          \
+       ctrl_barrier();                                 \
+       __asm__ __volatile__(                           \
+               "mov.l  1f, %0\n\t"                     \
+               "jmp    @%0\n\t"                        \
+               " nop\n\t"                              \
+               ".balign 4\n"                           \
+               "1:     .long 2f\n"                     \
+               "2:"                                    \
+               : "=&r" (__dummy));                     \
+} while (0)
 #else
 #define virt_addr_uncached(kaddr)      (0)
 #define uncached_init()                        do { } while (0)
 #define uncached_resize(size)          BUG()
+#define jump_to_uncached()             do { } while (0)
+#define back_to_cached()               do { } while (0)
 #endif
 
 #endif /* __ASM_SH_UNCACHED_H */
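
jump_to_uncached() works by adding cached_to_uncached to the current instruction address and jumping there, so cache and TLB manipulation executes from the uncached alias of the same code; moving the macros here keeps them next to the symbols they use, which is what lets system_32.h and system_64.h drop their copies above. The data-side version of the same aliasing arithmetic, as a sketch (the offset is whatever the platform computed at boot):

#include <stdint.h>

/* Compute the uncached alias of a cached kernel address, given the delta
 * the platform established at boot (the same value the jump macro adds). */
static inline void *uncached_alias(void *cached, uintptr_t cached_to_uncached)
{
        return (void *)((uintptr_t)cached + cached_to_uncached);
}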
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644 (file)
index efc43b3..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_EDOSK7705_H
-#define __ASM_SH_EDOSK7705_H
-
-#define __IO_PREFIX sh_edosk7705
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_EDOSK7705_H */
index 1aed15856e11ff5bae72b2522f94f816084e0dee..dcb05fa8c164d9c9044df5ce85665953328f7e21 100644 (file)
@@ -68,13 +68,4 @@ extern void microdev_print_fpga_intc_status(void);
 #define __IO_PREFIX microdev
 #include <asm/io_generic.h>
 
-#if defined(CONFIG_PCI)
-unsigned char  microdev_pci_inb(unsigned long port);
-unsigned short microdev_pci_inw(unsigned long port);
-unsigned long  microdev_pci_inl(unsigned long port);
-void           microdev_pci_outb(unsigned char  data, unsigned long port);
-void           microdev_pci_outw(unsigned short data, unsigned long port);
-void           microdev_pci_outl(unsigned long  data, unsigned long port);
-#endif
-
 #endif /* __ASM_SH_MICRODEV_H */
similarity index 79%
rename from arch/sh/include/mach-common/mach/snapgear.h
rename to arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f51c4dc35c3cc5ac9558feb1026f1c99b9..3653b9a4bacc323027fb3b8d98e45d9e8eea6dfe 100644 (file)
 #ifndef _ASM_SH_IO_SNAPGEAR_H
 #define _ASM_SH_IO_SNAPGEAR_H
 
-#if defined(CONFIG_CPU_SH4)
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt.  In fact the priority
- * is the interrupt :-)
- */
-
-#define IRL0_IRQ       2
-#define IRL0_PRIORITY  13
-
-#define IRL1_IRQ       5
-#define IRL1_PRIORITY  10
-
-#define IRL2_IRQ       8
-#define IRL2_PRIORITY  7
-
-#define IRL3_IRQ       11
-#define IRL3_PRIORITY  4
-#endif
-
 #define __IO_PREFIX    snapgear
 #include <asm/io_generic.h>
 
-#ifdef CONFIG_SH_SECUREEDGE5410
 /*
  * We need to remember what was written to the ioport as some bits
  * are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@ extern unsigned short secureedge5410_ioport;
                        ((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
 #define SECUREEDGE_READ_IOPORT() \
         ((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
-#endif
 
 #endif /* _ASM_SH_IO_SNAPGEAR_H */
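
SECUREEDGE_WRITE_IOPORT() keeps secureedge5410_ioport as a software shadow because the control port shares bits between functions and cannot be read back in full. The same shadow-register pattern in a generic sketch; the port address below is a placeholder:

#include <stdint.h>

static volatile uint16_t *const ctrl_port = (volatile uint16_t *)0xb0000000; /* placeholder */
static uint16_t ctrl_shadow;                    /* last value written to the port */

/* Update only the bits selected by 'mask', preserving every other bit we
 * have ever written, since the hardware cannot report them back. */
static void ctrl_write_masked(uint16_t val, uint16_t mask)
{
        ctrl_shadow = (uint16_t)((ctrl_shadow & ~mask) | (val & mask));
        *ctrl_port = ctrl_shadow;
}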
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644 (file)
index 4161122..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
-#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
-
-/*
- * linux/include/asm-sh/systemh/7751systemh.h
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SystemH support
-
- * Modified for 7751 SystemH by
- * Jonathan Short, 2002.
- */
-
-/* Box specific addresses.  */
-
-#define PA_ROM         0x00000000      /* EPROM */
-#define PA_ROM_SIZE    0x00400000      /* EPROM size 4M byte */
-#define PA_FROM                0x01000000      /* EPROM */
-#define PA_FROM_SIZE   0x00400000      /* EPROM size 4M byte */
-#define PA_EXT1                0x04000000
-#define PA_EXT1_SIZE   0x04000000
-#define PA_EXT2                0x08000000
-#define PA_EXT2_SIZE   0x04000000
-#define PA_SDRAM       0x0c000000
-#define PA_SDRAM_SIZE  0x04000000
-
-#define PA_EXT4                0x12000000
-#define PA_EXT4_SIZE   0x02000000
-#define PA_EXT5                0x14000000
-#define PA_EXT5_SIZE   0x04000000
-#define PA_PCIC                0x18000000      /* MR-SHPC-01 PCMCIA */
-
-#define PA_DIPSW0      0xb9000000      /* Dip switch 5,6 */
-#define PA_DIPSW1      0xb9000002      /* Dip switch 7,8 */
-#define PA_LED         0xba000000      /* LED */
-#define        PA_BCR          0xbb000000      /* FPGA on the MS7751SE01 */
-
-#define PA_MRSHPC      0xb83fffe0      /* MR-SHPC-01 PCMCIA controller */
-#define PA_MRSHPC_MW1  0xb8400000      /* MR-SHPC-01 memory window base */
-#define PA_MRSHPC_MW2  0xb8500000      /* MR-SHPC-01 attribute window base */
-#define PA_MRSHPC_IO   0xb8600000      /* MR-SHPC-01 I/O window base */
-#define MRSHPC_MODE     (PA_MRSHPC + 4)
-#define MRSHPC_OPTION   (PA_MRSHPC + 6)
-#define MRSHPC_CSR      (PA_MRSHPC + 8)
-#define MRSHPC_ISR      (PA_MRSHPC + 10)
-#define MRSHPC_ICR      (PA_MRSHPC + 12)
-#define MRSHPC_CPWCR    (PA_MRSHPC + 14)
-#define MRSHPC_MW0CR1   (PA_MRSHPC + 16)
-#define MRSHPC_MW1CR1   (PA_MRSHPC + 18)
-#define MRSHPC_IOWCR1   (PA_MRSHPC + 20)
-#define MRSHPC_MW0CR2   (PA_MRSHPC + 22)
-#define MRSHPC_MW1CR2   (PA_MRSHPC + 24)
-#define MRSHPC_IOWCR2   (PA_MRSHPC + 26)
-#define MRSHPC_CDCR     (PA_MRSHPC + 28)
-#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
-
-#define BCR_ILCRA      (PA_BCR + 0)
-#define BCR_ILCRB      (PA_BCR + 2)
-#define BCR_ILCRC      (PA_BCR + 4)
-#define BCR_ILCRD      (PA_BCR + 6)
-#define BCR_ILCRE      (PA_BCR + 8)
-#define BCR_ILCRF      (PA_BCR + 10)
-#define BCR_ILCRG      (PA_BCR + 12)
-
-#define IRQ_79C973     13
-
-#define __IO_PREFIX    sh7751systemh
-#include <asm/io_generic.h>
-
-#endif  /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
index 2d9700c6b53a07426bdc6253e55a62effed962c0..0fe2e9329cb25f699acd433dd4f2d9b4b945f00b 100644 (file)
@@ -48,7 +48,7 @@ static struct clk r_clk = {
  * Default rate for the root input clock, reset this with clk_set_rate()
  * from the platform code.
  */
-struct clk extal_clk = {
+static struct clk extal_clk = {
        .rate           = 33333333,
 };
 
@@ -111,7 +111,7 @@ static struct clk div3_clk = {
        .parent         = &pll_clk,
 };
 
-struct clk *main_clks[] = {
+static struct clk *main_clks[] = {
        &r_clk,
        &extal_clk,
        &fll_clk,
@@ -156,7 +156,7 @@ struct clk div4_clks[DIV4_NR] = {
 
 enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
 
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
        [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
        [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
        [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
index 09370392aff15d791d612ac57bcdf0c6031d8443..c3e61b36649380f5db818c5f6edb05bfc897de1c 100644 (file)
@@ -79,7 +79,7 @@ config 29BIT
 
 config 32BIT
        bool
-       default y if CPU_SH5
+       default y if CPU_SH5 || !MMU
 
 config PMB
        bool "Support 32-bit physical addressing through PMB"
index 0387932869904a042472cca19b65141b5b8bdc9f..40733a9524021d42d42ddc5a8d7e08ef77a1c155 100644 (file)
@@ -79,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
 {
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
-       void *p1addr = vaddr;
-#else
-       void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+       void *addr;
+
+       addr = __in_29bit_mode() ?
+              (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
 
        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
-               __flush_invalidate_region(p1addr, size);
+               __flush_invalidate_region(addr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
-               __flush_wback_region(p1addr, size);
+               __flush_wback_region(addr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
-               __flush_purge_region(p1addr, size);
+               __flush_purge_region(addr, size);
                break;
        default:
                BUG();
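
The rewritten dma_cache_sync() chooses the cache-maintenance operation purely from the DMA direction: invalidate when the device wrote the buffer, write back when the CPU wrote it, purge for both. A plain-C sketch of that dispatch, with stub flush routines standing in for the SH helpers:

#include <stdio.h>

enum dma_dir { DEV_TO_MEM, MEM_TO_DEV, BOTH_WAYS };

static void invalidate(void *a, unsigned long n) { printf("invalidate %p %lu\n", a, n); }
static void writeback(void *a, unsigned long n)  { printf("writeback  %p %lu\n", a, n); }
static void purge(void *a, unsigned long n)      { printf("purge      %p %lu\n", a, n); }

/* Device wrote the buffer: drop stale CPU lines.  CPU wrote it: push dirty
 * lines out before the device reads.  Both: write back and invalidate. */
static void cache_sync(void *addr, unsigned long size, enum dma_dir dir)
{
        switch (dir) {
        case DEV_TO_MEM: invalidate(addr, size); break;
        case MEM_TO_DEV: writeback(addr, size);  break;
        case BOTH_WAYS:  purge(addr, size);      break;
        }
}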
index 8a4eca551fc0a08542085faddeac7af373ad091b..a7767da815e91453c5b5526a05f4c72405e4afba 100644 (file)
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
 
 void __init uncached_init(void)
 {
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
        uncached_start = P2SEG;
 #else
        uncached_start = memory_end;
index 9f56eb978024ef2b6e93497a6580d00dbc7f62eb..0e68465e7b50920ffcaa8a832402fdfc3670226c 100644 (file)
@@ -26,7 +26,6 @@ HD64461                       HD64461
 7724SE                 SH_7724_SOLUTION_ENGINE
 7751SE                 SH_7751_SOLUTION_ENGINE
 7780SE                 SH_7780_SOLUTION_ENGINE
-7751SYSTEMH            SH_7751_SYSTEMH
 HP6XX                  SH_HP6XX
 DREAMCAST              SH_DREAMCAST
 SNAPGEAR               SH_SECUREEDGE5410
index e0f7ee186721cf554dc142efdfc58350eaf6a7df..b2a6c5de79abf00ddad0354fca8b694937282131 100644 (file)
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 1480106d1c05c2572177058611ee655db569c41c..3d0f202462609e1401b4737b905cb324332d8b4c 100644 (file)
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly.  In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space.  For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+       KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
        KM_BOUNCE_READ,
        KM_SKB_SUNRPC_DATA,
        KM_SKB_DATA_SOFTIRQ,
        KM_USER0,
        KM_USER1,
        KM_BIO_SRC_IRQ,
+       KM_BIO_DST_IRQ,
+       KM_PTE0,
+       KM_PTE1,
        KM_IRQ0,
        KM_IRQ1,
        KM_SOFTIRQ0,
        KM_SOFTIRQ1,
-       KM_MEMCPY0,
-       KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-       KM_PTE0,
-       KM_PTE1,
-#endif
-       KM_TYPE_NR
+       KM_SYNC_ICACHE,
+       KM_SYNC_DCACHE,
+       KM_UML_USERCOPY,
+       KM_IRQ_PTE,
+       KM_NMI,
+       KM_NMI_PTE,
+       KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
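
The new comment quantifies the cost of each extra atomic-kmap slot: one page of fixed virtual address space per CPU. Spelled out for the 64-processor, 64KB-page configuration it mentions:

#include <stdio.h>

int main(void)
{
        unsigned long cpus = 64;
        unsigned long page_size = 64UL * 1024;        /* 64KB pages       */
        unsigned long per_slot = cpus * page_size;    /* one page per CPU */

        /* 64 * 64KB = 4MB of fixed virtual address space per slot. */
        printf("%lu MB per KM_TYPE_NR slot\n", per_slot >> 20);
        return 0;
}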
index dc4ccdd855bca293070ea8bff94a699821d5e01b..a6604e9485da2a188265d618eaaa8a75ec8a4828 100644 (file)
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-       _pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
index 3dc90fa92c704d8fb07d66b034daadd44ee6376b..b16e5db8f0e7225f9e8789a747f9295d341d947d 100644 (file)
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64     /* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
index f2e3ff485333224f022344fad78f7c96b19d5243..b35c2db71199ae3d70fc2b1e29687dc350514a77 100644 (file)
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
index 77739cdd9462dacf2a9188a0a0418c014e6379f9..67617a05e602380a7d1fba86dadba25db17a13ef 100644 (file)
@@ -148,11 +148,11 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
index 2c54fd43a8a0132533927ed5b5e373940f10ce18..493a0e66d916c618f867034db5647f72acd61999 100644 (file)
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
 void early_panic(const char *fmt, ...)
 {
        va_list ap;
-       raw_local_irq_disable_all();
+       arch_local_irq_disable_all();
        va_start(ap, fmt);
        early_printk("Kernel panic - not syncing: ");
        early_vprintk(fmt, ap);
index 1e54a7843410e57d021dbac9921acf4f39e2f481..e910530436e64a4079ba65e22bdca13e6ad01bfa 100644 (file)
@@ -151,12 +151,12 @@ enum direction_protect {
 
 static void enable_firewall_interrupts(void)
 {
-       raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+       arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-       raw_local_irq_mask_now(INT_UDN_FIREWALL);
+       arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_hardwall_fops = {
+       .open           = nonseekable_open,
        .unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = hardwall_compat_ioctl,
 #endif
        .flush          = hardwall_flush,
        .release        = hardwall_release,
-       .llseek         = noop_llseek,
 };
 
 static struct cdev hardwall_dev;
index e63917687e998907824a62b1783ffef599418818..128805ef8f2c83c1a8b61cf03c5711e5fbb97ff1 100644 (file)
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-       raw_local_irq_unmask(INT_IPI_K);
+       arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
index ba7a265d61796a09168b6c35ed74af23f9fdb256..0d8b9e933487c2847824efc1637364cba8ec84d8 100644 (file)
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
                if ((entry & IND_SOURCE)) {
                        void *va =
-                               kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+                               kmap_atomic_pfn(entry >> PAGE_SHIFT);
                        r = kexec_bn2cl(va);
                        if (r) {
                                command_line = r;
                                break;
                        }
-                       kunmap_atomic(va, KM_USER0);
+                       kunmap_atomic(va);
                }
        }
 
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
                hverr = hv_set_command_line(
                        (HV_VirtAddr) command_line, strlen(command_line));
-               kunmap_atomic(command_line, KM_USER0);
+               kunmap_atomic(command_line);
        } else {
                pr_info("%s: no command line found; making empty\n",
                       __func__);
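
The two hunks above follow the kmap_atomic() rework: the KM_USER0 slot argument is gone and the mapping is simply released with kunmap_atomic(). A hedged sketch of the new calling convention (illustrative only; copy_first_bytes() is a hypothetical helper):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_first_bytes(struct page *page, void *dst, size_t len)
    {
            void *vaddr = kmap_atomic(page);  /* no km_type slot argument any more */

            memcpy(dst, vaddr, len);
            kunmap_atomic(vaddr);             /* pairs with the kmap_atomic() above */
    }
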
index 997e3933f72679718583dae5a82c72c4d2f9fbdb..0858ee6b520f6acf4c23b3a27c8491267919b943 100644 (file)
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
                panic("hv_register_message_state: error %d", rc);
 
        /* Make sure downcall interrupts will be enabled. */
-       raw_local_irq_unmask(INT_INTCTRL_K);
+       arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
index 9cd29884c09f2bfec1cddbacb67579bbd1e821c6..e92e40527d6dcb9855625cafa13a83f08f56df9c 100644 (file)
@@ -50,10 +50,10 @@ long arch_ptrace(struct task_struct *child, long request,
 {
        unsigned long __user *datap = (long __user __force *)data;
        unsigned long tmp;
-       int i;
        long ret = -EIO;
-       unsigned long *childregs;
        char *childreg;
+       struct pt_regs copyregs;
+       int ex1_offset;
 
        switch (request) {
 
@@ -80,6 +80,16 @@ long arch_ptrace(struct task_struct *child, long request,
                if (addr >= PTREGS_SIZE)
                        break;
                childreg = (char *)task_pt_regs(child) + addr;
+
+               /* Guard against overwrites of the privilege level. */
+               ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+               if (is_compat_task())   /* point at low word */
+                       ex1_offset += sizeof(compat_long_t);
+#endif
+               if (addr == ex1_offset)
+                       data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
                if (is_compat_task()) {
                        if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@ long arch_ptrace(struct task_struct *child, long request,
                break;
 
        case PTRACE_GETREGS:  /* Get all registers from the child. */
-               if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-                       break;
-               childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-                               ++i) {
-                       ret = __put_user(childregs[i], &datap[i]);
-                       if (ret != 0)
-                               break;
+               if (copy_to_user(datap, task_pt_regs(child),
+                                sizeof(struct pt_regs)) == 0) {
+                       ret = 0;
                }
                break;
 
        case PTRACE_SETREGS:  /* Set all registers in the child. */
-               if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-                       break;
-               childregs = (long *)task_pt_regs(child);
-               for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-                               ++i) {
-                       ret = __get_user(childregs[i], &datap[i]);
-                       if (ret != 0)
-                               break;
+               if (copy_from_user(&copyregs, datap,
+                                  sizeof(struct pt_regs)) == 0) {
+                       copyregs.ex1 =
+                               PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+                       *task_pt_regs(child) = copyregs;
+                       ret = 0;
                }
                break;
 
index acd86d20beba7ab75aed0f484c3b980a836af282..baa3d905fee21c9fc2ef403449f7e4d671ad7e1e 100644 (file)
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
        warn_early_printk();
-       raw_local_irq_disable_all();
+       arch_local_irq_disable_all();
        smp_send_stop();
        hv_halt();
 }
@@ -35,14 +35,14 @@ void machine_halt(void)
 void machine_power_off(void)
 {
        warn_early_printk();
-       raw_local_irq_disable_all();
+       arch_local_irq_disable_all();
        smp_send_stop();
        hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-       raw_local_irq_disable_all();
+       arch_local_irq_disable_all();
        smp_send_stop();
        hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
index ae51cad12da0d6b445666d1796925816b8e7e285..fb0b3cbeae149081a5b3011f0230fbd48f4d56dd 100644 (file)
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
 
        /* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-       raw_local_irq_unmask(INT_DMATLB_MISS);
-       raw_local_irq_unmask(INT_DMATLB_ACCESS);
+       arch_local_irq_unmask(INT_DMATLB_MISS);
+       arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-       raw_local_irq_unmask(INT_SNITLB_MISS);
+       arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-       raw_local_irq_unmask(INT_SINGLE_STEP_K);
+       arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
        /*
index fb28e85ae3aea3698c7d2cd5da564bb0a5f7fb0d..687719d4abd1eba56fbd591852c8fab51950bc52 100644 (file)
@@ -71,6 +71,9 @@ int restore_sigcontext(struct pt_regs *regs,
        for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
                err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
+       /* Ensure that the PL is always set to USER_PL. */
+       regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
        regs->faultnum = INT_SWINT_1_SIGRETURN;
 
        err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@ void do_signal(struct pt_regs *regs)
                        current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                }
 
-               return;
+               goto done;
        }
 
        /* Did we come from a system call? */
@@ -358,4 +361,8 @@ void do_signal(struct pt_regs *regs)
                current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
+
+done:
+       /* Avoid double syscall restart if there are nested signals. */
+       regs->faultnum = INT_SWINT_1_SIGRETURN;
 }
index 75255d90aff309669b110dff32f2254ede9daa27..9575b37a8b75318d57cd061172d7cd09d0cf8049 100644 (file)
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
 static void smp_stop_cpu_interrupt(void)
 {
        set_cpu_online(smp_processor_id(), 0);
-       raw_local_irq_disable_all();
+       arch_local_irq_disable_all();
        for (;;)
                asm("nap");
 }
index 6bed820e1421ae628c9ad162de3e50eb1f2884bf..f2e156e44692505e620c7e6c1a35e1ca948c3855 100644 (file)
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 {
        BUG_ON(ticks > MAX_TICK);
        __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-       raw_local_irq_unmask_now(INT_TILE_TIMER);
+       arch_local_irq_unmask_now(INT_TILE_TIMER);
        return 0;
 }
 
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
 static void tile_timer_set_mode(enum clock_event_mode mode,
                                struct clock_event_device *evt)
 {
-       raw_local_irq_mask_now(INT_TILE_TIMER);
+       arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
        evt->cpumask = cpumask_of(smp_processor_id());
 
        /* Start out with timer not firing. */
-       raw_local_irq_mask_now(INT_TILE_TIMER);
+       arch_local_irq_mask_now(INT_TILE_TIMER);
 
        /* Register tile timer. */
        clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
         * Mask the timer interrupt here, since we are a oneshot timer
         * and there are now by definition no events pending.
         */
-       raw_local_irq_mask(INT_TILE_TIMER);
+       arch_local_irq_mask(INT_TILE_TIMER);
 
        /* Track time spent here in an interrupt context */
        irq_enter();
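
As the comment above notes, this is a oneshot clock_event_device: the handler masks the timer and the clockevent core re-arms it through set_next_event(). A hedged sketch of how such a device is described, reusing the callback names from the hunks above (the other field values are illustrative assumptions, not taken from this commit):

    #include <linux/clockchips.h>

    static struct clock_event_device example_timer = {
            .name           = "example-oneshot",
            .features       = CLOCK_EVT_FEAT_ONESHOT,    /* fires once per programmed event */
            .rating         = 100,
            .set_next_event = tile_timer_set_next_event, /* unmasks and programs the next tick */
            .set_mode       = tile_timer_set_mode,       /* masks the interrupt when shut down */
    };
    /* registered per cpu with clockevents_register_device(&example_timer) */
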
index dfedea7b266b5d58e6cd445fa8ed0f8ae8fcaea8..f7d4a6ad61e811356ac6bec2b9f235c94f4355c6 100644 (file)
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
  * we must run with interrupts disabled to avoid the risk of some
  * other code seeing the incoherent data in our cache.  (Recall that
  * our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
  * the normal VAs that aren't supposed to hit in cache.)
  */
 static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
        unsigned long flags, newsrc, newdst;
        pmd_t *pmdp;
        pte_t *ptep;
+       int type0, type1;
        int cpu = get_cpu();
 
        /*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
        sim_allow_multiple_caching(1);
 
        /* Set up the new dest mapping */
-       idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+       type0 = kmap_atomic_idx_push();
+       idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
        newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
        pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
        ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
        }
 
        /* Set up the new source mapping */
-       idx += (KM_MEMCPY0 - KM_MEMCPY1);
+       type1 = kmap_atomic_idx_push();
+       idx += (type0 - type1);
        src_pte = hv_pte_set_nc(src_pte);
        src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
        newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
         * We're done: notify the simulator that all is back to normal,
         * and re-enable interrupts and pre-emption.
         */
+       kmap_atomic_idx_pop();
+       kmap_atomic_idx_pop();
        sim_allow_multiple_caching(0);
        local_irq_restore(flags);
        put_cpu();
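
memcpy_multicache now draws its two scratch fixmap slots from the per-cpu atomic-kmap index stack instead of the dedicated KM_MEMCPY0/KM_MEMCPY1 slots. A minimal sketch of that stack discipline (illustrative; it assumes preemption is already disabled, as get_cpu() guarantees above):

    #include <linux/highmem.h>
    #include <linux/smp.h>
    #include <asm/fixmap.h>
    #include <asm/kmap_types.h>

    static void use_scratch_slot(void)
    {
            int type = kmap_atomic_idx_push();  /* claim one per-cpu slot */
            unsigned long idx = FIX_KMAP_BEGIN + KM_TYPE_NR * smp_processor_id() + type;
            unsigned long vaddr = __fix_to_virt(idx);

            /* ... install a pte at vaddr and use the temporary mapping ... */
            (void)vaddr;

            kmap_atomic_idx_pop();              /* slots must be released in LIFO order */
    }
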
index abb57331cf6e1b25fdeccfd295e25ea7464ebbf5..31dbbd9afe47d5cb32590b96e1d7065e2ff5bf24 100644 (file)
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
 void *__kmap_atomic(struct page *page)
 {
        /* PAGE_NONE is a magic value that tells us to check immutability. */
-       return kmap_atomic_prot(page, type, PAGE_NONE);
+       return kmap_atomic_prot(page, PAGE_NONE);
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
index 78e1982cb6c9dc0385e258b81bfc233f23baaaac..0b9ce69b0ee5e755ea6186e1093269507cae156d 100644 (file)
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-       strict_strtol(str, 0, &initfree);
-       pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+       long val;
+       if (strict_strtol(str, 0, &val)) {
+               initfree = val;
+               pr_info("initfree: %s free init pages\n",
+                       initfree ? "will" : "won't");
+       }
        return 1;
 }
 __setup("initfree=", set_initfree);
index 335c24621c418b1a7bffdacc2723bbcee665c8e2..1f5430c53d0db4cf62c992c656de7e1b2e9769e6 100644 (file)
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 }
 
 #if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
 {
-       pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+       pte_t *pte = kmap_atomic(pmd_page(*dir)) +
                (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
        return &pte[pte_index(address)];
 }
index 2cd899f75a3cc21f2e53e2f6b8d9b46368f496cc..b7c5bab9bd774db16b8eaaa31441a8aa45e45aea 100644 (file)
@@ -38,8 +38,8 @@ struct pt_regs {
 
 struct task_struct;
 
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-                          long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+       unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 extern int get_fpregs(struct user_i387_struct __user *buf,
index a5e33f29bbeb7ef71e9a25697e5c8bfd3e664370..701b672c11225aaf6ffcdd9fe9485bccacbd4726 100644 (file)
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
                break;
 
        case PTRACE_SET_THREAD_AREA:
-               ret = ptrace_set_thread_area(child, addr, datavp);
+               ret = ptrace_set_thread_area(child, addr, vp);
                break;
 
        case PTRACE_FAULTINFO: {
index 46d58448c3aff9039fdf0b909d069d00d80ef75e..e421b8cd6944af860c4b28a1a176a14320ac9f79 100644 (file)
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
        struct amd_nb *nb;
        int i;
 
-       nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+       nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+                         cpu_to_node(cpu));
        if (!nb)
                return NULL;
 
-       memset(nb, 0, sizeof(*nb));
        nb->nb_id = nb_id;
 
        /*
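
The hunk above folds the kmalloc() plus memset() pair into one zeroed, node-local allocation; an equivalent and purely illustrative spelling uses kzalloc_node():

    nb = kzalloc_node(sizeof(*nb), GFP_KERNEL, cpu_to_node(cpu));
    if (!nb)
            return NULL;
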
index 908ea5464a518097958083156eb5b0b94f4d021c..fb8b376bf28cb3e04a6bb903900f32838ab02a14 100644 (file)
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        }
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
        pfn_t pfn;
        u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
                old_spte = __xchg_spte(sptep, new_spte);
 
        if (!is_rmap_spte(old_spte))
-               return;
+               return 0;
 
        pfn = spte_to_pfn(old_spte);
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
                kvm_set_pfn_dirty(pfn);
+       return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-       set_spte_track_bits(sptep, new_spte);
-       rmap_remove(kvm, sptep);
+       if (set_spte_track_bits(sptep, new_spte))
+               rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
index 2288ad829b327bb68d0efa65c104dcc7f2bbe7b5..cdac9e592aa53ee84b7e184900601ab4730354e5 100644 (file)
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                !kvm_exception_is_soft(vcpu->arch.exception.nr);
        events->exception.nr = vcpu->arch.exception.nr;
        events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+       events->exception.pad = 0;
        events->exception.error_code = vcpu->arch.exception.error_code;
 
        events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
        events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+       events->nmi.pad = 0;
 
        events->sipi_vector = vcpu->arch.sipi_vector;
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
                         | KVM_VCPUEVENT_VALID_SIPI_VECTOR
                         | KVM_VCPUEVENT_VALID_SHADOW);
+       memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
        dbgregs->dr6 = vcpu->arch.dr6;
        dbgregs->dr7 = vcpu->arch.dr7;
        dbgregs->flags = 0;
+       memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
                sizeof(ps->channels));
        ps->flags = kvm->arch.vpit->pit_state.flags;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+       memset(&ps->reserved, 0, sizeof(ps->reserved));
        return r;
 }
 
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_memslots *slots, *old_slots;
                unsigned long *dirty_bitmap;
 
-               spin_lock(&kvm->mmu_lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->mmu_lock);
-
                r = -ENOMEM;
                dirty_bitmap = vmalloc(n);
                if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
                kfree(old_slots);
 
+               spin_lock(&kvm->mmu_lock);
+               kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
+
                r = -EFAULT;
                if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
                        vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
                local_irq_enable();
                user_ns.flags = 0;
+               memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
                r = -EFAULT;
                if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
                return X86EMUL_CONTINUE;
 
        if (kvm_x86_ops->has_wbinvd_exit()) {
+               preempt_disable();
                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
+               preempt_enable();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
        }
        wbinvd();
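
smp_call_function_many() must be issued with preemption disabled so the calling CPU cannot migrate while the cross-call is set up, which is what the added preempt_disable()/preempt_enable() pair guarantees. A generic sketch of the pattern (illustrative; flush_one() and flush_on() are hypothetical names):

    #include <linux/cpumask.h>
    #include <linux/preempt.h>
    #include <linux/smp.h>

    static void flush_one(void *info)
    {
            /* per-cpu work; runs in IPI context on every cpu in the mask */
    }

    static void flush_on(const struct cpumask *mask)
    {
            preempt_disable();                                /* pin the calling cpu */
            smp_call_function_many(mask, flush_one, NULL, 1); /* wait for completion */
            preempt_enable();
    }
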
index de3078215fe6eadd89f31bfd3a86f426a4a18015..75586f1f86e7b27632b1250b2e4572135bf03e1c 100644 (file)
@@ -504,7 +504,6 @@ err:
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
-       kobject_put(&pcrypt->pinst->kobj);
        free_cpumask_var(pcrypt->cb_cpumask->mask);
        kfree(pcrypt->cb_cpumask);
 
index 14cf9077bb2bcf4f01025518b11b0e87e072e9b5..f3ebb30f1b7fd9c7ed93d6328dbe4d8c54b1fd87 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_REGULATOR)               += regulator/
 
 # char/ comes before serial/ etc so that the VT console is the boot-time
 # default.
+obj-y                          += tty/
 obj-y                          += char/
 
 # gpu/ comes after char for AGP vs DRM startup
index 767107cce982bfdcb651797eb2fb634dde4f2356..3951020e494ac803001083b186a5b7614eb6d42f 100644 (file)
@@ -4363,9 +4363,9 @@ out_unreg_blkdev:
 out_put_disk:
        while (dr--) {
                del_timer(&motor_off_timer[dr]);
-               put_disk(disks[dr]);
                if (disks[dr]->queue)
                        blk_cleanup_queue(disks[dr]->queue);
+               put_disk(disks[dr]);
        }
        return err;
 }
@@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void)
                        device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
                        platform_device_unregister(&floppy_device[drive]);
                }
-               put_disk(disks[drive]);
                blk_cleanup_queue(disks[drive]->queue);
+               put_disk(disks[drive]);
        }
 
        del_timer_sync(&fd_timeout);
index 3a9c01416839ebc785abcdec1405135fce3df817..ba53ec956c95627ff03695a3bbb363f9f5f2f400 100644 (file)
@@ -2,24 +2,10 @@
 # Makefile for the kernel character device drivers.
 #
 
-#
-# This file contains the font map for the default (hardware) font
-#
-FONTMAPFILE = cp437.uni
-
-obj-y   += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
-
-obj-y                          += tty_mutex.o
-obj-$(CONFIG_LEGACY_PTYS)      += pty.o
-obj-$(CONFIG_UNIX98_PTYS)      += pty.o
+obj-y                          += mem.o random.o
 obj-$(CONFIG_TTY_PRINTK)       += ttyprintk.o
 obj-y                          += misc.o
-obj-$(CONFIG_VT)               += vt_ioctl.o vc_screen.o selection.o keyboard.o
 obj-$(CONFIG_BFIN_JTAG_COMM)   += bfin_jtag_comm.o
-obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
-obj-$(CONFIG_HW_CONSOLE)       += vt.o defkeymap.o
-obj-$(CONFIG_AUDIT)            += tty_audit.o
-obj-$(CONFIG_MAGIC_SYSRQ)      += sysrq.o
 obj-$(CONFIG_MVME147_SCC)      += generic_serial.o vme_scc.o
 obj-$(CONFIG_MVME162_SCC)      += generic_serial.o vme_scc.o
 obj-$(CONFIG_BVME6000_SCC)     += generic_serial.o vme_scc.o
@@ -41,8 +27,6 @@ obj-$(CONFIG_ISI)             += isicom.o
 obj-$(CONFIG_SYNCLINK)         += synclink.o
 obj-$(CONFIG_SYNCLINKMP)       += synclinkmp.o
 obj-$(CONFIG_SYNCLINK_GT)      += synclink_gt.o
-obj-$(CONFIG_N_HDLC)           += n_hdlc.o
-obj-$(CONFIG_N_GSM)            += n_gsm.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)               += sx.o generic_serial.o
 obj-$(CONFIG_RIO)              += rio/ generic_serial.o
@@ -74,7 +58,6 @@ obj-$(CONFIG_PRINTER)         += lp.o
 obj-$(CONFIG_APM_EMULATION)    += apm-emulation.o
 
 obj-$(CONFIG_DTLK)             += dtlk.o
-obj-$(CONFIG_R3964)            += n_r3964.o
 obj-$(CONFIG_APPLICOM)         += applicom.o
 obj-$(CONFIG_SONYPI)           += sonypi.o
 obj-$(CONFIG_RTC)              += rtc.o
@@ -115,28 +98,3 @@ obj-$(CONFIG_RAMOOPS)               += ramoops.o
 
 obj-$(CONFIG_JS_RTC)           += js-rtc.o
 js-rtc-y = rtc.o
-
-# Files generated that shall be removed upon make clean
-clean-files := consolemap_deftbl.c defkeymap.c
-
-quiet_cmd_conmk = CONMK   $@
-      cmd_conmk = scripts/conmakehash $< > $@
-
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
-       $(call cmd,conmk)
-
-$(obj)/defkeymap.o:  $(obj)/defkeymap.c
-
-# Uncomment if you're changing the keymap and have an appropriate
-# loadkeys version for the map. By default, we'll use the shipped
-# versions.
-# GENERATE_KEYMAP := 1
-
-ifdef GENERATE_KEYMAP
-
-$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
-       loadkeys --mktable $< > $@.tmp
-       sed -e 's/^static *//' $@.tmp > $@
-       rm $@.tmp
-
-endif
index 6b6760ea2435bac1865325aec74d579f9687f87e..9272c38dd3c6ce8a66e9e0d2b0a5085ddd2973a7 100644 (file)
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
        unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
        u32 pte_flags;
 
-       if (type_mask == AGP_USER_UNCACHED_MEMORY)
+       if (type_mask == AGP_USER_MEMORY)
                pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
        else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        } else { /* set 'normal'/'cached' to LLC by default */
-               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
                if (gfdt)
                        pte_flags |= GEN6_PTE_GFDT;
        }
index a44611652282c6c173e70b861910d4bee1610db4..d68d3aa1814b4ac635017e98cebd9625b0e7b40a 100644 (file)
@@ -616,13 +616,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "cmt_fck");
        if (IS_ERR(p->clk)) {
-               dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-               p->clk = clk_get(&p->pdev->dev, cfg->clk);
-               if (IS_ERR(p->clk)) {
-                       dev_err(&p->pdev->dev, "cannot get clock\n");
-                       ret = PTR_ERR(p->clk);
-                       goto err1;
-               }
+               dev_err(&p->pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
        }
 
        if (resource_size(res) == 6) {
index ef7a5be8a09f5a36bb1ed545fccf64e2fa7e7f89..40630cb9823707423c84e2459b812165786c5a15 100644 (file)
@@ -287,13 +287,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
        if (IS_ERR(p->clk)) {
-               dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-               p->clk = clk_get(&p->pdev->dev, cfg->clk);
-               if (IS_ERR(p->clk)) {
-                       dev_err(&p->pdev->dev, "cannot get clock\n");
-                       ret = PTR_ERR(p->clk);
-                       goto err1;
-               }
+               dev_err(&p->pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
        }
 
        return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
index de715901b82a28a09f8160dd1b62f17a25247a51..36aba9923060f5dfbd02d250a1de675e274e7695 100644 (file)
@@ -393,13 +393,9 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "tmu_fck");
        if (IS_ERR(p->clk)) {
-               dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-               p->clk = clk_get(&p->pdev->dev, cfg->clk);
-               if (IS_ERR(p->clk)) {
-                       dev_err(&p->pdev->dev, "cannot get clock\n");
-                       ret = PTR_ERR(p->clk);
-                       goto err1;
-               }
+               dev_err(&p->pdev->dev, "cannot get clock\n");
+               ret = PTR_ERR(p->clk);
+               goto err1;
        }
 
        return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
index 9dcb17d51aee737bcbb4ac478807c04311588d6c..84eb607d6c031b4275e834604d4c0c0729fa8502 100644 (file)
@@ -577,17 +577,11 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
        return ret;
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_link_page(struct ar_context *ctx,
+                                struct ar_buffer *ab, dma_addr_t ab_bus)
 {
-       struct device *dev = ctx->ohci->card.device;
-       struct ar_buffer *ab;
-       dma_addr_t uninitialized_var(ab_bus);
        size_t offset;
 
-       ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-       if (ab == NULL)
-               return -ENOMEM;
-
        ab->next = NULL;
        memset(&ab->descriptor, 0, sizeof(ab->descriptor));
        ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@ static int ar_context_add_page(struct ar_context *ctx)
 
        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+       struct device *dev = ctx->ohci->card.device;
+       struct ar_buffer *ab;
+       dma_addr_t uninitialized_var(ab_bus);
+
+       ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+       if (ab == NULL)
+               return -ENOMEM;
+
+       ar_context_link_page(ctx, ab, ab_bus);
 
        return 0;
 }
@@ -730,16 +737,17 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 static void ar_context_tasklet(unsigned long data)
 {
        struct ar_context *ctx = (struct ar_context *)data;
-       struct fw_ohci *ohci = ctx->ohci;
        struct ar_buffer *ab;
        struct descriptor *d;
        void *buffer, *end;
+       __le16 res_count;
 
        ab = ctx->current_buffer;
        d = &ab->descriptor;
 
-       if (d->res_count == 0) {
-               size_t size, rest, offset;
+       res_count = ACCESS_ONCE(d->res_count);
+       if (res_count == 0) {
+               size_t size, size2, rest, pktsize, size3, offset;
                dma_addr_t start_bus;
                void *start;
 
@@ -750,29 +758,63 @@ static void ar_context_tasklet(unsigned long data)
                 */
 
                offset = offsetof(struct ar_buffer, data);
-               start = buffer = ab;
+               start = ab;
                start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+               buffer = ab->data;
 
                ab = ab->next;
                d = &ab->descriptor;
-               size = buffer + PAGE_SIZE - ctx->pointer;
+               size = start + PAGE_SIZE - ctx->pointer;
+               /* valid buffer data in the next page */
                rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+               /* what actually fits in this page */
+               size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
                memmove(buffer, ctx->pointer, size);
-               memcpy(buffer + size, ab->data, rest);
-               ctx->current_buffer = ab;
-               ctx->pointer = (void *) ab->data + rest;
-               end = buffer + size + rest;
+               memcpy(buffer + size, ab->data, size2);
+
+               while (size > 0) {
+                       void *next = handle_ar_packet(ctx, buffer);
+                       pktsize = next - buffer;
+                       if (pktsize >= size) {
+                               /*
+                                * We have handled all the data that was
+                                * originally in this page, so we can now
+                                * continue in the next page.
+                                */
+                               buffer = next;
+                               break;
+                       }
+                       /* move the next packet to the start of the buffer */
+                       memmove(buffer, next, size + size2 - pktsize);
+                       size -= pktsize;
+                       /* fill up this page again */
+                       size3 = min(rest - size2,
+                                   (size_t)PAGE_SIZE - offset - size - size2);
+                       memcpy(buffer + size + size2,
+                              (void *) ab->data + size2, size3);
+                       size2 += size3;
+               }
 
-               while (buffer < end)
-                       buffer = handle_ar_packet(ctx, buffer);
+               if (rest > 0) {
+                       /* handle the packets that are fully in the next page */
+                       buffer = (void *) ab->data +
+                                       (buffer - (start + offset + size));
+                       end = (void *) ab->data + rest;
+
+                       while (buffer < end)
+                               buffer = handle_ar_packet(ctx, buffer);
 
-               dma_free_coherent(ohci->card.device, PAGE_SIZE,
-                                 start, start_bus);
-               ar_context_add_page(ctx);
+                       ctx->current_buffer = ab;
+                       ctx->pointer = end;
+
+                       ar_context_link_page(ctx, start, start_bus);
+               } else {
+                       ctx->pointer = start + PAGE_SIZE;
+               }
        } else {
                buffer = ctx->pointer;
                ctx->pointer = end =
-                       (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+                       (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
 
                while (buffer < end)
                        buffer = handle_ar_packet(ctx, buffer);
index dcbeb98f195a7addf665e0134af9ee799280a279..f7af91cb273d58232d41c68b712b2d6d4404671c 100644 (file)
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
        struct drm_crtc *tmp;
        int crtc_mask = 1;
 
-       WARN(!crtc, "checking null crtc?");
+       WARN(!crtc, "checking null crtc?\n");
 
        dev = crtc->dev;
 
index c1a26217a5305ee65a07b85fa77e4c15db77436b..a245d17165ae28b4eab835d612c92e008e3ab5d4 100644 (file)
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
                        .addr   = DDC_ADDR,
                        .flags  = I2C_M_RD,
                        .len    = len,
-                       .buf    = buf + start,
+                       .buf    = buf,
                }
        };
 
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-       int i, j = 0;
+       int i, j = 0, valid_extensions = 0;
        u8 *block, *new;
 
        if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        for (j = 1; j <= block[0x7e]; j++) {
                for (i = 0; i < 4; i++) {
-                       if (drm_do_probe_ddc_edid(adapter, block, j,
-                                                 EDID_LENGTH))
+                       if (drm_do_probe_ddc_edid(adapter,
+                                 block + (valid_extensions + 1) * EDID_LENGTH,
+                                 j, EDID_LENGTH))
                                goto out;
-                       if (drm_edid_block_valid(block + j * EDID_LENGTH))
+                       if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+                               valid_extensions++;
                                break;
+                       }
                }
                if (i == 4)
-                       goto carp;
+                       dev_warn(connector->dev->dev,
+                        "%s: Ignoring invalid EDID block %d.\n",
+                        drm_get_connector_name(connector), j);
+       }
+
+       if (valid_extensions != block[0x7e]) {
+               block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+               block[0x7e] = valid_extensions;
+               new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+               if (!new)
+                       goto out;
+               block = new;
        }
 
        return block;
index 3467dd420760fa51084a2ba00d097b90d6c214a2..80745f85902cf73fe17bf2ec41863dcb36a38550 100644 (file)
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
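
The last argument of module_param_named() is the permission of the /sys/module/i915/parameters/ entry, so 0600 lets root change powersave at runtime where 0400 exposed it read-only. A generic, illustrative sketch of the construct (names hypothetical):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned int example_powersave = 1;
    /* 0600: root may read and write /sys/module/<module>/parameters/powersave_example */
    module_param_named(powersave_example, example_powersave, uint, 0600);
    MODULE_PARM_DESC(powersave_example, "Enable the example power saving path (default: 1)");
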
index 2c2c19b6285ecf331edbac9c53e0bdd44b2093ad..90414ae86afcd0758d49b2c70dfb39f613ab22f5 100644 (file)
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
index 8eb8453208b5ce491e77c1c06c77c1c0a2da41bc..ef188e391406a1b3a2763e92d578c8f9e03b28b3 100644 (file)
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                          struct intel_ring_buffer *ring)
 {
-       if (list_empty(&ring->gpu_write_list))
+       if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                return 0;
 
        i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
        int ret;
 
        lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * write domain
         */
        if (obj->write_domain &&
-           obj->write_domain != obj->pending_read_domains) {
+           (obj->write_domain != obj->pending_read_domains ||
+            obj_priv->ring != ring)) {
                flush_domains |= obj->write_domain;
                invalidate_domains |=
                        obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
        return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+                               struct drm_file *file,
+                               struct intel_ring_buffer *ring,
+                               struct drm_gem_object **objects,
+                               int count)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret, i;
+
+       /* Zero the global flush/invalidate flags. These
+        * will be modified as new domains are computed
+        * for each object
+        */
+       dev->invalidate_domains = 0;
+       dev->flush_domains = 0;
+       dev_priv->mm.flush_rings = 0;
+       for (i = 0; i < count; i++)
+               i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+       if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                        dev->invalidate_domains,
+                        dev->flush_domains);
+#endif
+               i915_gem_flush(dev, file,
+                              dev->invalidate_domains,
+                              dev->flush_domains,
+                              dev_priv->mm.flush_rings);
+       }
+
+       for (i = 0; i < count; i++) {
+               struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+               /* XXX replace with semaphores */
+               if (obj->ring && ring != obj->ring) {
+                       ret = i915_gem_object_wait_rendering(&obj->base, true);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
 
-       /* Zero the global flush/invalidate flags. These
-        * will be modified as new domains are computed
-        * for each object
-        */
-       dev->invalidate_domains = 0;
-       dev->flush_domains = 0;
-       dev_priv->mm.flush_rings = 0;
-
-       for (i = 0; i < args->buffer_count; i++) {
-               struct drm_gem_object *obj = object_list[i];
-
-               /* Compute new gpu domains and update invalidate/flush */
-               i915_gem_object_set_to_gpu_domain(obj, ring);
-       }
-
-       if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-               DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                        dev->invalidate_domains,
-                        dev->flush_domains);
-#endif
-               i915_gem_flush(dev, file,
-                              dev->invalidate_domains,
-                              dev->flush_domains,
-                              dev_priv->mm.flush_rings);
-       }
+       ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+                                             object_list, args->buffer_count);
+       if (ret)
+               goto err;
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                        alignment = i915_gem_get_gtt_alignment(obj);
                if (obj_priv->gtt_offset & (alignment - 1)) {
                        WARN(obj_priv->pin_count,
-                            "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x\n",
+                            "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
                             obj_priv->gtt_offset, alignment);
                        ret = i915_gem_object_unbind(obj);
                        if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                     struct drm_file *file_priv)
 {
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       void *obj_addr;
-       int ret;
-       char __user *user_data;
+       void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+       char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-       user_data = (char __user *) (uintptr_t) args->data_ptr;
-       obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+       DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-       DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-       ret = copy_from_user(obj_addr, user_data, args->size);
-       if (ret)
-               return -EFAULT;
+       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+               unsigned long unwritten;
+
+               /* The physical object once assigned is fixed for the lifetime
+                * of the obj, so we can safely drop the lock and continue
+                * to access vaddr.
+                */
+               mutex_unlock(&dev->struct_mutex);
+               unwritten = copy_from_user(vaddr, user_data, args->size);
+               mutex_lock(&dev->struct_mutex);
+               if (unwritten)
+                       return -EFAULT;
+       }
 
        drm_agp_chipset_flush(dev);
        return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
        int lists_empty;
 
        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                     list_empty(&dev_priv->render_ring.active_list) &&
-                     list_empty(&dev_priv->bsd_ring.active_list) &&
-                     list_empty(&dev_priv->blt_ring.active_list);
+                     list_empty(&dev_priv->mm.active_list);
 
        return !lists_empty;
 }
index 43a4013f53fa24e212849af68940384b777d30a1..d8ae7d1d0cc671d9d59393fd2d11ddff317b322f 100644 (file)
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        if (lists_empty)
                return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list));
+                      list_empty(&dev_priv->mm.active_list));
        BUG_ON(!lists_empty);
 
        return 0;
index 989c19d2d959b6bc6c54a0e4bf31d9aee093efe6..454c064f8ef7f8d0daffe0ed40394354b152ee55 100644 (file)
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
        /* Clock gating state */
        intel_init_clock_gating(dev);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_SPLIT(dev)) {
                ironlake_enable_drps(dev);
+               intel_init_emon(dev);
+       }
 
        /* Cache mode state */
        I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
index 990f065374b22eacfc2fcddd68c873a5081ee773..48d8fd686ea91d9fb3ba307036ecd0f5388174e9 100644 (file)
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
        udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       u32 reg, temp;
+
+       /* enable normal train */
+       reg = FDI_TX_CTL(pipe);
+       temp = I915_READ(reg);
+       temp &= ~FDI_LINK_TRAIN_NONE;
+       temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+       I915_WRITE(reg, temp);
+
+       reg = FDI_RX_CTL(pipe);
+       temp = I915_READ(reg);
+       if (HAS_PCH_CPT(dev)) {
+               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+       } else {
+               temp &= ~FDI_LINK_TRAIN_NONE;
+               temp |= FDI_LINK_TRAIN_NONE;
+       }
+       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+       /* wait one idle pattern time */
+       POSTING_READ(reg);
+       udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
        DRM_DEBUG_KMS("FDI train done\n");
 
-       /* enable normal train */
-       reg = FDI_TX_CTL(pipe);
-       temp = I915_READ(reg);
-       temp &= ~FDI_LINK_TRAIN_NONE;
-       temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-       I915_WRITE(reg, temp);
-
-       reg = FDI_RX_CTL(pipe);
-       temp = I915_READ(reg);
-       if (HAS_PCH_CPT(dev)) {
-               temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-               temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-       } else {
-               temp &= ~FDI_LINK_TRAIN_NONE;
-               temp |= FDI_LINK_TRAIN_NONE;
-       }
-       I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-       /* wait one idle pattern time */
-       POSTING_READ(reg);
-       udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
        I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
+       intel_fdi_normal_train(crtc);
+
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) &&
            intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        udelay(100);
 
        /* Ironlake workaround, disable clock pointer after downing FDI */
-       I915_WRITE(FDI_RX_CHICKEN(pipe),
-                  I915_READ(FDI_RX_CHICKEN(pipe) &
-                            ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+       if (HAS_PCH_IBX(dev))
+               I915_WRITE(FDI_RX_CHICKEN(pipe),
+                          I915_READ(FDI_RX_CHICKEN(pipe) &
+                                    ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
        /* still set train pattern 1 */
        reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;
-       fstart = fmax;
 
        vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
                PXVFREQ_PX_SHIFT;
 
-       dev_priv->fmax = fstart; /* IPS callback will increase this */
+       dev_priv->fmax = fmax; /* IPS callback will increase this */
        dev_priv->fstart = fstart;
 
-       dev_priv->max_delay = fmax;
+       dev_priv->max_delay = fstart;
        dev_priv->min_delay = fmin;
        dev_priv->cur_delay = fstart;
 
-       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-                        fstart);
+       DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+                        fmax, fmin, fstart);
 
        I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
index 891f4f1d63b11570b7ede1c4e5e49814f2dcb5d0..c8e005553310a5eaeae98e4cb8ed349b4b204164 100644 (file)
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                        status = connector_status_connected;
        }
 
-       return bit;
+       return status;
 }
 
 /**
index 9af9f86a8765c82833706aa1320ca9baed3fa416..21551fe745416abb4597341b18d647f2529e85ce 100644 (file)
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                      struct drm_gem_object *obj,
index f1a649990ea9e61d5f7ff6f8b15e127d6fcc2f17..4324a326f98ee28f4d67936b7a4789556344c4fb 100644 (file)
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;
 
-       if (intel_lvds->edid) {
-               drm_mode_connector_update_edid_property(connector,
-                                                       intel_lvds->edid);
+       if (intel_lvds->edid)
                return drm_add_edid_modes(connector, intel_lvds->edid);
-       }
 
        mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
        if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
         */
        intel_lvds->edid = drm_get_edid(connector,
                                        &dev_priv->gmbus[pin].adapter);
-
+       if (intel_lvds->edid) {
+               if (drm_add_edid_modes(connector,
+                                      intel_lvds->edid)) {
+                       drm_mode_connector_update_edid_property(connector,
+                                                               intel_lvds->edid);
+               } else {
+                       kfree(intel_lvds->edid);
+                       intel_lvds->edid = NULL;
+               }
+       }
        if (!intel_lvds->edid) {
                /* Didn't get an EDID, so
                 * Set wide sync ranges so we get all modes
index 917c7dc3cd6b33ee151ad2e960743fe35016cec0..9b0d9a867aeada1f1c9d601e21f76e2d9e4e7351 100644 (file)
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
        return 0;
 
 err_out:
-       iounmap(opregion->header);
+       iounmap(base);
        return err;
 }
index afb96d25219afe473b116802f30616c0edb1cf86..02ff0a481f470cea6e3f4f109805e07fbd84725e 100644 (file)
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
        int uv_hscale = uv_hsubsampling(rec->flags);
        int uv_vscale = uv_vsubsampling(rec->flags);
-       u32 stride_mask, depth, tmp;
+       u32 stride_mask;
+       int depth;
+       u32 tmp;
 
        /* check src dimensions */
        if (IS_845G(dev) || IS_I830(dev)) {
index 09f2dc353ae239f0d2a6f86a5940c1d7b27656e4..b83306f9244b6c887d84976b4f1801449445941a 100644 (file)
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
        I915_WRITE_CTL(ring,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-                       | RING_NO_REPORT | RING_VALID);
+                       | RING_REPORT_64K | RING_VALID);
 
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
+
+       if (ring->cleanup)
+               ring->cleanup(ring);
+
        cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
        unsigned long end;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 head;
+
+       head = intel_read_status_page(ring, 4);
+       if (head) {
+               ring->head = head & HEAD_ADDR;
+               ring->space = ring->head - (ring->tail + 8);
+               if (ring->space < 0)
+                       ring->space += ring->size;
+               if (ring->space >= n)
+                       return 0;
+       }
 
        trace_i915_ring_wait_begin (dev);
        end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
        /* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+       (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+       return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+                        struct intel_ring_buffer *ring)
+{
+       if (NEED_BLT_WORKAROUND(dev)) {
+               struct drm_i915_gem_object *obj;
+               u32 __iomem *ptr;
+               int ret;
+
+               obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+               if (obj == NULL)
+                       return -ENOMEM;
+
+               ret = i915_gem_object_pin(&obj->base, 4096);
+               if (ret) {
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ptr = kmap(obj->pages[0]);
+               iowrite32(MI_BATCH_BUFFER_END, ptr);
+               iowrite32(MI_NOOP, ptr+1);
+               kunmap(obj->pages[0]);
+
+               ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+               if (ret) {
+                       i915_gem_object_unpin(&obj->base);
+                       drm_gem_object_unreference(&obj->base);
+                       return ret;
+               }
+
+               ring->private = obj;
+       }
+
+       return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+                          struct intel_ring_buffer *ring,
+                         int num_dwords)
+{
+       if (ring->private) {
+               intel_ring_begin(dev, ring, num_dwords+2);
+               intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+               intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+       } else
+               intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+                          struct intel_ring_buffer *ring,
+                          u32 invalidate_domains,
+                          u32 flush_domains)
+{
+       blt_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_FLUSH_DW);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+                    struct intel_ring_buffer *ring,
+                    u32 flush_domains)
+{
+       u32 seqno = i915_gem_get_seqno(dev);
+
+       blt_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+       intel_ring_emit(dev, ring,
+                       I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(dev, ring, seqno);
+       intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+       intel_ring_advance(dev, ring);
+
+       DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+       return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+       if (!ring->private)
+               return;
+
+       i915_gem_object_unpin(ring->private);
+       drm_gem_object_unreference(ring->private);
+       ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name                   = "blt ring",
        .id                     = RING_BLT,
        .mmio_base              = BLT_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
-       .init                   = init_ring_common,
+       .init                   = blt_ring_init,
        .write_tail             = ring_write_tail,
-       .flush                  = gen6_ring_flush,
-       .add_request            = ring_add_request,
+       .flush                  = blt_ring_flush,
+       .add_request            = blt_ring_add_request,
        .get_seqno              = ring_status_page_get_seqno,
        .user_irq_get           = blt_ring_get_user_irq,
        .user_irq_put           = blt_ring_put_user_irq,
        .dispatch_gem_execbuffer        = gen6_ring_dispatch_gem_execbuffer,
+       .cleanup                        = blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
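To see what the workaround above does at the command-stream level: blt_ring_init() pins a one-page dummy batch containing just MI_BATCH_BUFFER_END, and blt_ring_begin() prefixes every submission with an MI_BATCH_BUFFER_START pointing at it, so the parser's first command after any tail move is a batch-buffer start. The toy model below (plain C, made-up opcode values, no hardware access) only illustrates that ordering:

#include <stdio.h>

#define FAKE_MI_BATCH_BUFFER_START 0x31000000u  /* placeholder, not the real opcode */
#define FAKE_MI_FLUSH_DW           0x26000000u  /* placeholder, not the real opcode */

static unsigned int ring[16];
static int tail;

static void emit(unsigned int dw)
{
        ring[tail++] = dw;
}

/* Mirrors blt_ring_begin(): on affected steppings, prepend a batch start
 * that points at the dummy workaround batch before the real commands. */
static void ring_begin(int need_workaround, unsigned int dummy_batch_gtt_offset)
{
        if (need_workaround) {
                emit(FAKE_MI_BATCH_BUFFER_START);
                emit(dummy_batch_gtt_offset);
        }
}

int main(void)
{
        ring_begin(1, 0x00fff000u);     /* pretend GTT offset of the dummy batch */
        emit(FAKE_MI_FLUSH_DW);
        emit(0); emit(0); emit(0);

        for (int i = 0; i < tail; i++)
                printf("dw[%d] = 0x%08x\n", i, ring[i]);
        return 0;
}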
index a05aff0e5764d67e421a5a4995f2b0b92dfcd925..3126c2681983e21ba729d9ca599d104f4f1323de 100644 (file)
@@ -63,6 +63,7 @@ struct  intel_ring_buffer {
                        struct drm_i915_gem_execbuffer2 *exec,
                        struct drm_clip_rect *cliprects,
                        uint64_t exec_offset);
+       void            (*cleanup)(struct intel_ring_buffer *ring);
 
        /**
         * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct  intel_ring_buffer {
 
        wait_queue_head_t irq_queue;
        drm_local_map_t map;
+
+       void *private;
 };
 
 static inline u32
index f12a5b3ec050320505c6dc0b1cb44dcdde3fbeed..488c36c8f5e6069ebbb6cf5f9b830747a61c062e 100644 (file)
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                return -EINVAL;
        }
        /* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
                        case 0: /* D1 vblank */
                                if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 0);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
                        case 0: /* D2 vblank */
                                if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 1);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
                        case 0: /* D3 vblank */
                                if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 2);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
                        case 0: /* D4 vblank */
                                if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 3);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
                        case 0: /* D5 vblank */
                                if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 4);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
                        case 0: /* D6 vblank */
                                if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
                                        drm_handle_vblank(rdev->ddev, 5);
+                                       rdev->pm.vblank_sync = true;
                                        wake_up(&rdev->irq.vblank_queue);
                                        disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
                                        DRM_DEBUG("IH: D6 vblank\n");
index 0e8f28a689271c88a88d3865ead4890089049dca..8e10aa9f74b052a25e86d04e65173c8a7a19eb76 100644 (file)
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.ram.ptr) {
-               WARN(1, "R100 PCI GART already initialized.\n");
+               WARN(1, "R100 PCI GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
        uint32_t tmp = 0;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
index 34527e600fe94f91ab33da78285b695afbb6cd6e..cde1d3480d932c55ae33855c3a2336eadfe9ab54 100644 (file)
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "RV370 PCIE GART already initialized.\n");
+               WARN(1, "RV370 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
index 33952a12f0a31a49986a248a39c38cb48cdf618c..0f806cc7dc75f02f5bb00f98fc0d65b662aad688 100644 (file)
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
 {
        u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
                ASIC_T_SHIFT;
-       u32 actual_temp = 0;
 
-       if ((temp >> 7) & 1)
-               actual_temp = 0;
-       else
-               actual_temp = (temp >> 1) & 0xff;
-
-       return actual_temp * 1000;
+       return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "R600 PCIE GART already initialized.\n");
+               WARN(1, "R600 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 hdmi1, hdmi2;
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                return -EINVAL;
        }
        /* don't enable anything if the ih is disabled */
index 04cac7ec90397fa7f4f46f9d966fb41b2d3c5368..87ead090c7d5a5195af6cf8d15be5a5809ff9678 100644 (file)
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
        if (crev < 2)
                return false;
 
-       router.valid = false;
-
        obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
        path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
            (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                        if (connector_type == DRM_MODE_CONNECTOR_Unknown)
                                continue;
 
+                       router.ddc_valid = false;
+                       router.cd_valid = false;
                        for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
                                uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                                 usDeviceTag));
 
                                } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-                                       router.valid = false;
                                        for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-                                               u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+                                               u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
                                                if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
                                                        ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
                                                                (ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                        ATOM_I2C_RECORD *i2c_record;
                                                        ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
                                                        ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+                                                       ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
                                                        ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
                                                                (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
                                                                (ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
                                                                case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
                                                                        ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
                                                                                record;
-                                                                       router.valid = true;
-                                                                       router.mux_type = ddc_path->ucMuxType;
-                                                                       router.mux_control_pin = ddc_path->ucMuxControlPin;
-                                                                       router.mux_state = ddc_path->ucMuxState[enum_id];
+                                                                       router.ddc_valid = true;
+                                                                       router.ddc_mux_type = ddc_path->ucMuxType;
+                                                                       router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+                                                                       router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+                                                                       break;
+                                                               case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+                                                                       cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+                                                                               record;
+                                                                       router.cd_valid = true;
+                                                                       router.cd_mux_type = cd_path->ucMuxType;
+                                                                       router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+                                                                       router.cd_mux_state = cd_path->ucMuxState[enum_id];
                                                                        break;
                                                                }
                                                                record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
        size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
        struct radeon_router router;
 
-       router.valid = false;
+       router.ddc_valid = false;
+       router.cd_valid = false;
 
        bios_connectors = kzalloc(bc_size, GFP_KERNEL);
        if (!bios_connectors)
index 4dac4b0a02eea1928837c7009f88e84f227a81f5..fe6c74780f18e67bb55ea5fdeefdbb038ffadbf1 100644 (file)
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
                                        continue;
 
                                if (priority == true) {
-                                       DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-                                       DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+                                       DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+                                       DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
                                        conflict->status = connector_status_disconnected;
                                        radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
                                } else {
-                                       DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-                                       DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+                                       DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+                                       DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
                                        current_status = connector_status_disconnected;
                                }
                                break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
                            mode->vdisplay == native_mode->vdisplay) {
                                *native_mode = *mode;
                                drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-                               DRM_INFO("Determined LVDS native mode details from EDID\n");
+                               DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
                                break;
                        }
                }
        }
        if (!native_mode->clock) {
-               DRM_INFO("No LVDS native mode details, disabling RMX\n");
+               DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
                radeon_encoder->rmx_type = RMX_OFF;
        }
 }
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                radeon_connector->shared_ddc = true;
                                shared_ddc = true;
                        }
-                       if (radeon_connector->router_bus && router->valid &&
+                       if (radeon_connector->router_bus && router->ddc_valid &&
                            (radeon_connector->router.router_id == router->router_id)) {
                                radeon_connector->shared_ddc = false;
                                shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
        radeon_connector->connector_object_id = connector_object_id;
        radeon_connector->hpd = *hpd;
        radeon_connector->router = *router;
-       if (router->valid) {
+       if (router->ddc_valid || router->cd_valid) {
                radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
                if (!radeon_connector->router_bus)
                        goto failed;
index 0383631da69c42db417f27f6b87ddf58f19eb24e..1df4dc6c063cc44e0e7e943da9b62e6c85d27d38 100644 (file)
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
                                 radeon_connector->ddc_bus->rec.en_data_reg,
                                 radeon_connector->ddc_bus->rec.y_clk_reg,
                                 radeon_connector->ddc_bus->rec.y_data_reg);
-                       if (radeon_connector->router_bus)
+                       if (radeon_connector->router.ddc_valid)
                                DRM_INFO("  DDC Router 0x%x/0x%x\n",
-                                        radeon_connector->router.mux_control_pin,
-                                        radeon_connector->router.mux_state);
+                                        radeon_connector->router.ddc_mux_control_pin,
+                                        radeon_connector->router.ddc_mux_state);
+                       if (radeon_connector->router.cd_valid)
+                               DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+                                        radeon_connector->router.cd_mux_control_pin,
+                                        radeon_connector->router.cd_mux_state);
                } else {
                        if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
                            connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        int ret = 0;
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
            (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
        int ret = 0;
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        if (!radeon_connector->ddc_bus)
                return -1;
index ae58b6849a2eaa4fd259b88f3d43d2497733161b..f678257c42e6ab54f668951b93e685cf04d287d5 100644 (file)
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
        if (radeon_encoder->active_device &
            (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
        radeon_atom_output_lock(encoder, true);
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+       /* select the clock/data port if it uses a router */
+       if (connector) {
+               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               if (radeon_connector->router.cd_valid)
+                       radeon_router_select_cd_port(radeon_connector);
+       }
+
        /* this is needed for the pll/ss setup to work correctly in some cases */
        atombios_set_encoder_crtc_source(encoder);
 }
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
+
+       /* check for pre-DCE3 cards with shared encoders;
+        * can't really use the links individually, so don't disable
+        * the encoder if it's in use by another connector
+        */
+       if (!ASIC_IS_DCE3(rdev)) {
+               struct drm_encoder *other_encoder;
+               struct radeon_encoder *other_radeon_encoder;
+
+               list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+                       other_radeon_encoder = to_radeon_encoder(other_encoder);
+                       if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+                           drm_helper_encoder_in_use(other_encoder))
+                               goto disable_done;
+               }
+       }
+
        radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
        switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
                break;
        }
 
+disable_done:
        if (radeon_encoder_is_digital(encoder)) {
                if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
                        r600_hdmi_disable(encoder);
index 216392d0353bcace87cd2a80d06dda358e285ee4..daacb281dfafea57f26ac258dfbdca3c6d0c0767 100644 (file)
@@ -240,7 +240,8 @@ retry:
                 */
                if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
                        /* good news we believe it's a lockup */
-                       WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+                       WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+                            fence->seq, seq);
                        /* FIXME: what should we do ? marking everyone
                         * as signaled for now
                         */
index 6a13ee38a5b9fc71201a0a9a38dd93b3ab5957eb..0cfbba02c4d03773df29eb07efe74010a989002b 100644 (file)
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
        };
 
        /* on hw with routers, select right port */
-       if (radeon_connector->router.valid)
-               radeon_router_select_port(radeon_connector);
+       if (radeon_connector->router.ddc_valid)
+               radeon_router_select_ddc_port(radeon_connector);
 
        ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
        if (ret == 2)
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
                          addr, val);
 }
 
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
        u8 val;
 
-       if (!radeon_connector->router.valid)
+       if (!radeon_connector->router.ddc_valid)
                return;
 
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, &val);
-       val &= radeon_connector->router.mux_control_pin;
+       val &= ~radeon_connector->router.ddc_mux_control_pin;
        radeon_i2c_put_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x3, val);
        radeon_i2c_get_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x1, &val);
-       val &= radeon_connector->router.mux_control_pin;
-       val |= radeon_connector->router.mux_state;
+       val &= ~radeon_connector->router.ddc_mux_control_pin;
+       val |= radeon_connector->router.ddc_mux_state;
+       radeon_i2c_put_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+       u8 val;
+
+       if (!radeon_connector->router.cd_valid)
+               return;
+
+       radeon_i2c_get_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x3, &val);
+       val &= ~radeon_connector->router.cd_mux_control_pin;
+       radeon_i2c_put_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x3, val);
+       radeon_i2c_get_byte(radeon_connector->router_bus,
+                           radeon_connector->router.i2c_addr,
+                           0x1, &val);
+       val &= ~radeon_connector->router.cd_mux_control_pin;
+       val |= radeon_connector->router.cd_mux_state;
        radeon_i2c_put_byte(radeon_connector->router_bus,
                            radeon_connector->router.i2c_addr,
                            0x1, val);
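Besides splitting the router handling into separate DDC and clock/data paths, the hunk above fixes the mask polarity: ucMuxControlPin names the pins the router drives, so those bits must be cleared with `val &= ~...mux_control_pin` before ORing in the mux state (the old code ANDed with the mask itself). A tiny standalone illustration of that read-modify-write, with made-up values:

#include <stdio.h>

/* Clear the bits named by the control-pin mask, then set the requested state. */
static unsigned char apply_mux_state(unsigned char reg, unsigned char pin_mask,
                                     unsigned char state)
{
        reg &= ~pin_mask;
        reg |= state;
        return reg;
}

int main(void)
{
        /* pretend register value 0xff, mux pins 0x0c, desired state 0x04 */
        printf("0x%02x\n", apply_mux_state(0xff, 0x0c, 0x04)); /* prints 0xf7 */
        return 0;
}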
index 92457163d07094dba9bd5975dc63dfb4968e92a6..680f57644e865ce2a5e4fdc97b5154a7dffb11a5 100644 (file)
@@ -401,13 +401,19 @@ struct radeon_hpd {
 };
 
 struct radeon_router {
-       bool valid;
        u32 router_id;
        struct radeon_i2c_bus_rec i2c_info;
        u8 i2c_addr;
-       u8 mux_type;
-       u8 mux_control_pin;
-       u8 mux_state;
+       /* i2c mux */
+       bool ddc_valid;
+       u8 ddc_mux_type;
+       u8 ddc_mux_control_pin;
+       u8 ddc_mux_state;
+       /* clock/data mux */
+       bool cd_valid;
+       u8 cd_mux_type;
+       u8 cd_mux_control_pin;
+       u8 cd_mux_state;
 };
 
 struct radeon_connector {
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
                                u8 slave_addr,
                                u8 addr,
                                u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
index d7ab914164103bdda7bf9ea72e8e35a2c78a5f37..8eb183466015326f7ca93b2647e85a610fe91f6f 100644 (file)
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;
+
+retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
        bo->gobj = gobj;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
-
-retry:
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
index fe95bb35317ea26c2cabe33fc0bae285bfc32cef..01c2c736a1daff116284db0f84f3896977e6eecb 100644 (file)
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
        gtt = container_of(backend, struct radeon_ttm_backend, backend);
        gtt->offset = bo_mem->start << PAGE_SHIFT;
        if (!gtt->num_pages) {
-               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+               WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+                    gtt->num_pages, bo_mem, backend);
        }
        r = radeon_gart_bind(gtt->rdev, gtt->offset,
                             gtt->num_pages, gtt->pages);
index f683e51a2a0674518b476d6a2157b39a303170a6..5512e4e5e636ebb91ea73e99d63a4e60bb95bbe6 100644 (file)
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.ram.ptr) {
-               WARN(1, "RS400 GART already initialized.\n");
+               WARN(1, "RS400 GART already initialized\n");
                return 0;
        }
        /* Check gart size */
index b091a1f6fa4ed2b86dc1975717abbf017667c251..f1c6e02c2e6b41f18338688b1dc9d49b33dbe8bb 100644 (file)
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
        int r;
 
        if (rdev->gart.table.vram.robj) {
-               WARN(1, "RS600 GART already initialized.\n");
+               WARN(1, "RS600 GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
                ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
        if (!rdev->irq.installed) {
-               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                WREG32(R_000040_GEN_INT_CNTL, 0);
                return -EINVAL;
        }
index a1cb783c7131c56de8aa19cf5afb6a668e5e4345..3ca77dc03915017e78d0e7f7e8e479d9f279fc56 100644 (file)
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        ttm_bo_mem_put(bo, &bo->mem);
 
        atomic_set(&bo->reserved, 0);
+
+       /*
+        * Make sure processes trying to reserve actually observe the
+        * cleared flag before they are woken.
+        */
+       smp_mb__after_atomic_dec();
        wake_up_all(&bo->event_queue);
 }
 
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver;
-       void *sync_obj;
+       void *sync_obj = NULL;
        void *sync_obj_arg;
        int put_count;
        int ret;
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                spin_lock(&glob->lru_lock);
        }
 queue:
-       sync_obj = bo->sync_obj;
-       sync_obj_arg = bo->sync_obj_arg;
        driver = bdev->driver;
+       if (bo->sync_obj)
+               sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       sync_obj_arg = bo->sync_obj_arg;
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bo->lock);
 
-       if (sync_obj)
+       if (sync_obj) {
                driver->sync_obj_flush(sync_obj, sync_obj_arg);
+               driver->sync_obj_unref(&sync_obj);
+       }
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
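The destroy-queue path above now takes its own reference on the sync object (driver->sync_obj_ref) while the locks are still held, flushes it after unlocking, and only then drops that reference; previously the raw pointer could be freed by another thread once the locks were released. The userspace sketch below shows the same pin-before-unlock pattern with an ordinary reference count (illustrative only; a pthread mutex stands in for the spinlocks):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic "pin it before dropping the lock" pattern: once the lock is
 * released the object may be freed by others, so take a reference first
 * and drop it only after the unlocked work is done. */
struct obj {
        int refcount;                   /* protected by lock */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *obj_get(struct obj *o)       /* caller holds lock */
{
        o->refcount++;
        return o;
}

static void obj_put(struct obj *o)
{
        pthread_mutex_lock(&lock);
        if (--o->refcount == 0) {
                pthread_mutex_unlock(&lock);
                free(o);
                return;
        }
        pthread_mutex_unlock(&lock);
}

static void flush_outside_lock(struct obj *shared)
{
        struct obj *pinned;

        pthread_mutex_lock(&lock);
        pinned = obj_get(shared);       /* pin while the lock is still held */
        pthread_mutex_unlock(&lock);

        /* ... possibly sleeping "flush" work on pinned goes here ... */

        obj_put(pinned);                /* may turn out to be the last reference */
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        flush_outside_lock(o);
        obj_put(o);                     /* drop the creator's reference */
        return 0;
}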
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;
 
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
                if (mem->mm_node)
                        break;
-               spin_lock(&glob->lru_lock);
-               if (list_empty(&man->lru)) {
-                       spin_unlock(&glob->lru_lock);
-                       break;
-               }
-               spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                                no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement)
 {
-       int i;
+       BUG_ON((placement->fpfn || placement->lpfn) &&
+              (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
-       if (placement->fpfn || placement->lpfn) {
-               if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-                       printk(KERN_ERR TTM_PFX "Page number range to small "
-                               "Need %lu pages, range is [%u, %u]\n",
-                               bo->mem.num_pages, placement->fpfn,
-                               placement->lpfn);
-                       return -EINVAL;
-               }
-       }
-       for (i = 0; i < placement->num_placement; i++) {
-               if (!capable(CAP_SYS_ADMIN)) {
-                       if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-                               printk(KERN_ERR TTM_PFX "Need to be root to "
-                                       "modify NO_EVICT status.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
-       for (i = 0; i < placement->num_busy_placement; i++) {
-               if (!capable(CAP_SYS_ADMIN)) {
-                       if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-                               printk(KERN_ERR TTM_PFX "Need to be root to "
-                                       "modify NO_EVICT status.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
        return 0;
 }
 
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+               if (destroy)
+                       (*destroy)(bo);
+               else
+                       kfree(bo);
                return -EINVAL;
        }
        bo->destroy = destroy;
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;
 
-       if (type >= TTM_NUM_MEM_TYPES) {
-               printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
-               return ret;
-       }
-
+       BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
-       if (man->has_type) {
-               printk(KERN_ERR TTM_PFX
-                      "Memory manager already initialized for type %d\n",
-                      type);
-               return ret;
-       }
+       BUG_ON(man->has_type);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 
        ret = 0;
        if (type != TTM_PL_SYSTEM) {
-               if (!p_size) {
-                       printk(KERN_ERR TTM_PFX
-                              "Zero size memory manager type %d\n",
-                              type);
-                       return ret;
-               }
-
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
index 7410c190c8911a9342ba57ea616d77a722061312..038e947d00f986ef987ef914fd8432f2202c25f3 100644 (file)
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+       struct drm_mm mm;
+       spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                               struct ttm_buffer_object *bo,
                               struct ttm_placement *placement,
                               struct ttm_mem_reg *mem)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+       struct drm_mm *mm = &rman->mm;
        struct drm_mm_node *node = NULL;
        unsigned long lpfn;
        int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
                if (unlikely(ret))
                        return ret;
 
-               spin_lock(&glob->lru_lock);
+               spin_lock(&rman->lock);
                node = drm_mm_search_free_in_range(mm,
                                        mem->num_pages, mem->page_alignment,
                                        placement->fpfn, lpfn, 1);
                if (unlikely(node == NULL)) {
-                       spin_unlock(&glob->lru_lock);
+                       spin_unlock(&rman->lock);
                        return 0;
                }
                node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-                                                       mem->page_alignment,
-                                                       placement->fpfn,
-                                                       lpfn);
-               spin_unlock(&glob->lru_lock);
+                                                    mem->page_alignment,
+                                                    placement->fpfn,
+                                                    lpfn);
+               spin_unlock(&rman->lock);
        } while (node == NULL);
 
        mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
                                struct ttm_mem_reg *mem)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
        if (mem->mm_node) {
-               spin_lock(&glob->lru_lock);
+               spin_lock(&rman->lock);
                drm_mm_put_block(mem->mm_node);
-               spin_unlock(&glob->lru_lock);
+               spin_unlock(&rman->lock);
                mem->mm_node = NULL;
        }
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
                           unsigned long p_size)
 {
-       struct drm_mm *mm;
+       struct ttm_range_manager *rman;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-       if (!mm)
+       rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+       if (!rman)
                return -ENOMEM;
 
-       ret = drm_mm_init(mm, 0, p_size);
+       ret = drm_mm_init(&rman->mm, 0, p_size);
        if (ret) {
-               kfree(mm);
+               kfree(rman);
                return ret;
        }
 
-       man->priv = mm;
+       spin_lock_init(&rman->lock);
+       man->priv = rman;
        return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
-       int ret = 0;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+       struct drm_mm *mm = &rman->mm;
 
-       spin_lock(&glob->lru_lock);
+       spin_lock(&rman->lock);
        if (drm_mm_clean(mm)) {
                drm_mm_takedown(mm);
-               kfree(mm);
+               spin_unlock(&rman->lock);
+               kfree(rman);
                man->priv = NULL;
-       } else
-               ret = -EBUSY;
-       spin_unlock(&glob->lru_lock);
-       return ret;
+               return 0;
+       }
+       spin_unlock(&rman->lock);
+       return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
                             const char *prefix)
 {
-       struct ttm_bo_global *glob = man->bdev->glob;
-       struct drm_mm *mm = man->priv;
+       struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-       spin_lock(&glob->lru_lock);
-       drm_mm_debug_table(mm, prefix);
-       spin_unlock(&glob->lru_lock);
+       spin_lock(&rman->lock);
+       drm_mm_debug_table(&rman->mm, prefix);
+       spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
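The conversion above also moves locking into the range manager itself: instead of borrowing the global lru_lock, the drm_mm now lives in a ttm_range_manager together with the spinlock that protects it. The userspace sketch below shows the same keep-the-lock-with-the-data pattern with a trivial page counter in place of drm_mm (illustrative only; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdio.h>

/* The state and the lock that protects it live in one private struct,
 * so callers never need a wider, shared lock just to touch this state. */
struct range_manager {
        pthread_mutex_t lock;
        unsigned long free_pages;
};

static int range_manager_get(struct range_manager *rman, unsigned long pages)
{
        int ret = -1;

        pthread_mutex_lock(&rman->lock);
        if (rman->free_pages >= pages) {
                rman->free_pages -= pages;
                ret = 0;
        }
        pthread_mutex_unlock(&rman->lock);
        return ret;
}

static void range_manager_put(struct range_manager *rman, unsigned long pages)
{
        pthread_mutex_lock(&rman->lock);
        rman->free_pages += pages;
        pthread_mutex_unlock(&rman->lock);
}

int main(void)
{
        static struct range_manager rman = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .free_pages = 1024,
        };

        printf("get 256 -> %d\n", range_manager_get(&rman, 256));
        range_manager_put(&rman, 256);
        printf("get 2048 -> %d\n", range_manager_get(&rman, 2048)); /* fails: -1 */
        return 0;
}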
index a7bab87a548bd25de465d5fff45436a177346e33..af789dc869b94e997621ff615c184383c857377c 100644 (file)
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
                return ret;
 
        ret = be->func->bind(be, bo_mem);
-       if (ret) {
-               printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+       if (unlikely(ret != 0))
                return ret;
-       }
 
        ttm->state = tt_bound;
 
index 9b5b4d9dd62c9fceffe9c5fa0dd218ff7119d5a7..3e038a394c510efd6194618a7f3a1f185380f7fc 100644 (file)
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
        vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
                first_pfn + 1;
 
-       if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+       vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+       if (NULL == vsg->pages)
                return -ENOMEM;
-       memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm,
                             (unsigned long)xfer->mem_addr,
index 51d9f9f1d7f2aa1b2ea04f309569d71bd9244600..76954e3528c1d6ccb763973c22b7f7979ad4f1b4 100644 (file)
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 
        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;
+       fence_rep.pad64 = 0;
 
        user_fence_rep = (struct drm_vmw_fence_rep __user *)
            (unsigned long)arg->fence_rep;
index 87c6e6156d7de8bc809a62bab06dbf64a7da512b..cceeb42789b655e82e5431d9810b9fa033f883fd 100644 (file)
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
                               &vmw_vram_ne_placement,
                               false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);
+       if (unlikely(ret != 0))
+               vfbs->buffer = NULL;
 
        return ret;
 }
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);
 
+       if (unlikely(vfbs->buffer == NULL))
+               return 0;
+
        bo = &vfbs->buffer->base;
        ttm_bo_unref(&bo);
        vfbs->buffer = NULL;
index a01c47ddb5bc724617b74c15ad3bffc2ed63067f..29113c9b26a8c01cc217cc82906e98a83a8fa36c 100644 (file)
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
                return -EINVAL;
        }
 
-       dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+       dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
        if (!dev_priv->ldu_priv)
                return -ENOMEM;
index df2036ed18d5f2c920ba61f99b716998a6f25329..f1a52f9e72988da041c77f19b1035f5b3f160592 100644 (file)
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
                return -ENOSYS;
        }
 
-       overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+       overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;
 
index 742c423567cf180e0b46da6477924ac5c7fdcac7..0e1edd7311ff72f5805b8ee616edbe59d52472d1 100644 (file)
@@ -3,6 +3,9 @@ config STUB_POULSBO
        depends on PCI
        # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+       select VIDEO_OUTPUT_CONTROL if ACPI
+       select BACKLIGHT_CLASS_DEVICE if ACPI
+       select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        help
          Choose this option if you have a system that has Intel GMA500
index 267626178678363882ad14d01e96122ccfbfe2d2..4b50601027d3f4787249651c8d58da5126603f12 100644 (file)
@@ -82,7 +82,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
                        val = i2c_smbus_read_byte_data(client, i);
                        if (unlikely(val < 0)) {
                                dev_dbg(dev,
-                                       "Failed to read ADC value: error %d",
+                                       "Failed to read ADC value: error %d\n",
                                        val);
                                ret = ERR_PTR(val);
                                goto abort;
@@ -230,8 +230,7 @@ static int ltc4261_probe(struct i2c_client *client,
                return -ENODEV;
 
        if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
-               dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n",
-                       adapter->id, client->addr, LTC4261_STATUS);
+               dev_err(&client->dev, "Failed to read status register\n");
                return -ENODEV;
        }
 
index c950be3cce21defb04ec4a75c0f0a05a71e846f8..3a6321cb8030e617aabda9f122fcea901d083f2b 100644 (file)
@@ -99,6 +99,7 @@ config I2C_I801
            ICH10
            5/3400 Series (PCH)
            Cougar Point (PCH)
+           Patsburg (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index 59d65981eed7edae2704c75fc85672ad08a2983b..02835ce7ff4b1a8969e44b7549b6f93b36b5b3e2 100644 (file)
@@ -3,6 +3,8 @@
     Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
     <mdsxyz123@yahoo.com>
     Copyright (C) 2007, 2008   Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2010         Intel Corporation,
+                               David Woodhouse <dwmw2@infradead.org>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
   ICH10                 0x3a60     32     hard     yes     yes     yes
   5/3400 Series (PCH)   0x3b30     32     hard     yes     yes     yes
   Cougar Point (PCH)    0x1c22     32     hard     yes     yes     yes
+  Patsburg (PCH)        0x1d22     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d70     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d71     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d72     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
   Block buffer                     yes
   Block process call transaction   no
   I2C block read transaction       yes  (doesn't use the block buffer)
+  Slave mode                       no
 
   See the file Documentation/i2c/busses/i2c-i801 for details.
 */
 
-/* Note: we assume there can only be one I801, with one SMBus interface */
-
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/dmi.h>
 
 /* I801 SMBus address offsets */
-#define SMBHSTSTS      (0 + i801_smba)
-#define SMBHSTCNT      (2 + i801_smba)
-#define SMBHSTCMD      (3 + i801_smba)
-#define SMBHSTADD      (4 + i801_smba)
-#define SMBHSTDAT0     (5 + i801_smba)
-#define SMBHSTDAT1     (6 + i801_smba)
-#define SMBBLKDAT      (7 + i801_smba)
-#define SMBPEC         (8 + i801_smba)         /* ICH3 and later */
-#define SMBAUXSTS      (12 + i801_smba)        /* ICH4 and later */
-#define SMBAUXCTL      (13 + i801_smba)        /* ICH4 and later */
+#define SMBHSTSTS(p)   (0 + (p)->smba)
+#define SMBHSTCNT(p)   (2 + (p)->smba)
+#define SMBHSTCMD(p)   (3 + (p)->smba)
+#define SMBHSTADD(p)   (4 + (p)->smba)
+#define SMBHSTDAT0(p)  (5 + (p)->smba)
+#define SMBHSTDAT1(p)  (6 + (p)->smba)
+#define SMBBLKDAT(p)   (7 + (p)->smba)
+#define SMBPEC(p)      (8 + (p)->smba)         /* ICH3 and later */
+#define SMBAUXSTS(p)   (12 + (p)->smba)        /* ICH4 and later */
+#define SMBAUXCTL(p)   (13 + (p)->smba)        /* ICH4 and later */
 
 /* PCI Address Constants */
 #define SMBBAR         4
                                 SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \
                                 SMBHSTSTS_INTR)
 
-static unsigned long i801_smba;
-static unsigned char i801_original_hstcfg;
+/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0        0x1d70
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1        0x1d71
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2        0x1d72
+
+struct i801_priv {
+       struct i2c_adapter adapter;
+       unsigned long smba;
+       unsigned char original_hstcfg;
+       struct pci_dev *pci_dev;
+       unsigned int features;
+};
+
 static struct pci_driver i801_driver;
-static struct pci_dev *I801_dev;
 
 #define FEATURE_SMBUS_PEC      (1 << 0)
 #define FEATURE_BLOCK_BUFFER   (1 << 1)
 #define FEATURE_BLOCK_PROC     (1 << 2)
 #define FEATURE_I2C_BLOCK_READ (1 << 3)
-static unsigned int i801_features;
 
 static const char *i801_feature_names[] = {
        "SMBus PEC",
@@ -151,24 +165,24 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features");
 
 /* Make sure the SMBus host is ready to start transmitting.
    Return 0 if it is, -EBUSY if it is not. */
-static int i801_check_pre(void)
+static int i801_check_pre(struct i801_priv *priv)
 {
        int status;
 
-       status = inb_p(SMBHSTSTS);
+       status = inb_p(SMBHSTSTS(priv));
        if (status & SMBHSTSTS_HOST_BUSY) {
-               dev_err(&I801_dev->dev, "SMBus is busy, can't use it!\n");
+               dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n");
                return -EBUSY;
        }
 
        status &= STATUS_FLAGS;
        if (status) {
-               dev_dbg(&I801_dev->dev, "Clearing status flags (%02x)\n",
+               dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n",
                        status);
-               outb_p(status, SMBHSTSTS);
-               status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+               outb_p(status, SMBHSTSTS(priv));
+               status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
                if (status) {
-                       dev_err(&I801_dev->dev,
+                       dev_err(&priv->pci_dev->dev,
                                "Failed clearing status flags (%02x)\n",
                                status);
                        return -EBUSY;
@@ -179,48 +193,50 @@ static int i801_check_pre(void)
 }
 
 /* Convert the status register to an error code, and clear it. */
-static int i801_check_post(int status, int timeout)
+static int i801_check_post(struct i801_priv *priv, int status, int timeout)
 {
        int result = 0;
 
        /* If the SMBus is still busy, we give up */
        if (timeout) {
-               dev_err(&I801_dev->dev, "Transaction timeout\n");
+               dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
                /* try to stop the current command */
-               dev_dbg(&I801_dev->dev, "Terminating the current operation\n");
-               outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT);
+               dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
+               outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+                      SMBHSTCNT(priv));
                msleep(1);
-               outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL), SMBHSTCNT);
+               outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+                      SMBHSTCNT(priv));
 
                /* Check if it worked */
-               status = inb_p(SMBHSTSTS);
+               status = inb_p(SMBHSTSTS(priv));
                if ((status & SMBHSTSTS_HOST_BUSY) ||
                    !(status & SMBHSTSTS_FAILED))
-                       dev_err(&I801_dev->dev,
+                       dev_err(&priv->pci_dev->dev,
                                "Failed terminating the transaction\n");
-               outb_p(STATUS_FLAGS, SMBHSTSTS);
+               outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
                return -ETIMEDOUT;
        }
 
        if (status & SMBHSTSTS_FAILED) {
                result = -EIO;
-               dev_err(&I801_dev->dev, "Transaction failed\n");
+               dev_err(&priv->pci_dev->dev, "Transaction failed\n");
        }
        if (status & SMBHSTSTS_DEV_ERR) {
                result = -ENXIO;
-               dev_dbg(&I801_dev->dev, "No response\n");
+               dev_dbg(&priv->pci_dev->dev, "No response\n");
        }
        if (status & SMBHSTSTS_BUS_ERR) {
                result = -EAGAIN;
-               dev_dbg(&I801_dev->dev, "Lost arbitration\n");
+               dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
        }
 
        if (result) {
                /* Clear error flags */
-               outb_p(status & STATUS_FLAGS, SMBHSTSTS);
-               status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+               outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
+               status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
                if (status) {
-                       dev_warn(&I801_dev->dev, "Failed clearing status "
+                       dev_warn(&priv->pci_dev->dev, "Failed clearing status "
                                 "flags at end of transaction (%02x)\n",
                                 status);
                }
@@ -229,86 +245,88 @@ static int i801_check_post(int status, int timeout)
        return result;
 }
 
-static int i801_transaction(int xact)
+static int i801_transaction(struct i801_priv *priv, int xact)
 {
        int status;
        int result;
        int timeout = 0;
 
-       result = i801_check_pre();
+       result = i801_check_pre(priv);
        if (result < 0)
                return result;
 
        /* the current contents of SMBHSTCNT can be overwritten, since PEC,
         * INTREN, SMBSCMD are passed in xact */
-       outb_p(xact | I801_START, SMBHSTCNT);
+       outb_p(xact | I801_START, SMBHSTCNT(priv));
 
        /* We will always wait for a fraction of a second! */
        do {
                msleep(1);
-               status = inb_p(SMBHSTSTS);
+               status = inb_p(SMBHSTSTS(priv));
        } while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_TIMEOUT));
 
-       result = i801_check_post(status, timeout > MAX_TIMEOUT);
+       result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
        if (result < 0)
                return result;
 
-       outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+       outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
        return 0;
 }
 
 /* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(void)
+static void i801_wait_hwpec(struct i801_priv *priv)
 {
        int timeout = 0;
        int status;
 
        do {
                msleep(1);
-               status = inb_p(SMBHSTSTS);
+               status = inb_p(SMBHSTSTS(priv));
        } while ((!(status & SMBHSTSTS_INTR))
                 && (timeout++ < MAX_TIMEOUT));
 
        if (timeout > MAX_TIMEOUT)
-               dev_dbg(&I801_dev->dev, "PEC Timeout!\n");
+               dev_dbg(&priv->pci_dev->dev, "PEC Timeout!\n");
 
-       outb_p(status, SMBHSTSTS);
+       outb_p(status, SMBHSTSTS(priv));
 }
 
-static int i801_block_transaction_by_block(union i2c_smbus_data *data,
+static int i801_block_transaction_by_block(struct i801_priv *priv,
+                                          union i2c_smbus_data *data,
                                           char read_write, int hwpec)
 {
        int i, len;
        int status;
 
-       inb_p(SMBHSTCNT); /* reset the data buffer index */
+       inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
 
        /* Use 32-byte buffer to process this transaction */
        if (read_write == I2C_SMBUS_WRITE) {
                len = data->block[0];
-               outb_p(len, SMBHSTDAT0);
+               outb_p(len, SMBHSTDAT0(priv));
                for (i = 0; i < len; i++)
-                       outb_p(data->block[i+1], SMBBLKDAT);
+                       outb_p(data->block[i+1], SMBBLKDAT(priv));
        }
 
-       status = i801_transaction(I801_BLOCK_DATA | ENABLE_INT9 |
+       status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 |
                                  I801_PEC_EN * hwpec);
        if (status)
                return status;
 
        if (read_write == I2C_SMBUS_READ) {
-               len = inb_p(SMBHSTDAT0);
+               len = inb_p(SMBHSTDAT0(priv));
                if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
                        return -EPROTO;
 
                data->block[0] = len;
                for (i = 0; i < len; i++)
-                       data->block[i + 1] = inb_p(SMBBLKDAT);
+                       data->block[i + 1] = inb_p(SMBBLKDAT(priv));
        }
        return 0;
 }
 
-static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+                                              union i2c_smbus_data *data,
                                               char read_write, int command,
                                               int hwpec)
 {
@@ -318,15 +336,15 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
        int result;
        int timeout;
 
-       result = i801_check_pre();
+       result = i801_check_pre(priv);
        if (result < 0)
                return result;
 
        len = data->block[0];
 
        if (read_write == I2C_SMBUS_WRITE) {
-               outb_p(len, SMBHSTDAT0);
-               outb_p(data->block[1], SMBBLKDAT);
+               outb_p(len, SMBHSTDAT0(priv));
+               outb_p(data->block[1], SMBBLKDAT(priv));
        }
 
        for (i = 1; i <= len; i++) {
@@ -342,34 +360,37 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
                        else
                                smbcmd = I801_BLOCK_DATA;
                }
-               outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT);
+               outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
 
                if (i == 1)
-                       outb_p(inb(SMBHSTCNT) | I801_START, SMBHSTCNT);
+                       outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+                              SMBHSTCNT(priv));
 
                /* We will always wait for a fraction of a second! */
                timeout = 0;
                do {
                        msleep(1);
-                       status = inb_p(SMBHSTSTS);
+                       status = inb_p(SMBHSTSTS(priv));
                } while ((!(status & SMBHSTSTS_BYTE_DONE))
                         && (timeout++ < MAX_TIMEOUT));
 
-               result = i801_check_post(status, timeout > MAX_TIMEOUT);
+               result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
                if (result < 0)
                        return result;
 
                if (i == 1 && read_write == I2C_SMBUS_READ
                 && command != I2C_SMBUS_I2C_BLOCK_DATA) {
-                       len = inb_p(SMBHSTDAT0);
+                       len = inb_p(SMBHSTDAT0(priv));
                        if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
-                               dev_err(&I801_dev->dev,
+                               dev_err(&priv->pci_dev->dev,
                                        "Illegal SMBus block read size %d\n",
                                        len);
                                /* Recover */
-                               while (inb_p(SMBHSTSTS) & SMBHSTSTS_HOST_BUSY)
-                                       outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS);
-                               outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+                               while (inb_p(SMBHSTSTS(priv)) &
+                                      SMBHSTSTS_HOST_BUSY)
+                                       outb_p(SMBHSTSTS_BYTE_DONE,
+                                              SMBHSTSTS(priv));
+                               outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
                                return -EPROTO;
                        }
                        data->block[0] = len;
@@ -377,27 +398,28 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
 
                /* Retrieve/store value in SMBBLKDAT */
                if (read_write == I2C_SMBUS_READ)
-                       data->block[i] = inb_p(SMBBLKDAT);
+                       data->block[i] = inb_p(SMBBLKDAT(priv));
                if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
-                       outb_p(data->block[i+1], SMBBLKDAT);
+                       outb_p(data->block[i+1], SMBBLKDAT(priv));
 
                /* signals SMBBLKDAT ready */
-               outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS);
+               outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
        }
 
        return 0;
 }
 
-static int i801_set_block_buffer_mode(void)
+static int i801_set_block_buffer_mode(struct i801_priv *priv)
 {
-       outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_E32B, SMBAUXCTL);
-       if ((inb_p(SMBAUXCTL) & SMBAUXCTL_E32B) == 0)
+       outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+       if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
                return -EIO;
        return 0;
 }
 
 /* Block transaction function */
-static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
+static int i801_block_transaction(struct i801_priv *priv,
+                                 union i2c_smbus_data *data, char read_write,
                                  int command, int hwpec)
 {
        int result = 0;
@@ -406,11 +428,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
        if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
                if (read_write == I2C_SMBUS_WRITE) {
                        /* set I2C_EN bit in configuration register */
-                       pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
-                       pci_write_config_byte(I801_dev, SMBHSTCFG,
+                       pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
+                       pci_write_config_byte(priv->pci_dev, SMBHSTCFG,
                                              hostc | SMBHSTCFG_I2C_EN);
-               } else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) {
-                       dev_err(&I801_dev->dev,
+               } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
+                       dev_err(&priv->pci_dev->dev,
                                "I2C block read is unsupported!\n");
                        return -EOPNOTSUPP;
                }
@@ -429,22 +451,23 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
        /* Experience has shown that the block buffer can only be used for
           SMBus (not I2C) block transactions, even though the datasheet
           doesn't mention this limitation. */
-       if ((i801_features & FEATURE_BLOCK_BUFFER)
+       if ((priv->features & FEATURE_BLOCK_BUFFER)
         && command != I2C_SMBUS_I2C_BLOCK_DATA
-        && i801_set_block_buffer_mode() == 0)
-               result = i801_block_transaction_by_block(data, read_write,
-                                                        hwpec);
+        && i801_set_block_buffer_mode(priv) == 0)
+               result = i801_block_transaction_by_block(priv, data,
+                                                        read_write, hwpec);
        else
-               result = i801_block_transaction_byte_by_byte(data, read_write,
+               result = i801_block_transaction_byte_by_byte(priv, data,
+                                                            read_write,
                                                             command, hwpec);
 
        if (result == 0 && hwpec)
-               i801_wait_hwpec();
+               i801_wait_hwpec(priv);
 
        if (command == I2C_SMBUS_I2C_BLOCK_DATA
         && read_write == I2C_SMBUS_WRITE) {
                /* restore saved configuration register value */
-               pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
+               pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
        }
        return result;
 }
@@ -457,81 +480,85 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
        int hwpec;
        int block = 0;
        int ret, xact = 0;
+       struct i801_priv *priv = i2c_get_adapdata(adap);
 
-       hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
+       hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
                && size != I2C_SMBUS_QUICK
                && size != I2C_SMBUS_I2C_BLOCK_DATA;
 
        switch (size) {
        case I2C_SMBUS_QUICK:
                outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-                      SMBHSTADD);
+                      SMBHSTADD(priv));
                xact = I801_QUICK;
                break;
        case I2C_SMBUS_BYTE:
                outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-                      SMBHSTADD);
+                      SMBHSTADD(priv));
                if (read_write == I2C_SMBUS_WRITE)
-                       outb_p(command, SMBHSTCMD);
+                       outb_p(command, SMBHSTCMD(priv));
                xact = I801_BYTE;
                break;
        case I2C_SMBUS_BYTE_DATA:
                outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-                      SMBHSTADD);
-               outb_p(command, SMBHSTCMD);
+                      SMBHSTADD(priv));
+               outb_p(command, SMBHSTCMD(priv));
                if (read_write == I2C_SMBUS_WRITE)
-                       outb_p(data->byte, SMBHSTDAT0);
+                       outb_p(data->byte, SMBHSTDAT0(priv));
                xact = I801_BYTE_DATA;
                break;
        case I2C_SMBUS_WORD_DATA:
                outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-                      SMBHSTADD);
-               outb_p(command, SMBHSTCMD);
+                      SMBHSTADD(priv));
+               outb_p(command, SMBHSTCMD(priv));
                if (read_write == I2C_SMBUS_WRITE) {
-                       outb_p(data->word & 0xff, SMBHSTDAT0);
-                       outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1);
+                       outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+                       outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
                }
                xact = I801_WORD_DATA;
                break;
        case I2C_SMBUS_BLOCK_DATA:
                outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-                      SMBHSTADD);
-               outb_p(command, SMBHSTCMD);
+                      SMBHSTADD(priv));
+               outb_p(command, SMBHSTCMD(priv));
                block = 1;
                break;
        case I2C_SMBUS_I2C_BLOCK_DATA:
                /* NB: page 240 of ICH5 datasheet shows that the R/#W
                 * bit should be cleared here, even when reading */
-               outb_p((addr & 0x7f) << 1, SMBHSTADD);
+               outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
                if (read_write == I2C_SMBUS_READ) {
                        /* NB: page 240 of ICH5 datasheet also shows
                         * that DATA1 is the cmd field when reading */
-                       outb_p(command, SMBHSTDAT1);
+                       outb_p(command, SMBHSTDAT1(priv));
                } else
-                       outb_p(command, SMBHSTCMD);
+                       outb_p(command, SMBHSTCMD(priv));
                block = 1;
                break;
        default:
-               dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size);
+               dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
+                       size);
                return -EOPNOTSUPP;
        }
 
        if (hwpec)      /* enable/disable hardware PEC */
-               outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_CRC, SMBAUXCTL);
+               outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
        else
-               outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
+               outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
+                      SMBAUXCTL(priv));
 
        if (block)
-               ret = i801_block_transaction(data, read_write, size, hwpec);
+               ret = i801_block_transaction(priv, data, read_write, size,
+                                            hwpec);
        else
-               ret = i801_transaction(xact | ENABLE_INT9);
+               ret = i801_transaction(priv, xact | ENABLE_INT9);
 
        /* Some BIOSes don't like it when PEC is enabled at reboot or resume
           time, so we forcibly disable it after every transaction. Turn off
           E32B for the same reason. */
        if (hwpec || block)
-               outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
-                      SMBAUXCTL);
+               outb_p(inb_p(SMBAUXCTL(priv)) &
+                      ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
        if (block)
                return ret;
@@ -543,10 +570,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
        switch (xact & 0x7f) {
        case I801_BYTE: /* Result put in SMBHSTDAT0 */
        case I801_BYTE_DATA:
-               data->byte = inb_p(SMBHSTDAT0);
+               data->byte = inb_p(SMBHSTDAT0(priv));
                break;
        case I801_WORD_DATA:
-               data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8);
+               data->word = inb_p(SMBHSTDAT0(priv)) +
+                            (inb_p(SMBHSTDAT1(priv)) << 8);
                break;
        }
        return 0;
@@ -555,11 +583,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 
 static u32 i801_func(struct i2c_adapter *adapter)
 {
+       struct i801_priv *priv = i2c_get_adapdata(adapter);
+
        return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
               I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
               I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
-              ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
-              ((i801_features & FEATURE_I2C_BLOCK_READ) ?
+              ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
+              ((priv->features & FEATURE_I2C_BLOCK_READ) ?
                I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
 }
 
@@ -568,12 +598,6 @@ static const struct i2c_algorithm smbus_algorithm = {
        .functionality  = i801_func,
 };
 
-static struct i2c_adapter i801_adapter = {
-       .owner          = THIS_MODULE,
-       .class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
-       .algo           = &smbus_algorithm,
-};
-
 static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
@@ -592,6 +616,10 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
        { 0, }
 };
 
@@ -704,16 +732,25 @@ static int __devinit i801_probe(struct pci_dev *dev,
 {
        unsigned char temp;
        int err, i;
+       struct i801_priv *priv;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       i2c_set_adapdata(&priv->adapter, priv);
+       priv->adapter.owner = THIS_MODULE;
+       priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+       priv->adapter.algo = &smbus_algorithm;
 
-       I801_dev = dev;
-       i801_features = 0;
+       priv->pci_dev = dev;
        switch (dev->device) {
        default:
-               i801_features |= FEATURE_I2C_BLOCK_READ;
+               priv->features |= FEATURE_I2C_BLOCK_READ;
                /* fall through */
        case PCI_DEVICE_ID_INTEL_82801DB_3:
-               i801_features |= FEATURE_SMBUS_PEC;
-               i801_features |= FEATURE_BLOCK_BUFFER;
+               priv->features |= FEATURE_SMBUS_PEC;
+               priv->features |= FEATURE_BLOCK_BUFFER;
                /* fall through */
        case PCI_DEVICE_ID_INTEL_82801CA_3:
        case PCI_DEVICE_ID_INTEL_82801BA_2:
@@ -724,11 +761,11 @@ static int __devinit i801_probe(struct pci_dev *dev,
 
        /* Disable features on user request */
        for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
-               if (i801_features & disable_features & (1 << i))
+               if (priv->features & disable_features & (1 << i))
                        dev_notice(&dev->dev, "%s disabled by user\n",
                                   i801_feature_names[i]);
        }
-       i801_features &= ~disable_features;
+       priv->features &= ~disable_features;
 
        err = pci_enable_device(dev);
        if (err) {
@@ -738,8 +775,8 @@ static int __devinit i801_probe(struct pci_dev *dev,
        }
 
        /* Determine the address of the SMBus area */
-       i801_smba = pci_resource_start(dev, SMBBAR);
-       if (!i801_smba) {
+       priv->smba = pci_resource_start(dev, SMBBAR);
+       if (!priv->smba) {
                dev_err(&dev->dev, "SMBus base address uninitialized, "
                        "upgrade BIOS\n");
                err = -ENODEV;
@@ -755,19 +792,19 @@ static int __devinit i801_probe(struct pci_dev *dev,
        err = pci_request_region(dev, SMBBAR, i801_driver.name);
        if (err) {
                dev_err(&dev->dev, "Failed to request SMBus region "
-                       "0x%lx-0x%Lx\n", i801_smba,
+                       "0x%lx-0x%Lx\n", priv->smba,
                        (unsigned long long)pci_resource_end(dev, SMBBAR));
                goto exit;
        }
 
-       pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
-       i801_original_hstcfg = temp;
+       pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
+       priv->original_hstcfg = temp;
        temp &= ~SMBHSTCFG_I2C_EN;      /* SMBus timing */
        if (!(temp & SMBHSTCFG_HST_EN)) {
                dev_info(&dev->dev, "Enabling SMBus device\n");
                temp |= SMBHSTCFG_HST_EN;
        }
-       pci_write_config_byte(I801_dev, SMBHSTCFG, temp);
+       pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
 
        if (temp & SMBHSTCFG_SMB_SMI_EN)
                dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
@@ -775,19 +812,19 @@ static int __devinit i801_probe(struct pci_dev *dev,
                dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
 
        /* Clear special mode bits */
-       if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
-               outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
-                      SMBAUXCTL);
+       if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
+               outb_p(inb_p(SMBAUXCTL(priv)) &
+                      ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
        /* set up the sysfs linkage to our parent device */
-       i801_adapter.dev.parent = &dev->dev;
+       priv->adapter.dev.parent = &dev->dev;
 
        /* Retry up to 3 times on lost arbitration */
-       i801_adapter.retries = 3;
+       priv->adapter.retries = 3;
 
-       snprintf(i801_adapter.name, sizeof(i801_adapter.name),
-               "SMBus I801 adapter at %04lx", i801_smba);
-       err = i2c_add_adapter(&i801_adapter);
+       snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+               "SMBus I801 adapter at %04lx", priv->smba);
+       err = i2c_add_adapter(&priv->adapter);
        if (err) {
                dev_err(&dev->dev, "Failed to add SMBus adapter\n");
                goto exit_release;
@@ -801,27 +838,33 @@ static int __devinit i801_probe(struct pci_dev *dev,
                memset(&info, 0, sizeof(struct i2c_board_info));
                info.addr = apanel_addr;
                strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
-               i2c_new_device(&i801_adapter, &info);
+               i2c_new_device(&priv->adapter, &info);
        }
 #endif
 #if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
        if (dmi_name_in_vendors("FUJITSU"))
-               dmi_walk(dmi_check_onboard_devices, &i801_adapter);
+               dmi_walk(dmi_check_onboard_devices, &priv->adapter);
 #endif
 
+       pci_set_drvdata(dev, priv);
        return 0;
 
 exit_release:
        pci_release_region(dev, SMBBAR);
 exit:
+       kfree(priv);
        return err;
 }
 
 static void __devexit i801_remove(struct pci_dev *dev)
 {
-       i2c_del_adapter(&i801_adapter);
-       pci_write_config_byte(I801_dev, SMBHSTCFG, i801_original_hstcfg);
+       struct i801_priv *priv = pci_get_drvdata(dev);
+
+       i2c_del_adapter(&priv->adapter);
+       pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
        pci_release_region(dev, SMBBAR);
+       pci_set_drvdata(dev, NULL);
+       kfree(priv);
        /*
         * do not call pci_disable_device(dev) since it can cause hard hangs on
         * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -831,8 +874,10 @@ static void __devexit i801_remove(struct pci_dev *dev)
 #ifdef CONFIG_PM
 static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
 {
+       struct i801_priv *priv = pci_get_drvdata(dev);
+
        pci_save_state(dev);
-       pci_write_config_byte(dev, SMBHSTCFG, i801_original_hstcfg);
+       pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
        pci_set_power_state(dev, pci_choose_state(dev, mesg));
        return 0;
 }
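
The hunks above finish converting i2c-i801 from file-scope globals (I801_dev, i801_smba, i801_features, the static i801_adapter) to a per-device struct i801_priv reached through pci_get_drvdata() and i2c_get_adapdata(). The struct itself and the register macros are defined in an earlier hunk of this file that is not shown here; the fragment below is only a sketch reconstructed from how this excerpt uses priv, not the patch's own definition.

#include <linux/i2c.h>
#include <linux/pci.h>

/* Assumed per-device state; field names follow the usage in the hunks above. */
struct i801_priv {
	struct i2c_adapter adapter;
	unsigned long smba;		/* SMBus I/O base read from SMBBAR */
	unsigned char original_hstcfg;	/* restored on remove and suspend */
	struct pci_dev *pci_dev;
	unsigned int features;		/* FEATURE_* bits */
};

/* Register names turn into offsets from the per-device base, for example: */
#define SMBHSTSTS(p)	(0 + (p)->smba)
#define SMBHSTCNT(p)	(2 + (p)->smba)

Keeping all of this state per device is what allows several matching PCI functions to be bound at the same time, which the three Patsburg "IDF" SMBus controllers added to i801_ids above rely on.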
index a9cf768316343df7eafeda3f7c4bb58876c3f571..b77f9991278e791557e47638ce078b157cf52993 100644 (file)
@@ -630,7 +630,7 @@ static void atp_complete_geyser_3_4(struct urb *urb)
        /* Just update the base values (i.e. touchpad in untouched state) */
        if (dev->data[dev->info->datalen - 1] & ATP_STATUS_BASE_UPDATE) {
 
-               dprintk(KERN_DEBUG "appletouch: updated base values\n");
+               dprintk("appletouch: updated base values\n");
 
                memcpy(dev->xy_old, dev->xy_cur, sizeof(dev->xy_old));
                goto exit;
index ba6f0bd1e762e494961f6eab23204fb37eff63c0..bc3b5187f3a391504f275567d7143e878dda5c9b 100644 (file)
@@ -129,6 +129,9 @@ struct ad7879 {
        u16                     cmd_crtl1;
        u16                     cmd_crtl2;
        u16                     cmd_crtl3;
+       int                     x;
+       int                     y;
+       int                     Rt;
 };
 
 static int ad7879_read(struct ad7879 *ts, u8 reg)
@@ -175,13 +178,32 @@ static int ad7879_report(struct ad7879 *ts)
                Rt /= z1;
                Rt = (Rt + 2047) >> 12;
 
-               if (!timer_pending(&ts->timer))
+               /*
+                * Sample found inconsistent: the pressure is beyond
+                * the maximum. Don't report it to user space.
+                */
+               if (Rt > ts->pressure_max)
+                       return -EINVAL;
+
+               /*
+                * Note that we delay reporting events by one sample.
+                * This is done to avoid reporting the last sample of the
+                * touch sequence, which may be incomplete if the finger
+                * leaves the surface before the last reading is taken.
+                */
+               if (timer_pending(&ts->timer)) {
+                       /* Touch continues */
                        input_report_key(input_dev, BTN_TOUCH, 1);
+                       input_report_abs(input_dev, ABS_X, ts->x);
+                       input_report_abs(input_dev, ABS_Y, ts->y);
+                       input_report_abs(input_dev, ABS_PRESSURE, ts->Rt);
+                       input_sync(input_dev);
+               }
+
+               ts->x = x;
+               ts->y = y;
+               ts->Rt = Rt;
 
-               input_report_abs(input_dev, ABS_X, x);
-               input_report_abs(input_dev, ABS_Y, y);
-               input_report_abs(input_dev, ABS_PRESSURE, Rt);
-               input_sync(input_dev);
                return 0;
        }
 
index ccde586025632671d5ea6279b89a3602262600a3..2ca9e5d6646028c428ec06fd525ad971629d98a2 100644 (file)
@@ -514,7 +514,7 @@ err_free_irq:
 err_cs_disable:
        pdata->cs_dis(pdata->cs_pin);
 err_free_mem:
-       input_free_device(bu21013_data->in_dev);
+       input_free_device(in_dev);
        kfree(bu21013_data);
 
        return error;
index 40b914bded8c2a3237abca74d295d8d80a02b2e3..2e72227bd071b9d18329e60a0e4b06a444700ec9 100644 (file)
@@ -1427,8 +1427,8 @@ modeisar(struct BCState *bcs, int mode, int bc)
                                        &bcs->hw.isar.reg->Flags))
                                        bcs->hw.isar.dpath = 1;
                                else {
-                                       printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
-                                       debugl1(cs, "isar modeisar analog funktions only with DP1");
+                                       printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+                                       debugl1(cs, "isar modeisar analog functions only with DP1");
                                        return(1);
                                }
                                break;
index cc2a88d5192fbb359f3027082c0843ad34055d97..77b8fd20cd9046b8e0c785c041c7569285aa6043 100644 (file)
@@ -10,7 +10,7 @@ menuconfig NEW_LEDS
 if NEW_LEDS
 
 config LEDS_CLASS
-       tristate "LED Class Support"
+       bool "LED Class Support"
        help
          This option enables the led sysfs class in /sys/class/leds.  You'll
          need this to do anything useful with LEDs.  If unsure, say N.
@@ -176,6 +176,24 @@ config LEDS_LP3944
          To compile this driver as a module, choose M here: the
          module will be called leds-lp3944.
 
+config LEDS_LP5521
+       tristate "LED Support for N.S. LP5521 LED driver chip"
+       depends on LEDS_CLASS && I2C
+       help
+         If you say yes here you get support for the National Semiconductor
+         LP5521 LED driver. It is a 3 channel chip with programmable
+         engines. The driver provides direct control via the LED class
+         and an interface for programming the engines.
+
+config LEDS_LP5523
+       tristate "LED Support for N.S. LP5523 LED driver chip"
+       depends on LEDS_CLASS && I2C
+       help
+         If you say yes here you get support for the National Semiconductor
+         LP5523 LED driver. It is a 9 channel chip with programmable
+         engines. The driver provides direct control via the LED class
+         and an interface for programming the engines.
+
 config LEDS_CLEVO_MAIL
        tristate "Mail LED on Clevo notebook"
        depends on X86 && SERIO_I8042 && DMI
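
The two new Kconfig entries describe chips that, in this version of the driver, are configured entirely through board platform data. As a purely illustrative sketch, a board file might wire up the LP5521 as below; the field and type names are assumptions based on how lp5521_probe() later in this patch dereferences pdata and on the <linux/leds-lp5521.h> header the driver includes, and the current values are made up.

#include <linux/kernel.h>
#include <linux/leds-lp5521.h>

/* Hypothetical board wiring; a channel with led_current == 0 is skipped. */
static struct lp5521_led_config board_lp5521_leds[] = {
	{ .chan_nr = 0, .led_current = 50, .max_current = 130 },
	{ .chan_nr = 1, .led_current = 0,  .max_current = 130 }, /* not connected */
	{ .chan_nr = 2, .led_current = 50, .max_current = 130 },
};

static void board_lp5521_enable(bool state)
{
	/* drive the chip's enable/reset GPIO here */
}

static struct lp5521_platform_data board_lp5521_pdata = {
	.led_config	= board_lp5521_leds,
	.num_channels	= ARRAY_SIZE(board_lp5521_leds),
	.clock_mode	= LP5521_CLOCK_EXT,	/* checked by the driver's selftest */
	.enable		= board_lp5521_enable,	/* probe() toggles this 0 -> 1 */
};

The probe code below relies on exactly these fields: it skips channels whose led_current is zero, calls pdata->enable(0) and pdata->enable(1) around reset, and compares clock_mode against LP5521_CLOCK_EXT in the selftest.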
index 9c96db40ef6d7d3a12f776ab4d764371a71026a5..aae6989ff6b63445e6b6c974df537f0f0784fe3f 100644 (file)
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE)            += leds-sunfire.o
 obj-$(CONFIG_LEDS_PCA9532)             += leds-pca9532.o
 obj-$(CONFIG_LEDS_GPIO)                        += leds-gpio.o
 obj-$(CONFIG_LEDS_LP3944)              += leds-lp3944.o
+obj-$(CONFIG_LEDS_LP5521)              += leds-lp5521.o
+obj-$(CONFIG_LEDS_LP5523)              += leds-lp5523.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)          += leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)               += leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)                 += leds-fsg.o
index 2606600765079f946501717888a411eef20ac3fd..211e21f34bd57d5f6e2b079dcd22194713b3d5f5 100644 (file)
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = {
        __ATTR_NULL,
 };
 
+static void led_timer_function(unsigned long data)
+{
+       struct led_classdev *led_cdev = (void *)data;
+       unsigned long brightness;
+       unsigned long delay;
+
+       if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
+               led_set_brightness(led_cdev, LED_OFF);
+               return;
+       }
+
+       brightness = led_get_brightness(led_cdev);
+       if (!brightness) {
+               /* Time to switch the LED on. */
+               brightness = led_cdev->blink_brightness;
+               delay = led_cdev->blink_delay_on;
+       } else {
+               /* Store the current brightness value to be able
+                * to restore it when the delay_off period is over.
+                */
+               led_cdev->blink_brightness = brightness;
+               brightness = LED_OFF;
+               delay = led_cdev->blink_delay_off;
+       }
+
+       led_set_brightness(led_cdev, brightness);
+
+       mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void led_stop_software_blink(struct led_classdev *led_cdev)
+{
+       /* deactivate previous settings */
+       del_timer_sync(&led_cdev->blink_timer);
+       led_cdev->blink_delay_on = 0;
+       led_cdev->blink_delay_off = 0;
+}
+
+static void led_set_software_blink(struct led_classdev *led_cdev,
+                                  unsigned long delay_on,
+                                  unsigned long delay_off)
+{
+       int current_brightness;
+
+       current_brightness = led_get_brightness(led_cdev);
+       if (current_brightness)
+               led_cdev->blink_brightness = current_brightness;
+       if (!led_cdev->blink_brightness)
+               led_cdev->blink_brightness = led_cdev->max_brightness;
+
+       if (delay_on == led_cdev->blink_delay_on &&
+           delay_off == led_cdev->blink_delay_off)
+               return;
+
+       led_stop_software_blink(led_cdev);
+
+       led_cdev->blink_delay_on = delay_on;
+       led_cdev->blink_delay_off = delay_off;
+
+       /* never on - don't blink */
+       if (!delay_on)
+               return;
+
+       /* never off - just set to brightness */
+       if (!delay_off) {
+               led_set_brightness(led_cdev, led_cdev->blink_brightness);
+               return;
+       }
+
+       mod_timer(&led_cdev->blink_timer, jiffies + 1);
+}
+
+
 /**
  * led_classdev_suspend - suspend an led_classdev.
  * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
        led_update_brightness(led_cdev);
 
+       init_timer(&led_cdev->blink_timer);
+       led_cdev->blink_timer.function = led_timer_function;
+       led_cdev->blink_timer.data = (unsigned long)led_cdev;
+
 #ifdef CONFIG_LEDS_TRIGGERS
        led_trigger_set_default(led_cdev);
 #endif
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(led_classdev_register);
 
 /**
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
        up_write(&led_cdev->trigger_lock);
 #endif
 
+       /* Stop blinking */
+       led_brightness_set(led_cdev, LED_OFF);
+
        device_unregister(led_cdev->dev);
 
        down_write(&leds_list_lock);
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
 }
 EXPORT_SYMBOL_GPL(led_classdev_unregister);
 
+void led_blink_set(struct led_classdev *led_cdev,
+                  unsigned long *delay_on,
+                  unsigned long *delay_off)
+{
+       if (led_cdev->blink_set &&
+           led_cdev->blink_set(led_cdev, delay_on, delay_off))
+               return;
+
+       /* blink with 1 Hz as default if nothing specified */
+       if (!*delay_on && !*delay_off)
+               *delay_on = *delay_off = 500;
+
+       led_set_software_blink(led_cdev, *delay_on, *delay_off);
+}
+EXPORT_SYMBOL(led_blink_set);
+
+void led_brightness_set(struct led_classdev *led_cdev,
+                       enum led_brightness brightness)
+{
+       led_stop_software_blink(led_cdev);
+       led_cdev->brightness_set(led_cdev, brightness);
+}
+EXPORT_SYMBOL(led_brightness_set);
+
 static int __init leds_init(void)
 {
        leds_class = class_create(THIS_MODULE, "leds");
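
The led-class.c hunks above add a timer-based software-blink fallback: led_blink_set() first offers the request to the driver's blink_set() op and, if that op is missing or rejects the request, programs the new per-classdev blink_timer; led_brightness_set() stops any software blink before writing the brightness. A minimal usage sketch, with a hypothetical caller such as an LED trigger:

#include <linux/leds.h>

/* Hypothetical caller of the two helpers exported above. */
static void example_blink_1hz(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 0, delay_off = 0;

	/* 0/0 requests the default pattern: 500 ms on, 500 ms off. */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static void example_stop(struct led_classdev *led_cdev)
{
	/* Deletes the blink timer, then forces the LED off. */
	led_brightness_set(led_cdev, LED_OFF);
}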
index f1c00db88b5e148fd229410d5959f4c4e0e9580b..c41eb6180c9c2eb2e88ce2dfbfc64d7ff90daf2a 100644 (file)
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
                if (led_cdev->trigger->deactivate)
                        led_cdev->trigger->deactivate(led_cdev);
                led_cdev->trigger = NULL;
-               led_set_brightness(led_cdev, LED_OFF);
+               led_brightness_set(led_cdev, LED_OFF);
        }
        if (trigger) {
                write_lock_irqsave(&trigger->leddev_list_lock, flags);
index ea57e05d08f3f757e67f7a3ed7c5d2c5c0a28aae..4d9fa38d9ff6a3cffb2063823c92b0d147ce13d8 100644 (file)
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = {
 
 static int __init gpio_led_init(void)
 {
-       int ret;
+       int ret = 0;
 
 #ifdef CONFIG_LEDS_GPIO_PLATFORM       
        ret = platform_driver_register(&gpio_led_driver);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644 (file)
index 0000000..3782f31
--- /dev/null
@@ -0,0 +1,821 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.         See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5521.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5521_PROGRAM_LENGTH          32      /* in bytes */
+
+#define LP5521_MAX_LEDS                        3       /* Maximum number of LEDs */
+#define LP5521_MAX_ENGINES             3       /* Maximum number of engines */
+
+#define LP5521_ENG_MASK_BASE           0x30    /* 00110000 */
+#define LP5521_ENG_STATUS_MASK         0x07    /* 00000111 */
+
+#define LP5521_CMD_LOAD                        0x15    /* 00010101 */
+#define LP5521_CMD_RUN                 0x2a    /* 00101010 */
+#define LP5521_CMD_DIRECT              0x3f    /* 00111111 */
+#define LP5521_CMD_DISABLED            0x00    /* 00000000 */
+
+/* Registers */
+#define LP5521_REG_ENABLE              0x00
+#define LP5521_REG_OP_MODE             0x01
+#define LP5521_REG_R_PWM               0x02
+#define LP5521_REG_G_PWM               0x03
+#define LP5521_REG_B_PWM               0x04
+#define LP5521_REG_R_CURRENT           0x05
+#define LP5521_REG_G_CURRENT           0x06
+#define LP5521_REG_B_CURRENT           0x07
+#define LP5521_REG_CONFIG              0x08
+#define LP5521_REG_R_CHANNEL_PC                0x09
+#define LP5521_REG_G_CHANNEL_PC                0x0A
+#define LP5521_REG_B_CHANNEL_PC                0x0B
+#define LP5521_REG_STATUS              0x0C
+#define LP5521_REG_RESET               0x0D
+#define LP5521_REG_GPO                 0x0E
+#define LP5521_REG_R_PROG_MEM          0x10
+#define LP5521_REG_G_PROG_MEM          0x30
+#define LP5521_REG_B_PROG_MEM          0x50
+
+#define LP5521_PROG_MEM_BASE           LP5521_REG_R_PROG_MEM
+#define LP5521_PROG_MEM_SIZE           0x20
+
+/* Base register to set LED current */
+#define LP5521_REG_LED_CURRENT_BASE    LP5521_REG_R_CURRENT
+
+/* Base register to set the brightness */
+#define LP5521_REG_LED_PWM_BASE                LP5521_REG_R_PWM
+
+/* Bits in ENABLE register */
+#define LP5521_MASTER_ENABLE           0x40    /* Chip master enable */
+#define LP5521_LOGARITHMIC_PWM         0x80    /* Logarithmic PWM adjustment */
+#define LP5521_EXEC_RUN                        0x2A
+
+/* Bits in CONFIG register */
+#define LP5521_PWM_HF                  0x40    /* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN              0x20    /* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF             0       /* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS          8       /* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5             0x10    /* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO            0x18    /* Automatic mode selection */
+#define LP5521_R_TO_BATT               4       /* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_SRC_EXT             0       /* Ext-clk source (CLK_32K) */
+#define LP5521_CLK_INT                 1       /* Internal clock */
+#define LP5521_CLK_AUTO                        2       /* Automatic clock selection */
+
+/* Status */
+#define LP5521_EXT_CLK_USED            0x08
+
+struct lp5521_engine {
+       const struct attribute_group *attributes;
+       int             id;
+       u8              mode;
+       u8              prog_page;
+       u8              engine_mask;
+};
+
+struct lp5521_led {
+       int                     id;
+       u8                      chan_nr;
+       u8                      led_current;
+       u8                      max_current;
+       struct led_classdev     cdev;
+       struct work_struct      brightness_work;
+       u8                      brightness;
+};
+
+struct lp5521_chip {
+       struct lp5521_platform_data *pdata;
+       struct mutex            lock; /* Serialize control */
+       struct i2c_client       *client;
+       struct lp5521_engine    engines[LP5521_MAX_ENGINES];
+       struct lp5521_led       leds[LP5521_MAX_LEDS];
+       u8                      num_channels;
+       u8                      num_leds;
+};
+
+#define cdev_to_led(c)         container_of(c, struct lp5521_led, cdev)
+#define engine_to_lp5521(eng)  container_of((eng), struct lp5521_chip, \
+                                               engines[(eng)->id - 1])
+#define led_to_lp5521(led)     container_of((led), struct lp5521_chip, \
+                                               leds[(led)->id])
+
+static void lp5521_led_brightness_work(struct work_struct *work);
+
+static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+{
+       return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+       s32 ret;
+
+       ret = i2c_smbus_read_byte_data(client, reg);
+       if (ret < 0)
+               return -EIO;
+
+       *buf = ret;
+       return 0;
+}
+
+static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+{
+       struct lp5521_chip *chip = engine_to_lp5521(engine);
+       struct i2c_client *client = chip->client;
+       int ret;
+       u8 engine_state;
+
+       /* Only transitions between RUN and DIRECT modes are handled here */
+       if (mode == LP5521_CMD_LOAD)
+               return 0;
+
+       if (mode == LP5521_CMD_DISABLED)
+               mode = LP5521_CMD_DIRECT;
+
+       ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
+
+       /* set mode only for this engine */
+       engine_state &= ~(engine->engine_mask);
+       mode &= engine->engine_mask;
+       engine_state |= mode;
+       ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+
+       return ret;
+}
+
+static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+{
+       struct lp5521_chip *chip = engine_to_lp5521(eng);
+       struct i2c_client *client = chip->client;
+       int ret;
+       int addr;
+       u8 mode;
+
+       /* move current engine to direct mode and remember the state */
+       ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+       usleep_range(1000, 10000);
+       ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+
+       /* For loading, switch all the engines to load mode */
+       lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+       usleep_range(1000, 10000);
+       lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
+       usleep_range(1000, 10000);
+
+       addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
+       i2c_smbus_write_i2c_block_data(client,
+                               addr,
+                               LP5521_PROG_MEM_SIZE,
+                               pattern);
+
+       ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+       return ret;
+}
+
+static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+{
+       return lp5521_write(chip->client,
+                   LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+                   curr);
+}
+
+static void lp5521_init_engine(struct lp5521_chip *chip,
+                       const struct attribute_group *attr_group)
+{
+       int i;
+       for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+               chip->engines[i].id = i + 1;
+               chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
+               chip->engines[i].prog_page = i;
+               chip->engines[i].attributes = &attr_group[i];
+       }
+}
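
A quick check of the initialisation above: LP5521_ENG_MASK_BASE is 0x30, so the loop gives engine 1 the mask 0x30 (bits 5:4 of OP_MODE), engine 2 the mask 0x0c (bits 3:2) and engine 3 the mask 0x03 (bits 1:0), i.e. two mode bits per engine. The command constants encode one two-bit code replicated three times (LP5521_CMD_LOAD 0x15 = 01 01 01, LP5521_CMD_RUN 0x2a = 10 10 10, LP5521_CMD_DIRECT 0x3f = 11 11 11), which is why lp5521_set_engine_mode() can update a single engine with nothing more than masking the command with engine_mask. Likewise prog_page = i places engine 1's program at LP5521_PROG_MEM_BASE + 0x00 = 0x10, engine 2's at 0x30 and engine 3's at 0x50, matching the R/G/B program-memory registers defined above.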
+
+static int lp5521_configure(struct i2c_client *client,
+                       const struct attribute_group *attr_group)
+{
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       int ret;
+
+       lp5521_init_engine(chip, attr_group);
+
+       lp5521_write(client, LP5521_REG_RESET, 0xff);
+
+       usleep_range(10000, 20000);
+
+       /* Set all PWMs to direct control mode */
+       ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
+
+       /* Enable auto-powersave, set charge pump to auto, red to battery */
+       ret |= lp5521_write(client, LP5521_REG_CONFIG,
+               LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+
+       /* Initialize all channels' PWM to zero -> LEDs off */
+       ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
+       ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
+       ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
+
+       /* Engines are set to run state once OP_MODE enables them */
+       ret |= lp5521_write(client, LP5521_REG_ENABLE,
+                       LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
+                       LP5521_EXEC_RUN);
+       /* enable takes 500us */
+       usleep_range(500, 20000);
+
+       return ret;
+}
+
+static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
+{
+       int ret;
+       u8 status;
+
+       ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
+       if (ret < 0)
+               return ret;
+
+       /* Check that ext clock is really in use if requested */
+       if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
+               if  ((status & LP5521_EXT_CLK_USED) == 0)
+                       return -EIO;
+       return 0;
+}
+
+static void lp5521_set_brightness(struct led_classdev *cdev,
+                            enum led_brightness brightness)
+{
+       struct lp5521_led *led = cdev_to_led(cdev);
+       led->brightness = (u8)brightness;
+       schedule_work(&led->brightness_work);
+}
+
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+       struct lp5521_led *led = container_of(work,
+                                             struct lp5521_led,
+                                             brightness_work);
+       struct lp5521_chip *chip = led_to_lp5521(led);
+       struct i2c_client *client = chip->client;
+
+       mutex_lock(&chip->lock);
+       lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+               led->brightness);
+       mutex_unlock(&chip->lock);
+}
+
+/* Detect the chip by setting its ENABLE register and reading it back. */
+static int lp5521_detect(struct i2c_client *client)
+{
+       int ret;
+       u8 buf;
+
+       ret = lp5521_write(client, LP5521_REG_ENABLE,
+                       LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
+       if (ret)
+               return ret;
+       usleep_range(1000, 10000);
+       ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
+       if (ret)
+               return ret;
+       if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
+               return -ENODEV;
+
+       return 0;
+}
+
+/* Set engine mode and create appropriate sysfs attributes, if required. */
+static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
+{
+       struct lp5521_chip *chip = engine_to_lp5521(engine);
+       struct i2c_client *client = chip->client;
+       struct device *dev = &client->dev;
+       int ret = 0;
+
+       /* If already in that mode, do nothing, except for run mode */
+       if (mode == engine->mode && mode != LP5521_CMD_RUN)
+               return 0;
+
+       if (mode == LP5521_CMD_RUN) {
+               ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
+       } else if (mode == LP5521_CMD_LOAD) {
+               lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+               lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
+
+               ret = sysfs_create_group(&dev->kobj, engine->attributes);
+               if (ret)
+                       return ret;
+       } else if (mode == LP5521_CMD_DISABLED) {
+               lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+       }
+
+       /* remove load attribute from sysfs if not in load mode */
+       if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
+               sysfs_remove_group(&dev->kobj, engine->attributes);
+
+       engine->mode = mode;
+
+       return ret;
+}
+
+static int lp5521_do_store_load(struct lp5521_engine *engine,
+                               const char *buf, size_t len)
+{
+       struct lp5521_chip *chip = engine_to_lp5521(engine);
+       struct i2c_client *client = chip->client;
+       int  ret, nrchars, offset = 0, i = 0;
+       char c[3];
+       unsigned cmd;
+       u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+
+       while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+               /* separate sscanfs because the length only works for %s */
+               ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+               ret = sscanf(c, "%2x", &cmd);
+               if (ret != 1)
+                       goto fail;
+               pattern[i] = (u8)cmd;
+
+               offset += nrchars;
+               i++;
+       }
+
+       /* Each instruction is 16 bits long. Check that the length is even */
+       if (i % 2)
+               goto fail;
+
+       mutex_lock(&chip->lock);
+       ret = lp5521_load_program(engine, pattern);
+       mutex_unlock(&chip->lock);
+
+       if (ret) {
+               dev_err(&client->dev, "failed loading pattern\n");
+               return ret;
+       }
+
+       return len;
+fail:
+       dev_err(&client->dev, "wrong pattern format\n");
+       return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t len, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr)                                                 \
+static ssize_t store_engine##nr##_load(struct device *dev,             \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t len)       \
+{                                                                      \
+       return store_engine_load(dev, attr, buf, len, nr);              \
+}
+store_load(1)
+store_load(2)
+store_load(3)
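
For reference, the parser in lp5521_do_store_load() above accepts whitespace-separated two-character hex bytes, an even number of them (instructions are 16 bits) and at most LP5521_PROGRAM_LENGTH = 32 bytes; the engineN_load attribute only exists while that engine has been switched to "load" mode through engineN_mode. A hypothetical user-space sketch follows; the sysfs path and the pattern bytes are made up and not taken from this patch.

#include <stdio.h>

int main(void)
{
	/* Hypothetical sysfs path for an LP5521 at address 0x32 on bus 2. */
	FILE *f = fopen("/sys/bus/i2c/devices/2-0032/engine1_load", "w");

	if (!f)
		return 1;
	/* Four made-up 16-bit instructions, written as hex byte pairs. */
	fputs("40ff 6000 4000 6000", f);
	fclose(f);
	return 0;
}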
+
+static ssize_t show_engine_mode(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       switch (chip->engines[nr - 1].mode) {
+       case LP5521_CMD_RUN:
+               return sprintf(buf, "run\n");
+       case LP5521_CMD_LOAD:
+               return sprintf(buf, "load\n");
+       case LP5521_CMD_DISABLED:
+               return sprintf(buf, "disabled\n");
+       default:
+               return sprintf(buf, "disabled\n");
+       }
+}
+
+#define show_mode(nr)                                                  \
+static ssize_t show_engine##nr##_mode(struct device *dev,              \
+                                   struct device_attribute *attr,      \
+                                   char *buf)                          \
+{                                                                      \
+       return show_engine_mode(dev, attr, buf, nr);                    \
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t len, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       struct lp5521_engine *engine = &chip->engines[nr - 1];
+       mutex_lock(&chip->lock);
+
+       if (!strncmp(buf, "run", 3))
+               lp5521_set_mode(engine, LP5521_CMD_RUN);
+       else if (!strncmp(buf, "load", 4))
+               lp5521_set_mode(engine, LP5521_CMD_LOAD);
+       else if (!strncmp(buf, "disabled", 8))
+               lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+
+       mutex_unlock(&chip->lock);
+       return len;
+}
+
+#define store_mode(nr)                                                 \
+static ssize_t store_engine##nr##_mode(struct device *dev,             \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t len)       \
+{                                                                      \
+       return store_engine_mode(dev, attr, buf, len, nr);              \
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5521_led *led = cdev_to_led(led_cdev);
+
+       return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5521_led *led = cdev_to_led(led_cdev);
+
+       return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5521_led *led = cdev_to_led(led_cdev);
+       struct lp5521_chip *chip = led_to_lp5521(led);
+       ssize_t ret;
+       unsigned long curr;
+
+       if (strict_strtoul(buf, 0, &curr))
+               return -EINVAL;
+
+       if (curr > led->max_current)
+               return -EINVAL;
+
+       mutex_lock(&chip->lock);
+       ret = lp5521_set_led_current(chip, led->id, curr);
+       mutex_unlock(&chip->lock);
+
+       if (ret < 0)
+               return ret;
+
+       led->led_current = (u8)curr;
+
+       return len;
+}
+
+static ssize_t lp5521_selftest(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       int ret;
+
+       mutex_lock(&chip->lock);
+       ret = lp5521_run_selftest(chip, buf);
+       mutex_unlock(&chip->lock);
+       return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
+
+static struct attribute *lp5521_led_attributes[] = {
+       &dev_attr_led_current.attr,
+       &dev_attr_max_current.attr,
+       NULL,
+};
+
+static struct attribute_group lp5521_led_attribute_group = {
+       .attrs = lp5521_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+                  show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+                  show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+                  show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
+
+static struct attribute *lp5521_attributes[] = {
+       &dev_attr_engine1_mode.attr,
+       &dev_attr_engine2_mode.attr,
+       &dev_attr_engine3_mode.attr,
+       &dev_attr_selftest.attr,
+       NULL
+};
+
+static struct attribute *lp5521_engine1_attributes[] = {
+       &dev_attr_engine1_load.attr,
+       NULL
+};
+
+static struct attribute *lp5521_engine2_attributes[] = {
+       &dev_attr_engine2_load.attr,
+       NULL
+};
+
+static struct attribute *lp5521_engine3_attributes[] = {
+       &dev_attr_engine3_load.attr,
+       NULL
+};
+
+static const struct attribute_group lp5521_group = {
+       .attrs = lp5521_attributes,
+};
+
+static const struct attribute_group lp5521_engine_group[] = {
+       {.attrs = lp5521_engine1_attributes },
+       {.attrs = lp5521_engine2_attributes },
+       {.attrs = lp5521_engine3_attributes },
+};
+
+static int lp5521_register_sysfs(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       return sysfs_create_group(&dev->kobj, &lp5521_group);
+}
+
+static void lp5521_unregister_sysfs(struct i2c_client *client)
+{
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       struct device *dev = &client->dev;
+       int i;
+
+       sysfs_remove_group(&dev->kobj, &lp5521_group);
+
+       for (i = 0; i <  ARRAY_SIZE(chip->engines); i++) {
+               if (chip->engines[i].mode == LP5521_CMD_LOAD)
+                       sysfs_remove_group(&dev->kobj,
+                                       chip->engines[i].attributes);
+       }
+
+       for (i = 0; i < chip->num_leds; i++)
+               sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+                               &lp5521_led_attribute_group);
+}
+
+static int __init lp5521_init_led(struct lp5521_led *led,
+                               struct i2c_client *client,
+                               int chan, struct lp5521_platform_data *pdata)
+{
+       struct device *dev = &client->dev;
+       char name[32];
+       int res;
+
+       if (chan >= LP5521_MAX_LEDS)
+               return -EINVAL;
+
+       if (pdata->led_config[chan].led_current == 0)
+               return 0;
+
+       led->led_current = pdata->led_config[chan].led_current;
+       led->max_current = pdata->led_config[chan].max_current;
+       led->chan_nr = pdata->led_config[chan].chan_nr;
+
+       if (led->chan_nr >= LP5521_MAX_LEDS) {
+               dev_err(dev, "Use channel numbers between 0 and %d\n",
+                       LP5521_MAX_LEDS - 1);
+               return -EINVAL;
+       }
+
+       snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+       led->cdev.brightness_set = lp5521_set_brightness;
+       led->cdev.name = name;
+       res = led_classdev_register(dev, &led->cdev);
+       if (res < 0) {
+               dev_err(dev, "couldn't register led on channel %d\n", chan);
+               return res;
+       }
+
+       res = sysfs_create_group(&led->cdev.dev->kobj,
+                       &lp5521_led_attribute_group);
+       if (res < 0) {
+               dev_err(dev, "couldn't register current attribute\n");
+               led_classdev_unregister(&led->cdev);
+               return res;
+       }
+       return 0;
+}
+
+static int lp5521_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct lp5521_chip              *chip;
+       struct lp5521_platform_data     *pdata;
+       int ret, i, led;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, chip);
+       chip->client = client;
+
+       pdata = client->dev.platform_data;
+
+       if (!pdata) {
+               dev_err(&client->dev, "no platform data\n");
+               ret = -EINVAL;
+               goto fail1;
+       }
+
+       mutex_init(&chip->lock);
+
+       chip->pdata   = pdata;
+
+       if (pdata->setup_resources) {
+               ret = pdata->setup_resources();
+               if (ret < 0)
+                       goto fail1;
+       }
+
+       if (pdata->enable) {
+               pdata->enable(0);
+               usleep_range(1000, 10000);
+               pdata->enable(1);
+               usleep_range(1000, 10000); /* Spec says min 500us */
+       }
+
+       ret = lp5521_detect(client);
+
+       if (ret) {
+               dev_err(&client->dev, "Chip not found\n");
+               goto fail2;
+       }
+
+       dev_info(&client->dev, "%s programmable led chip found\n", id->name);
+
+       ret = lp5521_configure(client, lp5521_engine_group);
+       if (ret < 0) {
+               dev_err(&client->dev, "error configuring chip\n");
+               goto fail2;
+       }
+
+       /* Initialize leds */
+       chip->num_channels = pdata->num_channels;
+       chip->num_leds = 0;
+       led = 0;
+       for (i = 0; i < pdata->num_channels; i++) {
+               /* Do not initialize channels that are not connected */
+               if (pdata->led_config[i].led_current == 0)
+                       continue;
+
+               ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
+               if (ret) {
+                       dev_err(&client->dev, "error initializing leds\n");
+                       goto fail3;
+               }
+               chip->num_leds++;
+
+               chip->leds[led].id = led;
+               /* Set initial LED current */
+               lp5521_set_led_current(chip, led,
+                               chip->leds[led].led_current);
+
+               INIT_WORK(&(chip->leds[led].brightness_work),
+                       lp5521_led_brightness_work);
+
+               led++;
+       }
+
+       ret = lp5521_register_sysfs(client);
+       if (ret) {
+               dev_err(&client->dev, "registering sysfs failed\n");
+               goto fail3;
+       }
+       return ret;
+fail3:
+       for (i = 0; i < chip->num_leds; i++) {
+               led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+       }
+fail2:
+       if (pdata->enable)
+               pdata->enable(0);
+       if (pdata->release_resources)
+               pdata->release_resources();
+fail1:
+       kfree(chip);
+       return ret;
+}
+
+static int lp5521_remove(struct i2c_client *client)
+{
+       struct lp5521_chip *chip = i2c_get_clientdata(client);
+       int i;
+
+       lp5521_unregister_sysfs(client);
+
+       for (i = 0; i < chip->num_leds; i++) {
+               led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+       }
+
+       if (chip->pdata->enable)
+               chip->pdata->enable(0);
+       if (chip->pdata->release_resources)
+               chip->pdata->release_resources();
+       kfree(chip);
+       return 0;
+}
+
+static const struct i2c_device_id lp5521_id[] = {
+       { "lp5521", 0 }, /* Three channel chip */
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, lp5521_id);
+
+static struct i2c_driver lp5521_driver = {
+       .driver = {
+               .name   = "lp5521",
+       },
+       .probe          = lp5521_probe,
+       .remove         = lp5521_remove,
+       .id_table       = lp5521_id,
+};
+
+static int __init lp5521_init(void)
+{
+       int ret;
+
+       ret = i2c_add_driver(&lp5521_driver);
+
+       if (ret < 0)
+               printk(KERN_ALERT "Adding lp5521 driver failed\n");
+
+       return ret;
+}
+
+static void __exit lp5521_exit(void)
+{
+       i2c_del_driver(&lp5521_driver);
+}
+
+module_init(lp5521_init);
+module_exit(lp5521_exit);
+
+MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_DESCRIPTION("LP5521 LED engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644 (file)
index 0000000..1e11fcc
--- /dev/null
@@ -0,0 +1,1065 @@
+/*
+ * lp5523.c - LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5523_REG_ENABLE              0x00
+#define LP5523_REG_OP_MODE             0x01
+#define LP5523_REG_RATIOMETRIC_MSB     0x02
+#define LP5523_REG_RATIOMETRIC_LSB     0x03
+#define LP5523_REG_ENABLE_LEDS_MSB     0x04
+#define LP5523_REG_ENABLE_LEDS_LSB     0x05
+#define LP5523_REG_LED_CNTRL_BASE      0x06
+#define LP5523_REG_LED_PWM_BASE                0x16
+#define LP5523_REG_LED_CURRENT_BASE    0x26
+#define LP5523_REG_CONFIG              0x36
+#define LP5523_REG_CHANNEL1_PC         0x37
+#define LP5523_REG_CHANNEL2_PC         0x38
+#define LP5523_REG_CHANNEL3_PC         0x39
+#define LP5523_REG_STATUS              0x3a
+#define LP5523_REG_GPO                 0x3b
+#define LP5523_REG_VARIABLE            0x3c
+#define LP5523_REG_RESET               0x3d
+#define LP5523_REG_TEMP_CTRL           0x3e
+#define LP5523_REG_TEMP_READ           0x3f
+#define LP5523_REG_TEMP_WRITE          0x40
+#define LP5523_REG_LED_TEST_CTRL       0x41
+#define LP5523_REG_LED_TEST_ADC                0x42
+#define LP5523_REG_ENG1_VARIABLE       0x45
+#define LP5523_REG_ENG2_VARIABLE       0x46
+#define LP5523_REG_ENG3_VARIABLE       0x47
+#define LP5523_REG_MASTER_FADER1       0x48
+#define LP5523_REG_MASTER_FADER2       0x49
+#define LP5523_REG_MASTER_FADER3       0x4a
+#define LP5523_REG_CH1_PROG_START      0x4c
+#define LP5523_REG_CH2_PROG_START      0x4d
+#define LP5523_REG_CH3_PROG_START      0x4e
+#define LP5523_REG_PROG_PAGE_SEL       0x4f
+#define LP5523_REG_PROG_MEM            0x50
+
+#define LP5523_CMD_LOAD                        0x15 /* 00010101 */
+#define LP5523_CMD_RUN                 0x2a /* 00101010 */
+#define LP5523_CMD_DISABLED            0x00 /* 00000000 */
+
+#define LP5523_ENABLE                  0x40
+#define LP5523_AUTO_INC                        0x40
+#define LP5523_PWR_SAVE                        0x20
+#define LP5523_PWM_PWR_SAVE            0x04
+#define LP5523_CP_1                    0x08
+#define LP5523_CP_1_5                  0x10
+#define LP5523_CP_AUTO                 0x18
+#define LP5523_INT_CLK                 0x01
+#define LP5523_AUTO_CLK                        0x02
+#define LP5523_EN_LEDTEST              0x80
+#define LP5523_LEDTEST_DONE            0x80
+
+#define LP5523_DEFAULT_CURRENT         50 /* microAmps */
+#define LP5523_PROGRAM_LENGTH          32 /* in bytes */
+#define LP5523_PROGRAM_PAGES           6
+#define LP5523_ADC_SHORTCIRC_LIM       80
+
+#define LP5523_LEDS                    9
+#define LP5523_ENGINES                 3
+
+#define LP5523_ENG_MASK_BASE           0x30 /* 00110000 */
+
+#define LP5523_ENG_STATUS_MASK          0x07 /* 00000111 */
+
+#define LP5523_IRQ_FLAGS                IRQF_TRIGGER_FALLING
+
+#define LP5523_EXT_CLK_USED            0x08
+
+#define LED_ACTIVE(mux, led)           (!!(mux & (0x0001 << led)))
+#define SHIFT_MASK(id)                 (((id) - 1) * 2)
+
+struct lp5523_engine {
+       const struct attribute_group *attributes;
+       int             id;
+       u8              mode;
+       u8              prog_page;
+       u8              mux_page;
+       u16             led_mux;
+       u8              engine_mask;
+};
+
+struct lp5523_led {
+       int                     id;
+       u8                      chan_nr;
+       u8                      led_current;
+       u8                      max_current;
+       struct led_classdev     cdev;
+       struct work_struct      brightness_work;
+       u8                      brightness;
+};
+
+struct lp5523_chip {
+       struct mutex            lock; /* Serialize control */
+       struct i2c_client       *client;
+       struct lp5523_engine    engines[LP5523_ENGINES];
+       struct lp5523_led       leds[LP5523_LEDS];
+       struct lp5523_platform_data *pdata;
+       u8                      num_channels;
+       u8                      num_leds;
+};
+
+#define cdev_to_led(c)          container_of(c, struct lp5523_led, cdev)
+
+static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
+{
+       return container_of(engine, struct lp5523_chip,
+                           engines[engine->id - 1]);
+}
+
+static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
+{
+       return container_of(led, struct lp5523_chip,
+                           leds[led->id]);
+}
+
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
+
+static void lp5523_led_brightness_work(struct work_struct *work);
+
+static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+       return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+       s32 ret = i2c_smbus_read_byte_data(client, reg);
+
+       if (ret < 0)
+               return -EIO;
+
+       *buf = ret;
+       return 0;
+}
+
+static int lp5523_detect(struct i2c_client *client)
+{
+       int ret;
+       u8 buf;
+
+       ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
+       if (ret)
+               return ret;
+       ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+       if (ret)
+               return ret;
+       if (buf == 0x40)
+               return 0;
+       else
+               return -ENODEV;
+}
+
+static int lp5523_configure(struct i2c_client *client)
+{
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       int ret = 0;
+       u8 status;
+
+       /* one pattern per engine, setting the led mux start and stop addresses */
+       u8 pattern[][LP5523_PROGRAM_LENGTH] =  {
+               { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+               { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+               { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+       };
+
+       lp5523_write(client, LP5523_REG_RESET, 0xff);
+
+       usleep_range(10000, 100000);
+
+       ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
+       /* Chip startup time after reset is 500 us */
+       usleep_range(1000, 10000);
+
+       ret |= lp5523_write(client, LP5523_REG_CONFIG,
+                           LP5523_AUTO_INC | LP5523_PWR_SAVE |
+                           LP5523_CP_AUTO | LP5523_AUTO_CLK |
+                           LP5523_PWM_PWR_SAVE);
+
+       /* turn on all leds */
+       ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+       ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+
+       /* hardcode 32 bytes of memory for each engine from program memory */
+       ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
+       ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
+       ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
+
+       /* write led mux address space for each channel */
+       ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
+       ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
+       ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
+
+       if (ret) {
+               dev_err(&client->dev, "could not load mux programs\n");
+               return -1;
+       }
+
+       /* set all engines exec state and mode to run 00101010 */
+       ret |= lp5523_write(client, LP5523_REG_ENABLE,
+                           (LP5523_CMD_RUN | LP5523_ENABLE));
+
+       ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
+
+       if (ret) {
+               dev_err(&client->dev, "could not start mux programs\n");
+               return -1;
+       }
+
+       /* Wait 3ms and check the engine status */
+       usleep_range(3000, 20000);
+       lp5523_read(client, LP5523_REG_STATUS, &status);
+       status &= LP5523_ENG_STATUS_MASK;
+
+       if (status == LP5523_ENG_STATUS_MASK) {
+               dev_dbg(&client->dev, "all engines configured\n");
+       } else {
+               dev_info(&client->dev, "status == %x\n", status);
+               dev_err(&client->dev, "could not configure LED engine\n");
+               return -1;
+       }
+
+       dev_info(&client->dev, "disabling engines\n");
+
+       ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+
+       return ret;
+}
+
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+{
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+       int ret;
+       u8 engine_state;
+
+       ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
+       if (ret)
+               goto fail;
+
+       engine_state &= ~(engine->engine_mask);
+
+       /* set mode only for this engine */
+       mode &= engine->engine_mask;
+
+       engine_state |= mode;
+
+       ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
+fail:
+       return ret;
+}
+
+static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+{
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+       int ret = 0;
+
+       ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+       ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
+       ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
+                           (u8)(mux >> 8));
+       ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
+       engine->led_mux = mux;
+
+       return ret;
+}
+
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
+{
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+
+       int ret = 0;
+
+       ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+       ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
+                           engine->prog_page);
+       ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
+                                             LP5523_PROGRAM_LENGTH, pattern);
+
+       return ret;
+}
+
+static int lp5523_run_program(struct lp5523_engine *engine)
+{
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+       int ret;
+
+       ret = lp5523_write(client, LP5523_REG_ENABLE,
+                                       LP5523_CMD_RUN | LP5523_ENABLE);
+       if (ret)
+               goto fail;
+
+       ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
+fail:
+       return ret;
+}
+
+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+{
+       int i;
+       u16 tmp_mux = 0;
+       len = len < LP5523_LEDS ? len : LP5523_LEDS;
+       for (i = 0; i < len; i++) {
+               switch (buf[i]) {
+               case '1':
+                       tmp_mux |= (1 << i);
+                       break;
+               case '0':
+                       break;
+               case '\n':
+                       i = len;
+                       break;
+               default:
+                       return -1;
+               }
+       }
+       *mux = tmp_mux;
+
+       return 0;
+}
+
+static void lp5523_mux_to_array(u16 led_mux, char *array)
+{
+       int i, pos = 0;
+       for (i = 0; i < LP5523_LEDS; i++)
+               pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+
+       array[pos] = '\0';
+}
+
+/*--------------------------------------------------------------*/
+/*                     Sysfs interface                         */
+/*--------------------------------------------------------------*/
+
+static ssize_t show_engine_leds(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       char mux[LP5523_LEDS + 1];
+
+       lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+
+       return sprintf(buf, "%s\n", mux);
+}
+
+#define show_leds(nr)                                                  \
+static ssize_t show_engine##nr##_leds(struct device *dev,              \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       return show_engine_leds(dev, attr, buf, nr);                    \
+}
+show_leds(1)
+show_leds(2)
+show_leds(3)
+
+static ssize_t store_engine_leds(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t len, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       u16 mux = 0;
+
+       if (lp5523_mux_parse(buf, &mux, len))
+               return -EINVAL;
+
+       if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+               return -EINVAL;
+
+       return len;
+}
+
+#define store_leds(nr)                                         \
+static ssize_t store_engine##nr##_leds(struct device *dev,     \
+                            struct device_attribute *attr,     \
+                            const char *buf, size_t len)       \
+{                                                              \
+       return store_engine_leds(dev, attr, buf, len, nr);      \
+}
+store_leds(1)
+store_leds(2)
+store_leds(3)
+
+static ssize_t lp5523_selftest(struct device *dev,
+                              struct device_attribute *attr,
+                              char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       int i, ret, pos = 0;
+       int led = 0;
+       u8 status, adc, vdd;
+
+       mutex_lock(&chip->lock);
+
+       ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+       if (ret < 0)
+               goto fail;
+
+       /* Check that ext clock is really in use if requested */
+       if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+               if  ((status & LP5523_EXT_CLK_USED) == 0)
+                       goto fail;
+
+       /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
+       lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
+                                   LP5523_EN_LEDTEST | 16);
+       usleep_range(3000, 10000);
+       ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+       if (!(status & LP5523_LEDTEST_DONE))
+               usleep_range(3000, 10000);
+
+       ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+       vdd--;  /* There may be some fluctuation in measurement */
+
+       for (i = 0; i < LP5523_LEDS; i++) {
+               /* Skip non-existing channels */
+               if (chip->pdata->led_config[i].led_current == 0)
+                       continue;
+
+               /* Set default current */
+               lp5523_write(chip->client,
+                       LP5523_REG_LED_CURRENT_BASE + i,
+                       chip->pdata->led_config[i].led_current);
+
+               lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+               /* let current stabilize 2ms before measurements start */
+               usleep_range(2000, 10000);
+               lp5523_write(chip->client,
+                            LP5523_REG_LED_TEST_CTRL,
+                            LP5523_EN_LEDTEST | i);
+               /* ledtest takes 2.7ms */
+               usleep_range(3000, 10000);
+               ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+               if (!(status & LP5523_LEDTEST_DONE))
+                       usleep_range(3000, 10000);
+               ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+               if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
+                       pos += sprintf(buf + pos, "LED %d FAIL\n", i);
+
+               lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+
+               /* Restore current */
+               lp5523_write(chip->client,
+                       LP5523_REG_LED_CURRENT_BASE + i,
+                       chip->leds[led].led_current);
+               led++;
+       }
+       if (pos == 0)
+               pos = sprintf(buf, "OK\n");
+       goto release_lock;
+fail:
+       pos = sprintf(buf, "FAIL\n");
+
+release_lock:
+       mutex_unlock(&chip->lock);
+
+       return pos;
+}
+
+static void lp5523_set_brightness(struct led_classdev *cdev,
+                            enum led_brightness brightness)
+{
+       struct lp5523_led *led = cdev_to_led(cdev);
+
+       led->brightness = (u8)brightness;
+
+       schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+       struct lp5523_led *led = container_of(work,
+                                             struct lp5523_led,
+                                             brightness_work);
+       struct lp5523_chip *chip = led_to_lp5523(led);
+       struct i2c_client *client = chip->client;
+
+       mutex_lock(&chip->lock);
+
+       lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+                    led->brightness);
+
+       mutex_unlock(&chip->lock);
+}
+
+static int lp5523_do_store_load(struct lp5523_engine *engine,
+                               const char *buf, size_t len)
+{
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+       int  ret, nrchars, offset = 0, i = 0;
+       char c[3];
+       unsigned cmd;
+       u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+
+       while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+               /* separate sscanfs because a field width limit only works with %s */
+               ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+               ret = sscanf(c, "%2x", &cmd);
+               if (ret != 1)
+                       goto fail;
+               pattern[i] = (u8)cmd;
+
+               offset += nrchars;
+               i++;
+       }
+
+       /* Each instruction is 16 bits long. Check that the length is even */
+       if (i % 2)
+               goto fail;
+
+       mutex_lock(&chip->lock);
+
+       ret = lp5523_load_program(engine, pattern);
+       mutex_unlock(&chip->lock);
+
+       if (ret) {
+               dev_err(&client->dev, "failed loading pattern\n");
+               return ret;
+       }
+
+       return len;
+fail:
+       dev_err(&client->dev, "wrong pattern format\n");
+       return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t len, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr)                                                 \
+static ssize_t store_engine##nr##_load(struct device *dev,             \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t len)       \
+{                                                                      \
+       return store_engine_load(dev, attr, buf, len, nr);              \
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       switch (chip->engines[nr - 1].mode) {
+       case LP5523_CMD_RUN:
+               return sprintf(buf, "run\n");
+       case LP5523_CMD_LOAD:
+               return sprintf(buf, "load\n");
+       case LP5523_CMD_DISABLED:
+               return sprintf(buf, "disabled\n");
+       default:
+               return sprintf(buf, "disabled\n");
+       }
+}
+
+#define show_mode(nr)                                                  \
+static ssize_t show_engine##nr##_mode(struct device *dev,              \
+                                   struct device_attribute *attr,      \
+                                   char *buf)                          \
+{                                                                      \
+       return show_engine_mode(dev, attr, buf, nr);                    \
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t len, int nr)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       struct lp5523_engine *engine = &chip->engines[nr - 1];
+       mutex_lock(&chip->lock);
+
+       if (!strncmp(buf, "run", 3))
+               lp5523_set_mode(engine, LP5523_CMD_RUN);
+       else if (!strncmp(buf, "load", 4))
+               lp5523_set_mode(engine, LP5523_CMD_LOAD);
+       else if (!strncmp(buf, "disabled", 8))
+               lp5523_set_mode(engine, LP5523_CMD_DISABLED);
+
+       mutex_unlock(&chip->lock);
+       return len;
+}
+
+#define store_mode(nr)                                                 \
+static ssize_t store_engine##nr##_mode(struct device *dev,             \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t len)       \
+{                                                                      \
+       return store_engine_mode(dev, attr, buf, len, nr);              \
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5523_led *led = cdev_to_led(led_cdev);
+
+       return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+                           struct device_attribute *attr,
+                           char *buf)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5523_led *led = cdev_to_led(led_cdev);
+
+       return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t len)
+{
+       struct led_classdev *led_cdev = dev_get_drvdata(dev);
+       struct lp5523_led *led = cdev_to_led(led_cdev);
+       struct lp5523_chip *chip = led_to_lp5523(led);
+       ssize_t ret;
+       unsigned long curr;
+
+       if (strict_strtoul(buf, 0, &curr))
+               return -EINVAL;
+
+       if (curr > led->max_current)
+               return -EINVAL;
+
+       mutex_lock(&chip->lock);
+       ret = lp5523_write(chip->client,
+                       LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+                       (u8)curr);
+       mutex_unlock(&chip->lock);
+
+       if (ret < 0)
+               return ret;
+
+       led->led_current = (u8)curr;
+
+       return len;
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5523_led_attributes[] = {
+       &dev_attr_led_current.attr,
+       &dev_attr_max_current.attr,
+       NULL,
+};
+
+static struct attribute_group lp5523_led_attribute_group = {
+       .attrs = lp5523_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+                  show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+                  show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+                  show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
+                  show_engine1_leds, store_engine1_leds);
+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
+                  show_engine2_leds, store_engine2_leds);
+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
+                  show_engine3_leds, store_engine3_leds);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
+
+static struct attribute *lp5523_attributes[] = {
+       &dev_attr_engine1_mode.attr,
+       &dev_attr_engine2_mode.attr,
+       &dev_attr_engine3_mode.attr,
+       &dev_attr_selftest.attr,
+       NULL
+};
+
+static struct attribute *lp5523_engine1_attributes[] = {
+       &dev_attr_engine1_load.attr,
+       &dev_attr_engine1_leds.attr,
+       NULL
+};
+
+static struct attribute *lp5523_engine2_attributes[] = {
+       &dev_attr_engine2_load.attr,
+       &dev_attr_engine2_leds.attr,
+       NULL
+};
+
+static struct attribute *lp5523_engine3_attributes[] = {
+       &dev_attr_engine3_load.attr,
+       &dev_attr_engine3_leds.attr,
+       NULL
+};
+
+static const struct attribute_group lp5523_group = {
+       .attrs = lp5523_attributes,
+};
+
+static const struct attribute_group lp5523_engine_group[] = {
+       {.attrs = lp5523_engine1_attributes },
+       {.attrs = lp5523_engine2_attributes },
+       {.attrs = lp5523_engine3_attributes },
+};
+
+static int lp5523_register_sysfs(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       int ret;
+
+       ret = sysfs_create_group(&dev->kobj, &lp5523_group);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void lp5523_unregister_sysfs(struct i2c_client *client)
+{
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       struct device *dev = &client->dev;
+       int i;
+
+       sysfs_remove_group(&dev->kobj, &lp5523_group);
+
+       for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
+               if (chip->engines[i].mode == LP5523_CMD_LOAD)
+                       sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
+
+       for (i = 0; i < chip->num_leds; i++)
+               sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+                               &lp5523_led_attribute_group);
+}
+
+/*--------------------------------------------------------------*/
+/*                     Set chip operating mode                 */
+/*--------------------------------------------------------------*/
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
+{
+       /*  engine to chip */
+       struct lp5523_chip *chip = engine_to_lp5523(engine);
+       struct i2c_client *client = chip->client;
+       struct device *dev = &client->dev;
+       int ret = 0;
+
+       /* if already in that mode, do nothing, except for run */
+       if (mode == engine->mode && mode != LP5523_CMD_RUN)
+               return 0;
+
+       if (mode == LP5523_CMD_RUN) {
+               ret = lp5523_run_program(engine);
+       } else if (mode == LP5523_CMD_LOAD) {
+               lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+               lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+               ret = sysfs_create_group(&dev->kobj, engine->attributes);
+               if (ret)
+                       return ret;
+       } else if (mode == LP5523_CMD_DISABLED) {
+               lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+       }
+
+       /* remove load attribute from sysfs if not in load mode */
+       if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
+               sysfs_remove_group(&dev->kobj, engine->attributes);
+
+       engine->mode = mode;
+
+       return ret;
+}
+
+/*--------------------------------------------------------------*/
+/*                     Probe, Attach, Remove                   */
+/*--------------------------------------------------------------*/
+static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
+{
+       if (id < 1 || id > LP5523_ENGINES)
+               return -1;
+       engine->id = id;
+       engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
+       engine->prog_page = id - 1;
+       engine->mux_page = id + 2;
+       engine->attributes = &lp5523_engine_group[id - 1];
+
+       return 0;
+}
+
+static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+                          int chan, struct lp5523_platform_data *pdata)
+{
+       char name[32];
+       int res;
+
+       if (chan >= LP5523_LEDS)
+               return -EINVAL;
+
+       if (pdata->led_config[chan].led_current) {
+               led->led_current = pdata->led_config[chan].led_current;
+               led->max_current = pdata->led_config[chan].max_current;
+               led->chan_nr = pdata->led_config[chan].chan_nr;
+
+               if (led->chan_nr >= LP5523_LEDS) {
+                       dev_err(dev, "Use channel numbers between 0 and %d\n",
+                               LP5523_LEDS - 1);
+                       return -EINVAL;
+               }
+
+               snprintf(name, sizeof(name), "lp5523:channel%d", chan);
+
+               led->cdev.name = name;
+               led->cdev.brightness_set = lp5523_set_brightness;
+               res = led_classdev_register(dev, &led->cdev);
+               if (res < 0) {
+                       dev_err(dev, "couldn't register led on channel %d\n",
+                               chan);
+                       return res;
+               }
+               res = sysfs_create_group(&led->cdev.dev->kobj,
+                               &lp5523_led_attribute_group);
+               if (res < 0) {
+                       dev_err(dev, "couldn't register current attribute\n");
+                       led_classdev_unregister(&led->cdev);
+                       return res;
+               }
+       } else {
+               led->led_current = 0;
+       }
+       return 0;
+}
+
+static struct i2c_driver lp5523_driver;
+
+static int lp5523_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct lp5523_chip              *chip;
+       struct lp5523_platform_data     *pdata;
+       int ret, i, led;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, chip);
+       chip->client = client;
+
+       pdata = client->dev.platform_data;
+
+       if (!pdata) {
+               dev_err(&client->dev, "no platform data\n");
+               ret = -EINVAL;
+               goto fail1;
+       }
+
+       mutex_init(&chip->lock);
+
+       chip->pdata   = pdata;
+
+       if (pdata->setup_resources) {
+               ret = pdata->setup_resources();
+               if (ret < 0)
+                       goto fail1;
+       }
+
+       if (pdata->enable) {
+               pdata->enable(0);
+               usleep_range(1000, 10000);
+               pdata->enable(1);
+               usleep_range(1000, 10000); /* Spec says min 500us */
+       }
+
+       ret = lp5523_detect(client);
+       if (ret)
+               goto fail2;
+
+       dev_info(&client->dev, "LP5523 programmable led chip found\n");
+
+       /* Initialize engines */
+       for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+               ret = lp5523_init_engine(&chip->engines[i], i + 1);
+               if (ret) {
+                       dev_err(&client->dev, "error initializing engine\n");
+                       goto fail2;
+               }
+       }
+       ret = lp5523_configure(client);
+       if (ret < 0) {
+               dev_err(&client->dev, "error configuring chip\n");
+               goto fail2;
+       }
+
+       /* Initialize leds */
+       chip->num_channels = pdata->num_channels;
+       chip->num_leds = 0;
+       led = 0;
+       for (i = 0; i < pdata->num_channels; i++) {
+               /* Do not initialize channels that are not connected */
+               if (pdata->led_config[i].led_current == 0)
+                       continue;
+
+               ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
+               if (ret) {
+                       dev_err(&client->dev, "error initializing leds\n");
+                       goto fail3;
+               }
+               chip->num_leds++;
+
+               chip->leds[led].id = led;
+               /* Set LED current */
+               lp5523_write(client,
+                         LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+                         chip->leds[led].led_current);
+
+               INIT_WORK(&(chip->leds[led].brightness_work),
+                       lp5523_led_brightness_work);
+
+               led++;
+       }
+
+       ret = lp5523_register_sysfs(client);
+       if (ret) {
+               dev_err(&client->dev, "registering sysfs failed\n");
+               goto fail3;
+       }
+       return ret;
+fail3:
+       for (i = 0; i < chip->num_leds; i++) {
+               led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+       }
+fail2:
+       if (pdata->enable)
+               pdata->enable(0);
+       if (pdata->release_resources)
+               pdata->release_resources();
+fail1:
+       kfree(chip);
+       return ret;
+}
+
+static int lp5523_remove(struct i2c_client *client)
+{
+       struct lp5523_chip *chip = i2c_get_clientdata(client);
+       int i;
+
+       lp5523_unregister_sysfs(client);
+
+       for (i = 0; i < chip->num_leds; i++) {
+               led_classdev_unregister(&chip->leds[i].cdev);
+               cancel_work_sync(&chip->leds[i].brightness_work);
+       }
+
+       if (chip->pdata->enable)
+               chip->pdata->enable(0);
+       if (chip->pdata->release_resources)
+               chip->pdata->release_resources();
+       kfree(chip);
+       return 0;
+}
+
+static const struct i2c_device_id lp5523_id[] = {
+       { "lp5523", 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(i2c, lp5523_id);
+
+static struct i2c_driver lp5523_driver = {
+       .driver = {
+               .name   = "lp5523",
+       },
+       .probe          = lp5523_probe,
+       .remove         = lp5523_remove,
+       .id_table       = lp5523_id,
+};
+
+static int __init lp5523_init(void)
+{
+       int ret;
+
+       ret = i2c_add_driver(&lp5523_driver);
+
+       if (ret < 0)
+               printk(KERN_ALERT "Adding lp5523 driver failed\n");
+
+       return ret;
+}
+
+static void __exit lp5523_exit(void)
+{
+       i2c_del_driver(&lp5523_driver);
+}
+
+module_init(lp5523_init);
+module_exit(lp5523_exit);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_DESCRIPTION("LP5523 LED engine");
+MODULE_LICENSE("GPL");
index 3063f591f0dca0e0eb809bb4b490039d8bcf2943..1739557a903881a246c14d5047cca5a454081894 100644 (file)
@@ -92,3 +92,5 @@ unmap:
 }
 
 arch_initcall(soekris_init);
+
+MODULE_LICENSE("GPL");
index 82b77bd482ffdb4f952f0785c197db6efe1e8736..b09bcbeade9c433c56365204b4848690bf08a366 100644 (file)
  */
 
 #include <linux/module.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
-#include <linux/slab.h>
 #include "leds.h"
 
-struct timer_trig_data {
-       int brightness_on;              /* LED brightness during "on" period.
-                                        * (LED_OFF < brightness_on <= LED_FULL)
-                                        */
-       unsigned long delay_on;         /* milliseconds on */
-       unsigned long delay_off;        /* milliseconds off */
-       struct timer_list timer;
-};
-
-static void led_timer_function(unsigned long data)
-{
-       struct led_classdev *led_cdev = (struct led_classdev *) data;
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
-       unsigned long brightness;
-       unsigned long delay;
-
-       if (!timer_data->delay_on || !timer_data->delay_off) {
-               led_set_brightness(led_cdev, LED_OFF);
-               return;
-       }
-
-       brightness = led_get_brightness(led_cdev);
-       if (!brightness) {
-               /* Time to switch the LED on. */
-               brightness = timer_data->brightness_on;
-               delay = timer_data->delay_on;
-       } else {
-               /* Store the current brightness value to be able
-                * to restore it when the delay_off period is over.
-                */
-               timer_data->brightness_on = brightness;
-               brightness = LED_OFF;
-               delay = timer_data->delay_off;
-       }
-
-       led_set_brightness(led_cdev, brightness);
-
-       mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
-}
-
 static ssize_t led_delay_on_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-       return sprintf(buf, "%lu\n", timer_data->delay_on);
+       return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
 }
 
 static ssize_t led_delay_on_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
        int ret = -EINVAL;
        char *after;
        unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev,
                count++;
 
        if (count == size) {
-               if (timer_data->delay_on != state) {
-                       /* the new value differs from the previous */
-                       timer_data->delay_on = state;
-
-                       /* deactivate previous settings */
-                       del_timer_sync(&timer_data->timer);
-
-                       /* try to activate hardware acceleration, if any */
-                       if (!led_cdev->blink_set ||
-                           led_cdev->blink_set(led_cdev,
-                             &timer_data->delay_on, &timer_data->delay_off)) {
-                               /* no hardware acceleration, blink via timer */
-                               mod_timer(&timer_data->timer, jiffies + 1);
-                       }
-               }
+               led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
                ret = count;
        }
 
@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-       return sprintf(buf, "%lu\n", timer_data->delay_off);
+       return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
 }
 
 static ssize_t led_delay_off_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
        int ret = -EINVAL;
        char *after;
        unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev,
                count++;
 
        if (count == size) {
-               if (timer_data->delay_off != state) {
-                       /* the new value differs from the previous */
-                       timer_data->delay_off = state;
-
-                       /* deactivate previous settings */
-                       del_timer_sync(&timer_data->timer);
-
-                       /* try to activate hardware acceleration, if any */
-                       if (!led_cdev->blink_set ||
-                           led_cdev->blink_set(led_cdev,
-                             &timer_data->delay_on, &timer_data->delay_off)) {
-                               /* no hardware acceleration, blink via timer */
-                               mod_timer(&timer_data->timer, jiffies + 1);
-                       }
-               }
+               led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
                ret = count;
        }
 
@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
 
 static void timer_trig_activate(struct led_classdev *led_cdev)
 {
-       struct timer_trig_data *timer_data;
        int rc;
 
-       timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
-       if (!timer_data)
-               return;
-
-       timer_data->brightness_on = led_get_brightness(led_cdev);
-       if (timer_data->brightness_on == LED_OFF)
-               timer_data->brightness_on = led_cdev->max_brightness;
-       led_cdev->trigger_data = timer_data;
-
-       init_timer(&timer_data->timer);
-       timer_data->timer.function = led_timer_function;
-       timer_data->timer.data = (unsigned long) led_cdev;
+       led_cdev->trigger_data = NULL;
 
        rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
        if (rc)
-               goto err_out;
+               return;
        rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
        if (rc)
                goto err_out_delayon;
 
-       /* If there is hardware support for blinking, start one
-        * user friendly blink rate chosen by the driver.
-        */
-       if (led_cdev->blink_set)
-               led_cdev->blink_set(led_cdev,
-                       &timer_data->delay_on, &timer_data->delay_off);
+       led_cdev->trigger_data = (void *)1;
 
        return;
 
 err_out_delayon:
        device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out:
-       led_cdev->trigger_data = NULL;
-       kfree(timer_data);
 }
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-       struct timer_trig_data *timer_data = led_cdev->trigger_data;
-       unsigned long on = 0, off = 0;
-
-       if (timer_data) {
+       if (led_cdev->trigger_data) {
                device_remove_file(led_cdev->dev, &dev_attr_delay_on);
                device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-               del_timer_sync(&timer_data->timer);
-               kfree(timer_data);
        }
 
-       /* If there is hardware support for blinking, stop it */
-       if (led_cdev->blink_set)
-               led_cdev->blink_set(led_cdev, &on, &off);
+       /* Stop blinking */
+       led_brightness_set(led_cdev, LED_OFF);
 }
 
 static struct led_trigger timer_led_trigger = {
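
The hunk above strips the trigger's private timer and delegates blinking to the LED core. A hedged sketch (the helper's semantics are inferred from the led_blink_set() calls above, not stated in this commit): any code holding a struct led_classdev can request blinking the same way.

#include <linux/leds.h>

/* sketch only: ask the LED core to blink a device at 500 ms on / 500 ms off;
 * the core is expected to use the driver's hardware blink_set() when present
 * and fall back to a software timer otherwise.
 */
static void example_start_blink(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 500;
	unsigned long delay_off = 500;

	led_blink_set(led_cdev, &delay_on, &delay_off);
}
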
index 444696625171e42a6948eb858299a8a489626510..f5f4da3d0b675644963af93f327f03f455368bd8 100644 (file)
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state)
 static void adb_iop_complete(struct iop_msg *msg)
 {
        struct adb_request *req;
-       uint flags;
+       unsigned long flags;
 
        local_irq_save(flags);
 
@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg)
 {
        struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
        struct adb_request *req;
-       uint flags;
+       unsigned long flags;
 #ifdef DEBUG_ADB_IOP
        int i;
 #endif
index 647d52b1a1b75d3a38390d3ec3c9588abca28ac2..f60107c3b091b966895e48f6e1860112bbaa3482 100644 (file)
@@ -389,6 +389,8 @@ static int ir_getkeycode(struct input_dev *dev,
        ke->len = sizeof(entry->scancode);
        memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
 
+       retval = 0;
+
 out:
        spin_unlock_irqrestore(&rc_tab->lock, flags);
        return retval;
index f9b91ba8900c96f027a284185b97fc9dd1e755b0..0ed09358027ec3e74a29d1073f5d7c41398ab6b7 100644 (file)
@@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct als_data *data = i2c_get_clientdata(client);
-       unsigned int ret_val;
+       int ret_val;
        unsigned long val;
 
        if (strict_strtoul(buf, 10, &val))
index cee632e645e1982fee9e99ef84e065daf0d17d39..d79a972f2c799c11b2aa4780efb86417eff13854 100644 (file)
@@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev,
 {
        struct bh1770_chip *chip =  dev_get_drvdata(dev);
        unsigned long value;
-       size_t ret;
+       ssize_t ret;
 
        if (strict_strtoul(buf, 0, &value))
                return -EINVAL;
@@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev,
                pm_runtime_get_sync(dev);
 
                ret = bh1770_lux_rate(chip, chip->lux_rate_index);
-               ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+               if (ret < 0) {
+                       pm_runtime_put(dev);
+                       goto leave;
+               }
 
+               ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
                if (ret < 0) {
                        pm_runtime_put(dev);
                        goto leave;
index 34fe835921c4b751beafaff119ca97fb352e63f6..ca47e62850752b80772bd3d77a246bfe1109b742 100644 (file)
@@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
                struct device_attribute *attr, const  char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       unsigned int ret_val;
+       int ret_val;
        unsigned long val;
 
        if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev,
                val = 4;
 
        ret_val = i2c_smbus_read_byte_data(client, 0x00);
+       if (ret_val < 0)
+               return ret_val;
 
        ret_val &= 0xFC; /*reset the bit before setting them */
        ret_val |= val - 1;
index 43579b3b24acce4e23caac9e9ce1d2f2f811af44..53363108994ee93bed670940b0eb91170a389456 100644 (file)
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
        atl1_pcie_patch(adapter);
        /* assume we have no link for now */
        netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
 
        setup_timer(&adapter->phy_config_timer, atl1_phy_config,
                    (unsigned long)adapter);
index 9eea225decaf724e7d88243fc8c462c62020f81e..863e73a85fbe0ef4cd48ebac5cd5e95846397ebc 100644 (file)
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-3"
-#define DRV_MODULE_RELDATE      "2010/10/19"
+#define DRV_MODULE_VERSION      "1.60.00-4"
+#define DRV_MODULE_RELDATE      "2010/11/01"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
index 18c8e23a0e82fafaabe0183700960a194c9524af..4cfd4e9b5586f1740d547b02aa8f86e756c86033 100644 (file)
@@ -244,7 +244,14 @@ struct port_hw_cfg {                           /* port 0: 0x12c  port 1: 0x2bc */
 
        u16 xgxs_config_tx[4];                              /* 0x1A0 */
 
-       u32 Reserved1[57];                                  /* 0x1A8 */
+       u32 Reserved1[56];                                  /* 0x1A8 */
+       u32 default_cfg;                                    /* 0x288 */
+       /*  Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK                    0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
+
        u32 speed_capability_mask2;                         /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK                0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT               0
index 2326774df843841315d783dfa6c53a073a2ef4b5..580919619252e3d0594f5365226bc295a8efcba9 100644 (file)
@@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
        /* reset and unreset the BigMac */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
                     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-       udelay(10);
+       msleep(1);
 
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
                     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
        /* Enable CL37 BAM */
-       bnx2x_cl45_read(bp, phy,
-                       MDIO_AN_DEVAD,
-                       MDIO_AN_REG_8073_BAM, &val);
-       bnx2x_cl45_write(bp, phy,
-                        MDIO_AN_DEVAD,
-                        MDIO_AN_REG_8073_BAM, val | 1);
+       if (REG_RD(bp, params->shmem_base +
+                        offsetof(struct shmem_region, dev_info.
+                                 port_hw_config[params->port].default_cfg)) &
+           PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_AN_DEVAD,
+                               MDIO_AN_REG_8073_BAM, &val);
+               bnx2x_cl45_write(bp, phy,
+                                MDIO_AN_DEVAD,
+                                MDIO_AN_REG_8073_BAM, val | 1);
+               DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+       }
        if (params->loopback_mode == LOOPBACK_EXT) {
                bnx2x_807x_force_10G(bp, phy);
                DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
 {
        struct bnx2x *bp = params->bp;
        u16 autoneg_val, an_1000_val, an_10_100_val;
-       bnx2x_wait_reset_complete(bp, phy);
+
        bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
                      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
 
        /* HW reset */
        bnx2x_ext_phy_hw_reset(bp, params->port);
+       bnx2x_wait_reset_complete(bp, phy);
 
        bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                                  struct link_vars *vars)
 {
        struct bnx2x *bp = params->bp;
-       u8 port = params->port, initialize = 1;
+       u8 port, initialize = 1;
        u16 val;
        u16 temp;
        u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
 
        msleep(1);
+       if (CHIP_IS_E2(bp))
+               port = BP_PATH(bp);
+       else
+               port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
                       port);
-       msleep(200); /* 100 is not enough */
-
+       bnx2x_wait_reset_complete(bp, phy);
+       /* Wait for GPHY to come out of reset */
+       msleep(50);
        /* BCM84823 requires that XGXS links up first @ 10G for normal
        behavior */
        temp = vars->line_speed;
@@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
                                   struct link_params *params)
 {
        struct bnx2x *bp = params->bp;
-       u8 port = params->port;
+       u8 port;
+       if (CHIP_IS_E2(bp))
+               port = BP_PATH(bp);
+       else
+               port = params->port;
        bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
                            MISC_REGISTERS_GPIO_OUTPUT_LOW,
                            port);
@@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                  u8 reset_ext_phy)
 {
        struct bnx2x *bp = params->bp;
-       u8 phy_index, port = params->port;
+       u8 phy_index, port = params->port, clear_latch_ind = 0;
        DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
        /* disable attentions */
        vars->link_status = 0;
@@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
                                params->phy[phy_index].link_reset(
                                        &params->phy[phy_index],
                                        params);
+                       if (params->phy[phy_index].flags &
+                           FLAGS_REARM_LATCH_SIGNAL)
+                               clear_latch_ind = 1;
                }
        }
 
+       if (clear_latch_ind) {
+               /* Clear latching indication */
+               bnx2x_rearm_latch_signal(bp, port, 0);
+               bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+                              1 << NIG_LATCH_BC_ENABLE_MI_INT);
+       }
        if (params->phy[INT_PHY].link_reset)
                params->phy[INT_PHY].link_reset(
                        &params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
        s8 port;
        s8 port_of_path = 0;
 
+       bnx2x_ext_phy_hw_reset(bp, 0);
        /* PART1 - Reset both phys */
        for (port = PORT_MAX - 1; port >= PORT_0; port--) {
                u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
                        return -EINVAL;
                }
                /* disable attentions */
-               bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+               bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+                              port_of_path*4,
                             (NIG_MASK_XGXS0_LINK_STATUS |
                              NIG_MASK_XGXS0_LINK10G |
                              NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
                (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
        REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 
-       bnx2x_ext_phy_hw_reset(bp, 1);
+       bnx2x_ext_phy_hw_reset(bp, 0);
        msleep(5);
        for (port = 0; port < PORT_MAX; port++) {
                u32 shmem_base, shmem2_base;
index 8427533fe313c35cb38727e533cd77dd35cdb4af..8b4cea57a6c5c47f585f5fcae935771e575dcd18 100644 (file)
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 MODULE_DESCRIPTION("CAIF SPI driver");
 
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
 static int spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
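
PAD_POW2(x, pow) returns how many bytes must be added to x to reach the next multiple of pow, and only works when pow is a non-zero power of two (hence the warning added to the module parameters below). A few worked values:

        PAD_POW2(5, 4);         /* 5 & 3 = 1, so 4 - 1 = 3 padding bytes */
        PAD_POW2(8, 4);         /* 8 & 3 = 0, so no padding is needed    */
        PAD_POW2(7, 16);        /* 16 - (7 & 15) = 9 padding bytes       */
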
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
 module_param(spi_frm_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & mask is used) and cannot be zero!
+ */
 module_param(spi_up_head_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
 
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
 static const struct file_operations dbgfs_state_fops = {
        .open = dbgfs_open,
        .read = dbgfs_state,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
+       .owner = THIS_MODULE
 };
 
 static const struct file_operations dbgfs_frame_fops = {
        .open = dbgfs_open,
        .read = dbgfs_frame,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
+       .owner = THIS_MODULE
 };
 
 static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
        u8 *dst = buf;
        caif_assert(buf);
 
+       if (cfspi->slave && !cfspi->slave_talked)
+               cfspi->slave_talked = true;
+
        do {
                struct sk_buff *skb;
                struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
                 * Compute head offset i.e. number of bytes to add to
                 * get the start of the payload aligned.
                 */
-               if (spi_up_head_align) {
-                       spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+               if (spi_up_head_align > 1) {
+                       spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
                        *dst = (u8)(spad - 1);
                        dst += spad;
                }
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
                 * Compute tail offset i.e. number of bytes to add to
                 * get the complete CAIF frame aligned.
                 */
-               epad = (skb->len + spad) & spi_up_tail_align;
+               epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
                dst += epad;
 
                dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
                 * Compute head offset i.e. number of bytes to add to
                 * get the start of the payload aligned.
                 */
-               if (spi_up_head_align)
-                       spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+               if (spi_up_head_align > 1)
+                       spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 
                /*
                 * Compute tail offset i.e. number of bytes to add to
                 * get the complete CAIF frame aligned.
                 */
-               epad = (skb->len + spad) & spi_up_tail_align;
+               epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 
                if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
                        skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
                } else {
                        /* Put back packet. */
                        skb_queue_head(&cfspi->qhead, skb);
+                       break;
                }
        } while (pkts <= CAIF_MAX_SPI_PKTS);
 
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
 {
        struct cfspi *cfspi = (struct cfspi *)ifc->priv;
 
+       /*
+        * The slave device is the master on the link. Interrupts before the
+        * slave has transmitted are considered spurious.
+        */
+       if (cfspi->slave && !cfspi->slave_talked) {
+               printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+               return;
+       }
+
        if (!in_interrupt())
                spin_lock(&cfspi->lock);
        if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
                spin_unlock(&cfspi->lock);
 
        /* Wake up the xfer thread. */
-       wake_up_interruptible(&cfspi->wait);
+       if (assert)
+               wake_up_interruptible(&cfspi->wait);
 }
 
 static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
                 * Compute head offset i.e. number of bytes added to
                 * get the start of the payload aligned.
                 */
-               if (spi_down_head_align) {
+               if (spi_down_head_align > 1) {
                        spad = 1 + *src;
                        src += spad;
                }
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
                 * Compute tail offset i.e. number of bytes added to
                 * get the complete CAIF frame aligned.
                 */
-               epad = (pkt_len + spad) & spi_down_tail_align;
+               epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
                src += epad;
        } while ((src - buf) < len);
 
@@ -625,11 +643,20 @@ int cfspi_spi_probe(struct platform_device *pdev)
        cfspi->ndev = ndev;
        cfspi->pdev = pdev;
 
-       /* Set flow info */
+       /* Set flow info. */
        cfspi->flow_off_sent = 0;
        cfspi->qd_low_mark = LOW_WATER_MARK;
        cfspi->qd_high_mark = HIGH_WATER_MARK;
 
+       /* Set slave info. */
+       if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+               cfspi->slave = true;
+               cfspi->slave_talked = false;
+       } else {
+               cfspi->slave = false;
+               cfspi->slave_talked = false;
+       }
+
        /* Assign the SPI device. */
        cfspi->dev = dev;
        /* Assign the device ifc to this SPI interface. */
index 2111dbfea6feb8b53f56e73567e54fd4d11a818b..1b9943a4edabb8f48678d7384ac2141d47264908 100644 (file)
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 #endif
 
 int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & mask is used) and cannot be zero!
+ */
+int spi_up_head_align   = 1 << 1;
+int spi_up_tail_align   = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
 
 #ifdef CONFIG_DEBUG_FS
 static inline void debugfs_store_prev(struct cfspi *cfspi)
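
With PAD_POW2() in use (see the caif_spi.c hunks above), these parameters are now interpreted as alignment sizes (the macro derives the mask by subtracting one) rather than as raw bit masks, which is why the defaults become explicit powers of two. A worked example with the new defaults (skb_len stands for the payload length of the queued frame):

        /* spi_up_head_align = 1 << 1: align the payload start to 2 bytes */
        spad = 1 + PAD_POW2(info->hdr_len + 1, spi_up_head_align);

        /* spi_up_tail_align = 1 << 0: PAD_POW2(x, 1) is always 0, no tail pad */
        epad = PAD_POW2(skb_len + spad, spi_up_tail_align);
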
index 407d4e2720750246356af4cf7fd627fe9cad3d91..046d846c652dc4c807bb79b9ee5ffe343368919d 100644 (file)
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
                                adapter->name = adapter->port[i]->name;
 
                        __set_bit(i, &adapter->registered_device_map);
-                       netif_tx_stop_all_queues(adapter->port[i]);
                }
        }
        if (!adapter->registered_device_map) {
index f17703f410b3673a096aec05f9085329650c5c52..f50bc98310f8b319cf8cdfe1f1b63a23fff19e72 100644 (file)
@@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 
                        __set_bit(i, &adapter->registered_device_map);
                        adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
-                       netif_tx_stop_all_queues(adapter->port[i]);
                }
        }
        if (!adapter->registered_device_map) {
index 555ecc5a2e939b25028ac6a61cf2792da379caa7..6de5e2e448a5671afcdbb441e3810531690ddfb0 100644 (file)
@@ -2600,7 +2600,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
                pi->xact_addr_filt = -1;
                pi->rx_offload = RX_CSO;
                netif_carrier_off(netdev);
-               netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
 
                netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
index 385dc3204cb7eefbbaa184f0601267abe7c392ee..06bb9b7994585318bb723046a0804a39a4a4a5e8 100644 (file)
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
        SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
        netif_carrier_off(ndev);
-       netif_stop_queue(ndev);
 
        err = register_netdev(ndev);
        if (err) {
index d85edf3119c2625d999b22a41b6159ba9ebf7a24..c57d9a43cecacbf8a668a0c4b1cca5e7b93c3521 100644 (file)
@@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
         * Tell stack that we are not ready to work until open()
         */
        netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
 
-       /*
-        * Register netdev
-        */
        rc = register_netdev(netdev);
        if (rc) {
                pr_err("Cannot register net device\n");
index a75ba9517404d94ca7c3da59472cfb80a57025c6..e1d30d7f207121a5de4636fd85b8bf7d9b76acb5 100644 (file)
@@ -41,9 +41,6 @@
 MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
 MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";
index 7a298cdf9ab398135b2f59df9c7ad642c8f8840b..a3dcd04be22f6291e2546f84e474cef31e7fae34 100644 (file)
@@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
        netdev->irq = adapter->msix_entries[0].vector;
 
        netif_carrier_off(netdev);
-       netif_stop_queue(netdev);
 
        err = register_netdev(netdev);
        if (err) {
index 52f38e12a879db9d344fb80f169f05eba31fa860..50f712e99e9634d2a348ba497a146b3d86c9dfde 100644 (file)
@@ -22,7 +22,7 @@
 #define __SMSC911X_H__
 
 #define TX_FIFO_LOW_THRESHOLD  ((u32)1600)
-#define SMSC911X_EEPROM_SIZE   ((u32)7)
+#define SMSC911X_EEPROM_SIZE   ((u32)128)
 #define USE_DEBUG              0
 
 /* This is the maximum number of packets to be received every
index 28e1ffb13db99df5b0537a7426b2af3c03980feb..c78a50586c1d86a0fffa9a59ab461a1996da8122 100644 (file)
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
        de->media_timer.data = (unsigned long) de;
 
        netif_carrier_off(dev);
-       netif_stop_queue(dev);
 
        /* wake up device, assign resources */
        rc = pci_enable_device(pdev);
index ca7fc9df1ccf900533d56c35326b360316283af2..c04d49e31f814fe11d2f1b730dcba014afeb324c 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION         "22-Aug-2005"
 
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        struct usb_device               *xdev;
        int                             status;
        const char                      *name;
+       struct usb_driver       *driver = to_usb_driver(udev->dev.driver);
+
+       /* usbnet already takes a usb runtime pm reference, so autosuspend has
+        * to be enabled for the usb interface; otherwise
+        * usb_autopm_get_interface may fail if USB_SUSPEND (runtime PM) is enabled.
+        */
+       if (!driver->supports_autosuspend) {
+               driver->supports_autosuspend = 1;
+               pm_runtime_enable(&udev->dev);
+       }
 
        name = udev->dev.driver->name;
        info = (struct driver_info *) prod->driver_info;
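
The probe hunk force-enables autosuspend support on the owning usb_driver so that later autopm calls do not fail when runtime PM is built in. A hedged sketch of the get/put pairing a minidriver relies on afterwards (intf stands for the device's struct usb_interface pointer):

        int ret;

        /* hold a runtime-PM reference while the device must stay awake */
        ret = usb_autopm_get_interface(intf);
        if (ret < 0)
                return ret;

        /* ... submit URBs, access registers ... */

        usb_autopm_put_interface(intf);
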
index 32dee2ce5d3177706f8bc7394794952a87389324..d5ef696298eed37db811f55e60c0b7f3c0ded6f6 100644 (file)
@@ -54,6 +54,7 @@
 
 #define DRV_DESCRIPTION "802.11 data/management/control stack"
 #define DRV_NAME        "libipw"
+#define DRV_PROCNAME   "ieee80211"
 #define DRV_VERSION    LIBIPW_VERSION
 #define DRV_COPYRIGHT   "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
 
@@ -293,16 +294,16 @@ static int __init libipw_init(void)
        struct proc_dir_entry *e;
 
        libipw_debug_level = debug;
-       libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
+       libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
        if (libipw_proc == NULL) {
-               LIBIPW_ERROR("Unable to create " DRV_NAME
+               LIBIPW_ERROR("Unable to create " DRV_PROCNAME
                                " proc directory\n");
                return -EIO;
        }
        e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
                        &debug_level_proc_fops);
        if (!e) {
-               remove_proc_entry(DRV_NAME, init_net.proc_net);
+               remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
                libipw_proc = NULL;
                return -EIO;
        }
@@ -319,7 +320,7 @@ static void __exit libipw_exit(void)
 #ifdef CONFIG_LIBIPW_DEBUG
        if (libipw_proc) {
                remove_proc_entry("debug_level", libipw_proc);
-               remove_proc_entry(DRV_NAME, init_net.proc_net);
+               remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
                libipw_proc = NULL;
        }
 #endif                         /* CONFIG_LIBIPW_DEBUG */
index eea1ef2f502bd3c926599b2308901277e35250ef..4396d4b9bfb9a68ed3c0d7b41a96a80963d30c9f 100644 (file)
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
        boolean
        default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
-       depends on RT2X00_LIB=y && LEDS_CLASS=m
-
 config RT2X00_LIB_DEBUGFS
        bool "Ralink debugfs support"
        depends on RT2X00_LIB && MAC80211_DEBUGFS
index 68cf0c99138a94517f3f2e9dae03de4149049092..7b5080c455690941a97294f03583b64e109927ce 100644 (file)
@@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void)
 
        list_for_each_entry(port, &rio_mports, node) {
                if (!request_mem_region(port->iores.start,
-                                       port->iores.end - port->iores.start,
+                                       resource_size(&port->iores),
                                        port->name)) {
                        printk(KERN_ERR
                               "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
-                              (u64)port->iores.start, (u64)port->iores.end - 1);
+                              (u64)port->iores.start, (u64)port->iores.end);
                        rc = -ENOMEM;
                        goto out;
                }
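
resource_size() (from linux/ioport.h) accounts for struct resource ranges being inclusive, which fixes the one-byte-short region length here and lets the error message print the true end address. The helper is essentially:

        static inline resource_size_t resource_size(const struct resource *res)
        {
                return res->end - res->start + 1;       /* [start, end] is inclusive */
        }
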
index 359d1e04626cb938784290a5700058a3115d7b34..f0d63892264410b23e19753885868cca81071eda 100644 (file)
@@ -35,7 +35,7 @@
 
 #ifdef CONFIG_SH_SECUREEDGE5410
 #include <asm/rtc.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
 
 #define        RTC_RESET       0x1000
 #define        RTC_IODATA      0x0800
index 938d5036016687d06267d3fab991eb841e122e20..b464ae01086cfef7773814c765418f81863e695e 100644 (file)
@@ -270,7 +270,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
        if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
                sense = (char *) &fcp_rsp[1];
                if (rsp_flags & FCP_RSP_LEN_VAL)
-                       sense += fcp_rsp->ext.fr_sns_len;
+                       sense += fcp_rsp->ext.fr_rsp_len;
                sense_len = min(fcp_rsp->ext.fr_sns_len,
                                (u32) SCSI_SENSE_BUFFERSIZE);
                memcpy(scsi->sense_buffer, sense, sense_len);
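
In an FCP response the sense data follows the variable-length response-info field, so the offset to skip is the response-info length (fr_rsp_len), not the sense length itself. A schematic of the layout this hunk assumes, using the field names from the code:

        /*
         *  | struct fcp_resp_with_ext | response info      | sense data         |
         *  | (fcp_rsp[0])             | fr_rsp_len bytes   | fr_sns_len bytes   |
         */
        sense = (char *)&fcp_rsp[1];
        if (rsp_flags & FCP_RSP_LEN_VAL)
                sense += fcp_rsp->ext.fr_rsp_len;       /* skip the response info */
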
index beaf0916ceab73e627c95931aa6186ab3df7dd55..be0317457147fc4a891f4b0be67c8116b98526b1 100644 (file)
@@ -532,9 +532,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
                fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
                adapter->hydra_version = 0;
 
-               atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-                               &adapter->status);
-
                zfcp_fsf_link_down_info_eval(req,
                        &qtcb->header.fsf_status_qual.link_down_info);
                break;
index 1119c535a667318381409eee3626bc2dcb85f1fa..20796ebc33cec3435cb5b2a2de297580e534fb8c 100644 (file)
@@ -142,6 +142,8 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
                return -ENOMEM;
        }
 
+       get_device(&port->dev);
+
        if (device_register(&unit->dev)) {
                put_device(&unit->dev);
                return -ENOMEM;
@@ -152,8 +154,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
                return -EINVAL;
        }
 
-       get_device(&port->dev);
-
        write_lock_irq(&port->unit_list_lock);
        list_add_tail(&unit->list, &port->unit_list);
        write_unlock_irq(&port->unit_list_lock);
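
Taking the port reference before device_register() keeps the refcounting balanced on the error path as well: once registration has been attempted, put_device() may invoke the unit's release callback, which (as is usual for such parent/child objects) is expected to drop the port reference. A condensed view of the resulting ordering:

        get_device(&port->dev);                 /* dropped again by the unit's release() */

        if (device_register(&unit->dev)) {
                put_device(&unit->dev);         /* release() runs and puts the port ref */
                return -ENOMEM;
        }
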
index ceaac65a91ff4f9cae03ea55197c2266948b5d21..ff2bd07161f746a1c3dee089807628b6fe90a840 100644 (file)
@@ -29,13 +29,13 @@ struct bfa_s;
 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
 typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
 
-/**
+/*
  * Interrupt message handlers
  */
 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
 void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
-/**
+/*
  * Request and response queue related defines
  */
 #define BFA_REQQ_NELEMS_MIN    (4)
@@ -58,9 +58,9 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 #define bfa_reqq_produce(__bfa, __reqq)        do {                            \
                (__bfa)->iocfc.req_cq_pi[__reqq]++;                     \
                (__bfa)->iocfc.req_cq_pi[__reqq] &=                     \
-                       ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1);      \
-               bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \
-                             (__bfa)->iocfc.req_cq_pi[__reqq]);      \
+                       ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+               writel((__bfa)->iocfc.req_cq_pi[__reqq],                \
+                       (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]);      \
                mmiowb();      \
        } while (0)
 
@@ -76,7 +76,7 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
        (__index) &= ((__size) - 1);                    \
 } while (0)
 
-/**
+/*
  * Queue element to wait for room in request queue. FIFO order is
  * maintained when fulfilling requests.
  */
@@ -86,7 +86,7 @@ struct bfa_reqq_wait_s {
        void            *cbarg;
 };
 
-/**
+/*
  * Circular queue usage assignments
  */
 enum {
@@ -113,7 +113,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 
 #define bfa_reqq(__bfa, __reqq)        (&(__bfa)->reqq_waitq[__reqq])
 
-/**
+/*
  * static inline void
  * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
  */
@@ -130,7 +130,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
 #define bfa_reqq_wcancel(__wqe)        list_del(&(__wqe)->qe)
 
 
-/**
+/*
  * Generic BFA callback element.
  */
 struct bfa_cb_qe_s {
@@ -163,7 +163,7 @@ struct bfa_cb_qe_s {
        } while (0)
 
 
-/**
+/*
  * PCI devices supported by the current BFA
  */
 struct bfa_pciid_s {
@@ -173,7 +173,7 @@ struct bfa_pciid_s {
 
 extern char     bfa_version[];
 
-/**
+/*
  * BFA memory resources
  */
 enum bfa_mem_type {
@@ -202,19 +202,19 @@ struct bfa_meminfo_s {
        ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
 
 struct bfa_iocfc_regs_s {
-       bfa_os_addr_t   intr_status;
-       bfa_os_addr_t   intr_mask;
-       bfa_os_addr_t   cpe_q_pi[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   cpe_q_ci[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   cpe_q_depth[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   cpe_q_ctrl[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   rme_q_ci[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   rme_q_pi[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   rme_q_depth[BFI_IOC_MAX_CQS];
-       bfa_os_addr_t   rme_q_ctrl[BFI_IOC_MAX_CQS];
+       void __iomem    *intr_status;
+       void __iomem    *intr_mask;
+       void __iomem    *cpe_q_pi[BFI_IOC_MAX_CQS];
+       void __iomem    *cpe_q_ci[BFI_IOC_MAX_CQS];
+       void __iomem    *cpe_q_depth[BFI_IOC_MAX_CQS];
+       void __iomem    *cpe_q_ctrl[BFI_IOC_MAX_CQS];
+       void __iomem    *rme_q_ci[BFI_IOC_MAX_CQS];
+       void __iomem    *rme_q_pi[BFI_IOC_MAX_CQS];
+       void __iomem    *rme_q_depth[BFI_IOC_MAX_CQS];
+       void __iomem    *rme_q_ctrl[BFI_IOC_MAX_CQS];
 };
 
-/**
+/*
  * MSIX vector handlers
  */
 #define BFA_MSIX_MAX_VECTORS   22
@@ -224,7 +224,7 @@ struct bfa_msix_s {
        bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
 };
 
-/**
+/*
  * Chip specific interfaces
  */
 struct bfa_hwif_s {
@@ -343,7 +343,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
                                struct bfi_pbc_vport_s *pbc_vport);
 
 
-/**
+/*
  *----------------------------------------------------------------------
  *             BFA public interfaces
  *----------------------------------------------------------------------
index a989a94c38da3f43238bbb875d65c22b9dec2629..6f021015f1f67c14fd400b38a2669683db73661b 100644 (file)
@@ -37,18 +37,18 @@ bfad_int_to_lun(u32 luno)
        } lun;
 
        lun.bfa_lun     = 0;
-       lun.scsi_lun[0] = bfa_os_htons(luno);
+       lun.scsi_lun[0] = cpu_to_be16(luno);
 
        return lun.bfa_lun;
 }
 
-/**
+/*
  * Get LUN for the I/O request
  */
 #define bfa_cb_ioim_get_lun(__dio)     \
        bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
 
-/**
+/*
  * Get CDB for the I/O request
  */
 static inline u8 *
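
The bfa_os_htons()/bfa_os_ntohs() wrappers are replaced throughout by the kernel's explicit fixed-endian helpers: cpu_to_be16() converts host order to the big-endian format the firmware expects, be16_to_cpu() converts back, and the __be16 typing lets sparse flag mixed usage. For example:

        __be16 wire;
        u16 host;

        wire = cpu_to_be16(0x1234);     /* host order -> big endian (no-op on BE CPUs) */
        host = be16_to_cpu(wire);       /* big endian -> host order */
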
@@ -59,7 +59,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio)
        return (u8 *) cmnd->cmnd;
 }
 
-/**
+/*
  * Get I/O direction (read/write) for the I/O request
  */
 static inline enum fcp_iodir
@@ -77,7 +77,7 @@ bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio)
                return FCP_IODIR_NONE;
 }
 
-/**
+/*
  * Get IO size in bytes for the I/O request
  */
 static inline u32
@@ -88,7 +88,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio)
        return scsi_bufflen(cmnd);
 }
 
-/**
+/*
  * Get timeout for the I/O request
  */
 static inline u8
@@ -104,7 +104,7 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio)
        return 0;
 }
 
-/**
+/*
  * Get Command Reference Number for the I/O request. 0 if none.
  */
 static inline u8
@@ -113,7 +113,7 @@ bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio)
        return 0;
 }
 
-/**
+/*
  * Get SAM-3 priority for the I/O request. 0 is default.
  */
 static inline u8
@@ -122,7 +122,7 @@ bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio)
        return 0;
 }
 
-/**
+/*
  * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
  */
 static inline u8
@@ -148,7 +148,7 @@ bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
        return task_attr;
 }
 
-/**
+/*
  * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
  */
 static inline u8
@@ -159,7 +159,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
        return cmnd->cmd_len;
 }
 
-/**
+/*
  * Assign queue to be used for the I/O request. This value depends on whether
  * the driver wants to use the queues via any specific algorithm. Currently,
  * this is not supported.
index c2fa07f2485dc909bc7910efa925ee8c01bcb877..2345f48dc57fc9a3940665d75b11a3a205f22b16 100644 (file)
 
 BFA_TRC_FILE(HAL, CORE);
 
-/**
+/*
  * BFA IOC FC related definitions
  */
 
-/**
+/*
  * IOC local definitions
  */
 #define BFA_IOCFC_TOV          5000    /* msecs */
@@ -54,7 +54,7 @@ enum {
 #define DEF_CFG_NUM_SBOOT_TGTS         16
 #define DEF_CFG_NUM_SBOOT_LUNS         16
 
-/**
+/*
  * forward declaration for IOC FC functions
  */
 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
 
-/**
+/*
  * BFA Interrupt handling functions
  */
 static void
@@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
 
        waitq = bfa_reqq(bfa, qid);
        list_for_each_safe(qe, qen, waitq) {
-               /**
+               /*
                 * Callback only as long as there is room in request queue
                 */
                if (bfa_reqq_full(bfa, qid))
@@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec)
        bfa_intx(bfa);
 }
 
-/**
+/*
  *  hal_intr_api
  */
 bfa_boolean_t
@@ -113,15 +113,15 @@ bfa_intx(struct bfa_s *bfa)
        u32 intr, qintr;
        int queue;
 
-       intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+       intr = readl(bfa->iocfc.bfa_regs.intr_status);
        if (!intr)
                return BFA_FALSE;
 
-       /**
+       /*
         * RME completion queue interrupt
         */
        qintr = intr & __HFN_INT_RME_MASK;
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
        for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
                if (intr & (__HFN_INT_RME_Q0 << queue))
@@ -131,11 +131,11 @@ bfa_intx(struct bfa_s *bfa)
        if (!intr)
                return BFA_TRUE;
 
-       /**
+       /*
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+       writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
        for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
                if (intr & (__HFN_INT_CPE_Q0 << queue))
@@ -153,13 +153,13 @@ bfa_intx(struct bfa_s *bfa)
 void
 bfa_intx_enable(struct bfa_s *bfa)
 {
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
+       writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
 }
 
 void
 bfa_intx_disable(struct bfa_s *bfa)
 {
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+       writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
 }
 
 void
@@ -188,8 +188,8 @@ bfa_isr_enable(struct bfa_s *bfa)
                                __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
                                __HFN_INT_MBOX_LPU1);
 
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
+       writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
+       writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
        bfa->iocfc.intr_mask = ~intr_unmask;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 }
@@ -198,7 +198,7 @@ void
 bfa_isr_disable(struct bfa_s *bfa)
 {
        bfa_isr_mode_set(bfa, BFA_FALSE);
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+       writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
        bfa_msix_uninstall(bfa);
 }
 
@@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid)
 
        bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
 
-       /**
+       /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
@@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid)
                }
        }
 
-       /**
+       /*
         * update CI
         */
        bfa_rspq_ci(bfa, qid) = pi;
-       bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
+       writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
        mmiowb();
 
-       /**
+       /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
@@ -279,7 +279,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 {
        u32 intr, curr_value;
 
-       intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+       intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
        if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
                bfa_msix_lpu(bfa);
@@ -289,30 +289,30 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 
        if (intr) {
                if (intr & __HFN_INT_LL_HALT) {
-                       /**
+                       /*
                         * If LL_HALT bit is set then FW Init Halt LL Port
                         * Register needs to be cleared as well so Interrupt
                         * Status Register will be cleared.
                         */
-                       curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+                       curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
                        curr_value &= ~__FW_INIT_HALT_P;
-                       bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+                       writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
                }
 
                if (intr & __HFN_INT_ERR_PSS) {
-                       /**
+                       /*
                         * ERR_PSS bit needs to be cleared as well in case
                         * interrupts are shared so the driver's interrupt handler is
                         * still called even though it is already masked out.
                         */
-                       curr_value = bfa_reg_read(
+                       curr_value = readl(
                                        bfa->ioc.ioc_regs.pss_err_status_reg);
                        curr_value &= __PSS_ERR_STATUS_SET;
-                       bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
-                                       curr_value);
+                       writel(curr_value,
+                               bfa->ioc.ioc_regs.pss_err_status_reg);
                }
 
-               bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
+               writel(intr, bfa->iocfc.bfa_regs.intr_status);
                bfa_msix_errint(bfa, intr);
        }
 }
@@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
        bfa_isrs[mc] = isr_func;
 }
 
-/**
+/*
  * BFA IOC FC related functions
  */
 
-/**
+/*
  *  hal_ioc_pvt BFA IOC private functions
  */
 
@@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
                            BFA_CACHELINE_SZ);
 }
 
-/**
+/*
  * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
  */
 static void
@@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg)
 
        bfa_iocfc_reset_queues(bfa);
 
-       /**
+       /*
         * initialize IOC configuration info
         */
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;
 
        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
-       /**
+       /*
         * dma map REQ and RSP circular queues and shadow pointers
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@@ -400,17 +400,17 @@ bfa_iocfc_send_cfg(void *bfa_arg)
                bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
                                    iocfc->req_cq_shadow_ci[i].pa);
                cfg_info->req_cq_elems[i] =
-                       bfa_os_htons(cfg->drvcfg.num_reqq_elems);
+                       cpu_to_be16(cfg->drvcfg.num_reqq_elems);
 
                bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
                                    iocfc->rsp_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
                                    iocfc->rsp_cq_shadow_pi[i].pa);
                cfg_info->rsp_cq_elems[i] =
-                       bfa_os_htons(cfg->drvcfg.num_rspq_elems);
+                       cpu_to_be16(cfg->drvcfg.num_rspq_elems);
        }
 
-       /**
+       /*
         * Enable interrupt coalescing if it is driver init path
         * and not ioc disable/enable path.
         */
@@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
 
        iocfc->cfgdone = BFA_FALSE;
 
-       /**
+       /*
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -440,9 +440,9 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        iocfc->bfa = bfa;
        iocfc->action = BFA_IOCFC_ACT_NONE;
 
-       bfa_os_assign(iocfc->cfg, *cfg);
+       iocfc->cfg = *cfg;
 
-       /**
+       /*
         * Initialize chip specific handlers.
         */
        if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@@ -503,13 +503,13 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_ba[i].kva = dm_kva;
                iocfc->req_cq_ba[i].pa = dm_pa;
-               bfa_os_memset(dm_kva, 0, per_reqq_sz);
+               memset(dm_kva, 0, per_reqq_sz);
                dm_kva += per_reqq_sz;
                dm_pa += per_reqq_sz;
 
                iocfc->rsp_cq_ba[i].kva = dm_kva;
                iocfc->rsp_cq_ba[i].pa = dm_pa;
-               bfa_os_memset(dm_kva, 0, per_rspq_sz);
+               memset(dm_kva, 0, per_rspq_sz);
                dm_kva += per_rspq_sz;
                dm_pa += per_rspq_sz;
        }
@@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
        }
 }
 
-/**
+/*
  * Start BFA submodules.
  */
 static void
@@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
                hal_mods[i]->start(bfa);
 }
 
-/**
+/*
  * Disable BFA submodules.
  */
 static void
@@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
                complete(&bfad->disable_comp);
 }
 
-/**
+/*
  * Update BFA configuration from firmware configuration.
  */
 static void
@@ -634,15 +634,15 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
        struct bfa_iocfc_fwcfg_s        *fwcfg   = &cfgrsp->fwcfg;
 
        fwcfg->num_cqs        = fwcfg->num_cqs;
-       fwcfg->num_ioim_reqs  = bfa_os_ntohs(fwcfg->num_ioim_reqs);
-       fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
-       fwcfg->num_fcxp_reqs  = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
-       fwcfg->num_uf_bufs    = bfa_os_ntohs(fwcfg->num_uf_bufs);
-       fwcfg->num_rports     = bfa_os_ntohs(fwcfg->num_rports);
+       fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
+       fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
+       fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
+       fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
+       fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
 
        iocfc->cfgdone = BFA_TRUE;
 
-       /**
+       /*
         * Configuration is complete - initialize/start submodules
         */
        bfa_fcport_init(bfa);
@@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
        }
 }
 
-/**
+/*
  * IOC enable request is complete
  */
 static void
@@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
        bfa_iocfc_send_cfg(bfa);
 }
 
-/**
+/*
  * IOC disable request is complete
  */
 static void
@@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
        }
 }
 
-/**
+/*
  * Notify sub-modules of hardware failure.
  */
 static void
@@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
                             bfa);
 }
 
-/**
+/*
  * Actions on chip-reset completion.
  */
 static void
@@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
        bfa_isr_enable(bfa);
 }
 
-/**
+/*
  *  hal_ioc_public
  */
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
        *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        ioc->trcmod = bfa->trcmod;
        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
 
-       /**
+       /*
         * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
         */
        if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa)
        bfa_ioc_detach(&bfa->ioc);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
        bfa_ioc_enable(&bfa->ioc);
 }
 
-/**
+/*
  * IOC start called from bfa_start(). Called to start IOC operations
  * at driver instantiation for this instance.
  */
@@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa)
                bfa_iocfc_start_submod(bfa);
 }
 
-/**
+/*
  * IOC stop called from bfa_stop(). Called only when driver is unloaded
  * for this instance.
  */
@@ -876,12 +876,12 @@ bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
        attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
 
        attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
-                               bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
-                               bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
+                               be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
+                               be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
 
        attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
-                       bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
-                       bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
+                       be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
+                       be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
 
        attr->config    = iocfc->cfg;
 }
@@ -893,8 +893,8 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
        struct bfi_iocfc_set_intr_req_s *m;
 
        iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
-       iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
-       iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
+       iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
+       iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
 
        if (!bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_OK;
@@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
        bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
 }
-/**
+/*
  * Enable IOC after it is disabled.
  */
 void
@@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa)
        return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
 }
 
-/**
+/*
  * Return boot target port wwns -- read from boot information in flash.
  */
 void
@@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
        return cfgrsp->pbc_cfg.nvports;
 }
 
-/**
+/*
  *  hal_api
  */
 
-/**
+/*
  * Use this function query the memory requirement of the BFA library.
  * This function needs to be called before bfa_attach() to get the
  * memory required of the BFA layer for a given driver configuration.
@@ -1038,7 +1038,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
 
        bfa_assert((cfg != NULL) && (meminfo != NULL));
 
-       bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+       memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
        meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
                BFA_MEM_TYPE_KVA;
        meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
@@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
        meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
 }
 
-/**
+/*
  * Use this function to attach the driver instance to the BFA
  * library. This function will not trigger any HW initialization
  * process (which will be done in bfa_init() call)
@@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 
        bfa_assert((cfg != NULL) && (meminfo != NULL));
 
-       /**
+       /*
         * initialize all memory pointers for iterative allocation
         */
        for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        bfa_com_port_attach(bfa, meminfo);
 }
 
-/**
+/*
  * Use this function to delete a BFA IOC. IOC should be stopped (by
  * calling bfa_stop()) before this function call.
  *
@@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
        bfa->plog = plog;
 }
 
-/**
+/*
  * Initialize IOC.
  *
  * This function will return immediately, when the IOC initialization is
@@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa)
        bfa_iocfc_init(bfa);
 }
 
-/**
+/*
  * Use this function initiate the IOC configuration setup. This function
  * will return immediately.
  *
@@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa)
        bfa_iocfc_start(bfa);
 }
 
-/**
+/*
  * Use this function to quiesce the IOC. This function will return immediately,
  * when the IOC is actually stopped, the bfad->comp will be set.
  *
@@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa)
        bfa->fcs = BFA_TRUE;
 }
 
-/**
+/*
  * Periodic timer heart beat from driver
  */
 void
@@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa)
        bfa_timer_beat(&bfa->timer_mod);
 }
 
-/**
+/*
  * Return the list of PCI vendor/device id lists supported by this
  * BFA instance.
  */
@@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
        *pciids = __pciids;
 }
 
-/**
+/*
  * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
  * into BFA layer). The OS driver can then turn back and overwrite entries that
  * have been configured by the user.
@@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
        bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
 }
 
-/**
+/*
  * Retrieve firmware trace information on IOC failure.
  */
 bfa_status_t
@@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
        return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
 }
 
-/**
+/*
  * Clear the saved firmware trace information of an IOC.
  */
 void
@@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa)
        bfa_ioc_debug_fwsave_clear(&bfa->ioc);
 }
 
-/**
+/*
  * Fetch firmware trace data.
  *
  * @param[in]          bfa                     BFA instance
@@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
        return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
 }
 
-/**
+/*
  * Dump firmware memory.
  *
  * @param[in]          bfa             BFA instance
@@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
 {
        return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
 }
-/**
+/*
  * Reset hw semaphore & usage cnt regs and initialize.
  */
 void
@@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa)
        bfa_ioc_pll_init(&bfa->ioc);
 }
 
-/**
+/*
  * Fetch firmware statistics data.
  *
  * @param[in]          bfa             BFA instance
index 7260c74620f8fa5f2fd4cbe0060d1e87a326d6b4..99f242b9aa3110216fb93915338a86d8c22f7955 100644 (file)
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_cs.h BFA common services
  */
 
@@ -24,7 +24,7 @@
 
 #include "bfa_os_inc.h"
 
-/**
+/*
  * BFA TRC
  */
 
@@ -73,7 +73,7 @@ enum {
 #define BFA_TRC_MOD_SH 10
 #define BFA_TRC_MOD(__mod)     ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
 
-/**
+/*
  * Define a new tracing file (module). Module should match one defined above.
  */
 #define BFA_TRC_FILE(__mod, __submod)                                  \
@@ -155,7 +155,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
 #define bfa_trc_fp(_trcp, _data)
 #endif
 
-/**
+/*
  * @ BFA LOG interfaces
  */
 #define bfa_assert(__cond)     do {                                    \
@@ -249,13 +249,13 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
 #define bfa_q_is_on_q(_q, _qe)      \
        bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
 
-/**
+/*
  * @ BFA state machine interfaces
  */
 
 typedef void (*bfa_sm_t)(void *sm, int event);
 
-/**
+/*
  * oc - object class eg. bfa_ioc
  * st - state, eg. reset
  * otype - object type, eg. struct bfa_ioc_s
@@ -269,7 +269,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
 #define bfa_sm_get_state(_sm)          ((_sm)->sm)
 #define bfa_sm_cmp_state(_sm, _state)  ((_sm)->sm == (bfa_sm_t)(_state))
 
-/**
+/*
  * For converting from state machine function to state encoding.
  */
 struct bfa_sm_table_s {
@@ -279,12 +279,12 @@ struct bfa_sm_table_s {
 };
 #define BFA_SM(_sm)    ((bfa_sm_t)(_sm))
 
-/**
+/*
  * State machine with entry actions.
  */
 typedef void (*bfa_fsm_t)(void *fsm, int event);
 
-/**
+/*
  * oc - object class eg. bfa_ioc
  * st - state, eg. reset
  * otype - object type, eg. struct bfa_ioc_s
@@ -314,7 +314,7 @@ bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
        return smt[i].state;
 }
 
-/**
+/*
  * @ Generic wait counter.
  */
 
@@ -340,7 +340,7 @@ bfa_wc_down(struct bfa_wc_s *wc)
                wc->wc_resume(wc->wc_cbarg);
 }
 
-/**
+/*
  * Initialize a waiting counter.
  */
 static inline void
@@ -352,7 +352,7 @@ bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
        bfa_wc_up(wc);
 }
 
-/**
+/*
  * Wait for counter to reach zero
  */
 static inline void
index d49877ff5140a6856620a8d45831f4104d806d57..4b5b9e35abb9889e72824d01f5ddf361817fc7dd 100644 (file)
@@ -24,7 +24,7 @@
 #define BFA_MFG_SERIALNUM_SIZE                  11
 #define STRSZ(_n)                               (((_n) + 4) & ~3)
 
-/**
+/*
  * Manufacturing card type
  */
 enum {
@@ -45,7 +45,7 @@ enum {
 
 #pragma pack(1)
 
-/**
+/*
  * Check if Mezz card
  */
 #define bfa_mfg_is_mezz(type) (( \
@@ -55,7 +55,7 @@ enum {
        (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
        (type) == BFA_MFG_TYPE_LIGHTNING))
 
-/**
+/*
  * Check if the card having old wwn/mac handling
  */
 #define bfa_mfg_is_old_wwn_mac_model(type) (( \
@@ -78,12 +78,12 @@ do {                                                            \
        (m)[2] = t & 0xFF;                                      \
 } while (0)
 
-/**
+/*
  * VPD data length
  */
 #define BFA_MFG_VPD_LEN                 512
 
-/**
+/*
  * VPD vendor tag
  */
 enum {
@@ -97,7 +97,7 @@ enum {
        BFA_MFG_VPD_PCI_BRCD    = 0xf8,  /*  PCI VPD Brocade            */
 };
 
-/**
+/*
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_vpd_s {
@@ -112,7 +112,7 @@ struct bfa_mfg_vpd_s {
 
 #pragma pack()
 
-/**
+/*
  * Status return values
  */
 enum bfa_status {
@@ -167,11 +167,11 @@ enum bfa_boolean {
 #define BFA_STRING_32  32
 #define BFA_VERSION_LEN 64
 
-/**
+/*
  * ---------------------- adapter definitions ------------
  */
 
-/**
+/*
  * BFA adapter level attributes.
  */
 enum {
@@ -215,7 +215,7 @@ struct bfa_adapter_attr_s {
        u8              trunk_capable;
 };
 
-/**
+/*
  * ---------------------- IOC definitions ------------
  */
 
@@ -224,7 +224,7 @@ enum {
        BFA_IOC_CHIP_REV_LEN    = 8,
 };
 
-/**
+/*
  * Driver and firmware versions.
  */
 struct bfa_ioc_driver_attr_s {
@@ -236,7 +236,7 @@ struct bfa_ioc_driver_attr_s {
        char            ob_ver[BFA_VERSION_LEN];        /*  openboot version */
 };
 
-/**
+/*
  * IOC PCI device attributes
  */
 struct bfa_ioc_pci_attr_s {
@@ -249,7 +249,7 @@ struct bfa_ioc_pci_attr_s {
        char            chip_rev[BFA_IOC_CHIP_REV_LEN];  /*  chip revision */
 };
 
-/**
+/*
  * IOC states
  */
 enum bfa_ioc_state {
@@ -267,7 +267,7 @@ enum bfa_ioc_state {
        BFA_IOC_ENABLING        = 12,   /*  IOC is being enabled */
 };
 
-/**
+/*
  * IOC firmware stats
  */
 struct bfa_fw_ioc_stats_s {
@@ -279,7 +279,7 @@ struct bfa_fw_ioc_stats_s {
        u32     unknown_reqs;
 };
 
-/**
+/*
  * IOC driver stats
  */
 struct bfa_ioc_drv_stats_s {
@@ -296,7 +296,7 @@ struct bfa_ioc_drv_stats_s {
        u32     enable_replies;
 };
 
-/**
+/*
  * IOC statistics
  */
 struct bfa_ioc_stats_s {
@@ -310,7 +310,7 @@ enum bfa_ioc_type_e {
        BFA_IOC_TYPE_LL         = 3,
 };
 
-/**
+/*
  * IOC attributes returned in queries
  */
 struct bfa_ioc_attr_s {
@@ -323,11 +323,11 @@ struct bfa_ioc_attr_s {
        u8                              rsvd[7];        /*  64bit align    */
 };
 
-/**
+/*
  * ---------------------- mfg definitions ------------
  */
 
-/**
+/*
  * Checksum size
  */
 #define BFA_MFG_CHKSUM_SIZE                    16
@@ -340,7 +340,7 @@ struct bfa_ioc_attr_s {
 
 #pragma pack(1)
 
-/**
+/*
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block_s {
@@ -373,11 +373,11 @@ struct bfa_mfg_block_s {
 
 #pragma pack()
 
-/**
+/*
  * ---------------------- pci definitions ------------
  */
 
-/**
+/*
  * PCI device and vendor ID information
  */
 enum {
@@ -392,14 +392,14 @@ enum {
        ((devid) == BFA_PCI_DEVICE_ID_CT ||     \
         (devid) == BFA_PCI_DEVICE_ID_CT_FC)
 
-/**
+/*
  * PCI sub-system device and vendor ID information
  */
 enum {
        BFA_PCI_FCOE_SSDEVICE_ID        = 0x14,
 };
 
-/**
+/*
  * Maximum number of device address ranges mapped through different BAR(s)
  */
 #define BFA_PCI_ACCESS_RANGES 1
@@ -430,7 +430,7 @@ enum {
 #define BOOT_CFG_REV1   1
 #define BOOT_CFG_VLAN   1
 
-/**
+/*
  *      Boot options setting. Determines from where to get the boot lun
  *      information.
  */
@@ -442,7 +442,7 @@ enum bfa_boot_bootopt {
 };
 
 #pragma pack(1)
-/**
+/*
  * Boot lun information.
  */
 struct bfa_boot_bootlun_s {
@@ -451,7 +451,7 @@ struct bfa_boot_bootlun_s {
 };
 #pragma pack()
 
-/**
+/*
  * BOOT boot configuration
  */
 struct bfa_boot_pbc_s {
index 96905d301828f90b3060752661e5d247c54fa18a..191d34a58b9cf15e5167b419832fc287b0201779 100644 (file)
@@ -21,7 +21,7 @@
 #include "bfa_fc.h"
 #include "bfa_defs_svc.h"
 
-/**
+/*
  * VF states
  */
 enum bfa_vf_state {
@@ -35,7 +35,7 @@ enum bfa_vf_state {
        BFA_VF_ISOLATED  = 7,   /*  port isolated due to vf_id mismatch */
 };
 
-/**
+/*
  * VF statistics
  */
 struct bfa_vf_stats_s {
@@ -55,7 +55,7 @@ struct bfa_vf_stats_s {
        u32     resvd; /*  padding for 64 bit alignment */
 };
 
-/**
+/*
  * VF attributes returned in queries
  */
 struct bfa_vf_attr_s {
@@ -67,7 +67,7 @@ struct bfa_vf_attr_s {
 #define BFA_FCS_MAX_LPORTS 256
 #define BFA_FCS_FABRIC_IPADDR_SZ  16
 
-/**
+/*
  * symbolic names for base port/virtual port
  */
 #define BFA_SYMNAME_MAXLEN     128     /* 128 bytes */
@@ -75,7 +75,7 @@ struct bfa_lport_symname_s {
        char        symname[BFA_SYMNAME_MAXLEN];
 };
 
-/**
+/*
 * Roles of FCS port:
  *     - FCP IM and FCP TM roles cannot be enabled together for a FCS port
  *     - Create multiple ports if both IM and TM functions required.
@@ -86,19 +86,19 @@ enum bfa_lport_role {
        BFA_LPORT_ROLE_FCP_MAX  = BFA_LPORT_ROLE_FCP_IM,
 };
 
-/**
+/*
  * FCS port configuration.
  */
 struct bfa_lport_cfg_s {
     wwn_t             pwwn;       /*  port wwn */
     wwn_t             nwwn;       /*  node wwn */
     struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
-       bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
+    bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
     enum bfa_lport_role     roles;      /*  FCS port roles */
     u8      tag[16];   /*  opaque tag from application */
 };
 
-/**
+/*
  * FCS port states
  */
 enum bfa_lport_state {
@@ -108,7 +108,7 @@ enum bfa_lport_state {
        BFA_LPORT_OFFLINE = 3,  /*  No login to fabric */
 };
 
-/**
+/*
  * FCS port type.
  */
 enum bfa_lport_type {
@@ -116,7 +116,7 @@ enum bfa_lport_type {
        BFA_LPORT_TYPE_VIRTUAL,
 };
 
-/**
+/*
  * FCS port offline reason.
  */
 enum bfa_lport_offline_reason {
@@ -128,7 +128,7 @@ enum bfa_lport_offline_reason {
        BFA_LPORT_OFFLINE_FAB_LOGOUT,
 };
 
-/**
+/*
  * FCS lport info.
  */
 struct bfa_lport_info_s {
@@ -150,7 +150,7 @@ struct bfa_lport_info_s {
 
 };
 
-/**
+/*
  * FCS port statistics
  */
 struct bfa_lport_stats_s {
@@ -222,7 +222,7 @@ struct bfa_lport_stats_s {
                                            * (max retry of plogi) */
 };
 
-/**
+/*
  * BFA port attribute returned in queries
  */
 struct bfa_lport_attr_s {
@@ -239,7 +239,7 @@ struct bfa_lport_attr_s {
 };
 
 
-/**
+/*
  * VPORT states
  */
 enum bfa_vport_state {
@@ -258,7 +258,7 @@ enum bfa_vport_state {
        BFA_FCS_VPORT_MAX_STATE,
 };
 
-/**
+/*
  * vport statistics
  */
 struct bfa_vport_stats_s {
@@ -296,7 +296,7 @@ struct bfa_vport_stats_s {
        u32        rsvd;
 };
 
-/**
+/*
  * BFA vport attribute returned in queries
  */
 struct bfa_vport_attr_s {
@@ -305,7 +305,7 @@ struct bfa_vport_attr_s {
        u32          rsvd;
 };
 
-/**
+/*
  * FCS remote port states
  */
 enum bfa_rport_state {
@@ -321,7 +321,7 @@ enum bfa_rport_state {
        BFA_RPORT_NSDISC        = 9,    /*  re-discover rport */
 };
 
-/**
+/*
  *  Rport Scsi Function : Initiator/Target.
  */
 enum bfa_rport_function {
@@ -329,7 +329,7 @@ enum bfa_rport_function {
        BFA_RPORT_TARGET        = 0x02, /*  SCSI Target */
 };
 
-/**
+/*
  * port/node symbolic names for rport
  */
 #define BFA_RPORT_SYMNAME_MAXLEN       255
@@ -337,7 +337,7 @@ struct bfa_rport_symname_s {
        char            symname[BFA_RPORT_SYMNAME_MAXLEN];
 };
 
-/**
+/*
  * FCS remote port statistics
  */
 struct bfa_rport_stats_s {
@@ -374,7 +374,7 @@ struct bfa_rport_stats_s {
        struct bfa_rport_hal_stats_s    hal_stats;  /*  BFA rport stats    */
 };
 
-/**
+/*
  * FCS remote port attributes returned in queries
  */
 struct bfa_rport_attr_s {
@@ -411,7 +411,7 @@ struct bfa_rport_remote_link_stats_s {
 #define BFA_MAX_IO_INDEX 7
 #define BFA_NO_IO_INDEX 9
 
-/**
+/*
  * FCS itnim states
  */
 enum bfa_itnim_state {
@@ -425,7 +425,7 @@ enum bfa_itnim_state {
        BFA_ITNIM_INITIATIOR    = 7,    /*  initiator */
 };
 
-/**
+/*
  * FCS remote port statistics
  */
 struct bfa_itnim_stats_s {
@@ -443,7 +443,7 @@ struct bfa_itnim_stats_s {
        u32     rsvd;           /* padding for 64 bit alignment */
 };
 
-/**
+/*
  * FCS itnim attributes returned in queries
  */
 struct bfa_itnim_attr_s {
index 56226fcf9470e167b71eb3f9cc5b8136d8bfb6db..e24e9f7ca81ff0b68f0b6ea2383f374a73d47165 100644
@@ -27,7 +27,7 @@
 #define BFA_IOCFCOE_INTR_DELAY 25
 #define BFA_IOCFCOE_INTR_LATENCY 5
 
-/**
+/*
  * Interrupt coalescing configuration.
  */
 #pragma pack(1)
@@ -38,7 +38,7 @@ struct bfa_iocfc_intr_attr_s {
        u16     delay;          /*  delay in microseconds     */
 };
 
-/**
+/*
  * IOC firmware configuration
  */
 struct bfa_iocfc_fwcfg_s {
@@ -71,7 +71,7 @@ struct bfa_iocfc_drvcfg_s {
        u32             rsvd;
 };
 
-/**
+/*
  * IOC configuration
  */
 struct bfa_iocfc_cfg_s {
@@ -79,7 +79,7 @@ struct bfa_iocfc_cfg_s {
        struct bfa_iocfc_drvcfg_s       drvcfg; /*  driver side config    */
 };
 
-/**
+/*
  * IOC firmware IO stats
  */
 struct bfa_fw_io_stats_s {
@@ -152,7 +152,7 @@ struct bfa_fw_io_stats_s {
                                                 */
 };
 
-/**
+/*
  * IOC port firmware stats
  */
 
@@ -262,7 +262,7 @@ struct bfa_fw_fcoe_stats_s {
     u32    mac_invalids;       /*  Invalid mac assigned                */
 };
 
-/**
+/*
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
@@ -270,7 +270,7 @@ struct bfa_fw_fcoe_port_stats_s {
     struct bfa_fw_fip_stats_s   fip_stats;
 };
 
-/**
+/*
  * IOC firmware FC uport stats
  */
 struct bfa_fw_fc_uport_stats_s {
@@ -278,7 +278,7 @@ struct bfa_fw_fc_uport_stats_s {
        struct bfa_fw_port_lksm_stats_s         lksm_stats;
 };
 
-/**
+/*
  * IOC firmware FC port stats
  */
 union bfa_fw_fc_port_stats_s {
@@ -286,7 +286,7 @@ union bfa_fw_fc_port_stats_s {
        struct bfa_fw_fcoe_port_stats_s fcoe_stats;
 };
 
-/**
+/*
  * IOC firmware port stats
  */
 struct bfa_fw_port_stats_s {
@@ -295,7 +295,7 @@ struct bfa_fw_port_stats_s {
        union  bfa_fw_fc_port_stats_s           fc_port;
 };
 
-/**
+/*
  * fcxchg module statistics
  */
 struct bfa_fw_fcxchg_stats_s {
@@ -308,7 +308,7 @@ struct bfa_fw_lpsm_stats_s {
        u32     cls_tx;
 };
 
-/**
+/*
  *  Trunk statistics
  */
 struct bfa_fw_trunk_stats_s {
@@ -334,7 +334,7 @@ struct bfa_fw_advsm_stats_s {
        u32 elp_dropped;                /*  ELP dropped         */
 };
 
-/**
+/*
  * IOCFC firmware stats
  */
 struct bfa_fw_iocfc_stats_s {
@@ -345,7 +345,7 @@ struct bfa_fw_iocfc_stats_s {
        u32     set_intr_reqs;  /*  set interrupt reqs */
 };
 
-/**
+/*
  * IOC attributes returned in queries
  */
 struct bfa_iocfc_attr_s {
@@ -353,7 +353,7 @@ struct bfa_iocfc_attr_s {
        struct bfa_iocfc_intr_attr_s    intr_attr;      /*  interrupt attr */
 };
 
-/**
+/*
  * Eth_sndrcv mod stats
  */
 struct bfa_fw_eth_sndrcv_stats_s {
@@ -361,7 +361,7 @@ struct bfa_fw_eth_sndrcv_stats_s {
        u32     rsvd;           /*  64bit align    */
 };
 
-/**
+/*
  * CT MAC mod stats
  */
 struct bfa_fw_mac_mod_stats_s {
@@ -379,7 +379,7 @@ struct bfa_fw_mac_mod_stats_s {
        u32     rsvd;           /*  64bit align    */
 };
 
-/**
+/*
  * CT MOD stats
  */
 struct bfa_fw_ct_mod_stats_s {
@@ -391,7 +391,7 @@ struct bfa_fw_ct_mod_stats_s {
        u32     rsvd;           /*  64bit align    */
 };
 
-/**
+/*
  * IOC firmware stats
  */
 struct bfa_fw_stats_s {
@@ -412,7 +412,7 @@ struct bfa_fw_stats_s {
 #define BFA_IOCFC_PATHTOV_MAX  60
 #define BFA_IOCFC_QDEPTH_MAX   2000
 
-/**
+/*
  * QoS states
  */
 enum bfa_qos_state {
@@ -420,7 +420,7 @@ enum bfa_qos_state {
        BFA_QOS_OFFLINE = 2,            /*  QoS is offline */
 };
 
-/**
+/*
  * QoS  Priority levels.
  */
 enum bfa_qos_priority {
@@ -430,7 +430,7 @@ enum bfa_qos_priority {
        BFA_QOS_LOW  =  3,      /*  QoS Priority Level Low */
 };
 
-/**
+/*
  * QoS  bandwidth allocation for each priority level
  */
 enum bfa_qos_bw_alloc {
@@ -439,7 +439,7 @@ enum bfa_qos_bw_alloc {
        BFA_QOS_BW_LOW  =  10,  /*  bandwidth allocation for Low */
 };
 #pragma pack(1)
-/**
+/*
  * QoS attribute returned in QoS Query
  */
 struct bfa_qos_attr_s {
@@ -448,7 +448,7 @@ struct bfa_qos_attr_s {
        u32  total_bb_cr;               /*  Total BB Credits */
 };
 
-/**
+/*
  * These fields should be displayed only from the CLI.
  * There will be a separate BFAL API (get_qos_vc_attr ?)
  * to retrieve this.
@@ -471,7 +471,7 @@ struct bfa_qos_vc_attr_s {
                                                            * total_vc_count */
 };
 
-/**
+/*
  * QoS statistics
  */
 struct bfa_qos_stats_s {
@@ -489,7 +489,7 @@ struct bfa_qos_stats_s {
        u32     rsvd;               /* padding for 64 bit alignment */
 };
 
-/**
+/*
  * FCoE statistics
  */
 struct bfa_fcoe_stats_s {
@@ -540,7 +540,7 @@ struct bfa_fcoe_stats_s {
        u64     rxf_bcast_vlan; /*  Rx FCoE broadcast vlan frames   */
 };
 
-/**
+/*
  * QoS or FCoE stats (fcport stats excluding physical FC port stats)
  */
 union bfa_fcport_stats_u {
@@ -639,7 +639,7 @@ enum bfa_port_states {
        BFA_PORT_ST_MAX_STATE,
 };
 
-/**
+/*
  *     Port operational type (in sync with SNIA port type).
  */
 enum bfa_port_type {
@@ -651,7 +651,7 @@ enum bfa_port_type {
        BFA_PORT_TYPE_VPORT     = 22,   /*  NPIV - virtual port */
 };
 
-/**
+/*
  *     Port topology setting. A port's topology and fabric login status
  *     determine its operational type.
  */
@@ -662,7 +662,7 @@ enum bfa_port_topology {
        BFA_PORT_TOPOLOGY_AUTO = 3,     /*  auto topology selection */
 };
 
-/**
+/*
  *     Physical port loopback types.
  */
 enum bfa_port_opmode {
@@ -679,7 +679,7 @@ enum bfa_port_opmode {
        (_mode == BFA_PORT_OPMODE_LB_SLW) ||            \
        (_mode == BFA_PORT_OPMODE_LB_EXT))
 
-/**
+/*
  *     Port link state
  */
 enum bfa_port_linkstate {
@@ -687,7 +687,7 @@ enum bfa_port_linkstate {
        BFA_PORT_LINKDOWN       = 2,    /*  Physical port/Trunk link down */
 };
 
-/**
+/*
  *     Port link state reason code
  */
 enum bfa_port_linkstate_rsn {
@@ -733,7 +733,7 @@ enum bfa_port_linkstate_rsn {
        CEE_ISCSI_PRI_OVERLAP_FCOE_PRI          = 43
 };
 #pragma pack(1)
-/**
+/*
  *      Physical port configuration
  */
 struct bfa_port_cfg_s {
@@ -753,7 +753,7 @@ struct bfa_port_cfg_s {
 };
 #pragma pack()
 
-/**
+/*
  *     Port attribute values.
  */
 struct bfa_port_attr_s {
@@ -800,7 +800,7 @@ struct bfa_port_attr_s {
        u8                      rsvd1[6];
 };
 
-/**
+/*
  *           Port FCP mappings.
  */
 struct bfa_port_fcpmap_s {
@@ -815,7 +815,7 @@ struct bfa_port_fcpmap_s {
        char            luid[256];
 };
 
-/**
+/*
  *           Port RNID info.
  */
 struct bfa_port_rnid_s {
@@ -848,7 +848,7 @@ struct bfa_fcport_fcf_s {
        mac_t      mac;     /*  FCF mac           */
 };
 
-/**
+/*
  *     Trunk states for BCU/BFAL
  */
 enum bfa_trunk_state {
@@ -857,7 +857,7 @@ enum bfa_trunk_state {
        BFA_TRUNK_OFFLINE       = 2,    /*  Trunk is offline            */
 };
 
-/**
+/*
  *     VC attributes for trunked link
  */
 struct bfa_trunk_vc_attr_s {
@@ -867,7 +867,7 @@ struct bfa_trunk_vc_attr_s {
        u16 vc_credits[8];
 };
 
-/**
+/*
  *     Link state information
  */
 struct bfa_port_link_s {
@@ -959,7 +959,7 @@ struct bfa_rport_hal_stats_s {
        u32        rsvd;
 };
 #pragma pack(1)
-/**
+/*
  *  Rport's QoS attributes
  */
 struct bfa_rport_qos_attr_s {
@@ -987,7 +987,7 @@ struct bfa_itnim_ioprofile_s {
        struct bfa_itnim_latency_s io_latency;
 };
 
-/**
+/*
  * FC physical port statistics.
  */
 struct bfa_port_fc_stats_s {
@@ -1022,7 +1022,7 @@ struct bfa_port_fc_stats_s {
        u64     err_enc;        /*  Encoding err frame_8b10b    */
 };
 
-/**
+/*
  * Eth Physical Port statistics.
  */
 struct bfa_port_eth_stats_s {
@@ -1070,7 +1070,7 @@ struct bfa_port_eth_stats_s {
        u64     tx_iscsi_zero_pause; /*  Tx iSCSI zero pause    */
 };
 
-/**
+/*
  *              Port statistics.
  */
 union bfa_port_stats_u {
index 14127646dc54cdb811ff944b7bea9dc0c838d4ed..0222d7c88a9a269812e520037d1d81d1f39070bd 100644
@@ -17,7 +17,7 @@
 
 #include "bfa_modules.h"
 
-/**
+/*
  * BFA module list terminated by NULL
  */
 struct bfa_module_s *hal_mods[] = {
@@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = {
        NULL
 };
 
-/**
+/*
  * Message handlers for various modules.
  */
 bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
@@ -70,7 +70,7 @@ bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
 };
 
 
-/**
+/*
  * Message handlers for mailbox command classes
  */
 bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
index 6eff705564eb5310d015e1e54a2b82db760a1c3b..e929d25b09e3083d3a095c974d48f4faa464d199 100644
@@ -1029,7 +1029,7 @@ struct link_e2e_beacon_req_s {
        struct link_e2e_beacon_param_s beacon_parm;
 };
 
-/**
+/*
  * If RPSC request is sent to the Domain Controller, the request is for
  * all the ports within that domain (TODO - I don't think FOS implements
  * this...).
@@ -1049,7 +1049,7 @@ struct fc_rpsc_acc_s {
        struct fc_rpsc_speed_info_s speed_info[1];
 };
 
-/**
+/*
  * If RPSC2 request is sent to the Domain Controller,
  */
 #define FC_BRCD_TOKEN    0x42524344
@@ -1094,7 +1094,7 @@ struct fc_rpsc2_acc_s {
     struct fc_rpsc2_port_info_s port_info[1];    /* port information */
 };
 
-/**
+/*
  * bit fields so that multiple classes can be specified
  */
 enum fc_cos {
@@ -1131,7 +1131,7 @@ struct fc_alpabm_s {
 #define FC_VF_ID_MAX     0xEFF
 #define FC_VF_ID_CTL     0xFEF /*  control VF_ID */
 
-/**
+/*
  * Virtual Fabric Tagging header format
  * @caution This is defined only in BIG ENDIAN format.
  */
@@ -1463,7 +1463,7 @@ struct fcgs_gidpn_resp_s {
        u32     dap:24; /* port identifier */
 };
 
-/**
+/*
  * RFT_ID
  */
 struct fcgs_rftid_req_s {
@@ -1472,7 +1472,7 @@ struct fcgs_rftid_req_s {
        u32     fc4_type[8];    /* fc4 types */
 };
 
-/**
+/*
  * RFF_ID : Register FC4 features.
  */
 
@@ -1487,7 +1487,7 @@ struct fcgs_rffid_req_s {
     u32    fc4_type:8;         /* corresponding FC4 Type */
 };
 
-/**
+/*
  * GID_FT Request
  */
 struct fcgs_gidft_req_s {
@@ -1497,7 +1497,7 @@ struct fcgs_gidft_req_s {
        u8      fc4_type;       /* FC_TYPE_FCP for SCSI devices */
 };             /* GID_FT Request */
 
-/**
+/*
  * GID_FT Response
  */
 struct fcgs_gidft_resp_s {
@@ -1506,7 +1506,7 @@ struct fcgs_gidft_resp_s {
        u32     pid:24; /* port identifier */
 };             /* GID_FT Response */
 
-/**
+/*
  * RSPN_ID
  */
 struct fcgs_rspnid_req_s {
@@ -1516,7 +1516,7 @@ struct fcgs_rspnid_req_s {
        u8              spn[256];       /* symbolic port name */
 };
 
-/**
+/*
  * RPN_ID
  */
 struct fcgs_rpnid_req_s {
@@ -1525,7 +1525,7 @@ struct fcgs_rpnid_req_s {
        wwn_t           port_name;
 };
 
-/**
+/*
  * RNN_ID
  */
 struct fcgs_rnnid_req_s {
@@ -1534,7 +1534,7 @@ struct fcgs_rnnid_req_s {
        wwn_t           node_name;
 };
 
-/**
+/*
  * RCS_ID
  */
 struct fcgs_rcsid_req_s {
@@ -1543,7 +1543,7 @@ struct fcgs_rcsid_req_s {
        u32     cos;
 };
 
-/**
+/*
  * RPT_ID
  */
 struct fcgs_rptid_req_s {
@@ -1553,7 +1553,7 @@ struct fcgs_rptid_req_s {
        u32     rsvd1:24;
 };
 
-/**
+/*
  * GA_NXT Request
  */
 struct fcgs_ganxt_req_s {
@@ -1561,7 +1561,7 @@ struct fcgs_ganxt_req_s {
        u32     port_id:24;
 };
 
-/**
+/*
  * GA_NXT Response
  */
 struct fcgs_ganxt_rsp_s {
index b7d2657ca82a2086d4b6ac492c94e3c7835b36c8..9c725314b513f203fc3d4cfbe667f6850568564c 100644
@@ -94,13 +94,13 @@ fcbuild_init(void)
         */
        plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
        plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
-       plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
+       plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
        plogi_tmpl.csp.ciro = 0x1;
        plogi_tmpl.csp.cisc = 0x0;
        plogi_tmpl.csp.altbbcred = 0x0;
-       plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
-       plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
-       plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);
+       plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
+       plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
+       plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
 
        plogi_tmpl.class3.class_valid = 1;
        plogi_tmpl.class3.sequential = 1;
@@ -112,7 +112,7 @@ fcbuild_init(void)
         */
        prli_tmpl.command = FC_ELS_PRLI;
        prli_tmpl.pglen = 0x10;
-       prli_tmpl.pagebytes = bfa_os_htons(0x0014);
+       prli_tmpl.pagebytes = cpu_to_be16(0x0014);
        prli_tmpl.parampage.type = FC_TYPE_FCP;
        prli_tmpl.parampage.imagepair = 1;
        prli_tmpl.parampage.servparams.rxrdisab = 1;
@@ -137,7 +137,7 @@ fcbuild_init(void)
 static void
 fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 {
-       bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
+       memset(fchs, 0, sizeof(struct fchs_s));
 
        fchs->routing = FC_RTG_FC4_DEV_DATA;
        fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
@@ -148,9 +148,9 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
        fchs->rx_id = FC_RXID_ANY;
        fchs->d_id = (d_id);
        fchs->s_id = (s_id);
-       fchs->ox_id = bfa_os_htons(ox_id);
+       fchs->ox_id = cpu_to_be16(ox_id);
 
-       /**
+       /*
         * @todo no need to set ox_id for request
         *       no need to set rx_id for response
         */
@@ -159,16 +159,16 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 void
 fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-       bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
+       memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
        fchs->d_id = (d_id);
        fchs->s_id = (s_id);
-       fchs->ox_id = bfa_os_htons(ox_id);
+       fchs->ox_id = cpu_to_be16(ox_id);
 }
 
 static void
 fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-       bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
+       memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
        fchs->d_id = d_id;
        fchs->s_id = s_id;
        fchs->ox_id = ox_id;
@@ -198,7 +198,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 static void
 fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-       bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
+       memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
        fchs->d_id = d_id;
        fchs->s_id = s_id;
        fchs->ox_id = ox_id;
@@ -211,7 +211,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 {
        struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
 
-       bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+       memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
        plogi->els_cmd.els_code = els_code;
        if (els_code == FC_ELS_PLOGI)
@@ -219,10 +219,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
        else
                fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-       plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);
+       plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
 
-       bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
-       bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
+       memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
+       memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
 
        return sizeof(struct fc_logi_s);
 }
@@ -235,12 +235,12 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
        u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
        u32     *vvl_info;
 
-       bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+       memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
        flogi->els_cmd.els_code = FC_ELS_FLOGI;
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+       flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
        flogi->port_name = port_name;
        flogi->node_name = node_name;
 
@@ -253,14 +253,14 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
        /* set AUTH capability */
        flogi->csp.security = set_auth;
 
-       flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+       flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
 
        /* Set brcd token in VVL */
        vvl_info = (u32 *)&flogi->vvl[0];
 
        /* set the flag to indicate the presence of VVL */
        flogi->csp.npiv_supp    = 1; /* @todo. field name is not correct */
-       vvl_info[0]     = bfa_os_htonl(FLOGI_VVL_BRCD);
+       vvl_info[0]     = cpu_to_be32(FLOGI_VVL_BRCD);
 
        return sizeof(struct fc_logi_s);
 }
@@ -272,15 +272,15 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 {
        u32        d_id = 0;
 
-       bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+       memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        flogi->els_cmd.els_code = FC_ELS_ACC;
-       flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+       flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
        flogi->port_name = port_name;
        flogi->node_name = node_name;
 
-       flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+       flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
 
        return sizeof(struct fc_logi_s);
 }
@@ -291,12 +291,12 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 {
        u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 
-       bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+       memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
        flogi->els_cmd.els_code = FC_ELS_FDISC;
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+       flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
        flogi->port_name = port_name;
        flogi->node_name = node_name;
 
@@ -346,7 +346,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
                if (!plogi->class3.class_valid)
                        return FC_PARSE_FAILURE;
 
-               if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
+               if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
                        return FC_PARSE_FAILURE;
 
                return FC_PARSE_OK;
@@ -363,8 +363,8 @@ fc_plogi_parse(struct fchs_s *fchs)
        if (plogi->class3.class_valid != 1)
                return FC_PARSE_FAILURE;
 
-       if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
-           || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
+       if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
+           || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
            || (plogi->class3.rxsz == 0))
                return FC_PARSE_FAILURE;
 
@@ -378,7 +378,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
        struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
        fc_els_req_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+       memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 
        prli->command = FC_ELS_PRLI;
        prli->parampage.servparams.initiator     = 1;
@@ -397,7 +397,7 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
        struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+       memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 
        prli->command = FC_ELS_ACC;
 
@@ -448,7 +448,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
 {
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
+       memset(logo, '\0', sizeof(struct fc_logo_s));
        logo->els_cmd.els_code = FC_ELS_LOGO;
        logo->nport_id = (s_id);
        logo->orig_port_name = port_name;
@@ -461,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
                 u32 s_id, u16 ox_id, wwn_t port_name,
                 wwn_t node_name, u8 els_code)
 {
-       bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
+       memset(adisc, '\0', sizeof(struct fc_adisc_s));
 
        adisc->els_cmd.els_code = els_code;
 
@@ -537,7 +537,7 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
        if (pdisc->class3.class_valid != 1)
                return FC_PARSE_FAILURE;
 
-       if ((bfa_os_ntohs(pdisc->class3.rxsz) <
+       if ((be16_to_cpu(pdisc->class3.rxsz) <
                (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
            || (pdisc->class3.rxsz == 0))
                return FC_PARSE_FAILURE;
@@ -554,11 +554,11 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
 u16
 fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-       bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
+       memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
        fchs->cat_info = FC_CAT_ABTS;
        fchs->d_id = (d_id);
        fchs->s_id = (s_id);
-       fchs->ox_id = bfa_os_htons(ox_id);
+       fchs->ox_id = cpu_to_be16(ox_id);
 
        return sizeof(struct fchs_s);
 }
@@ -582,9 +582,9 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
        /*
         * build rrq payload
         */
-       bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
+       memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
        rrq->s_id = (s_id);
-       rrq->ox_id = bfa_os_htons(rrq_oxid);
+       rrq->ox_id = cpu_to_be16(rrq_oxid);
        rrq->rx_id = FC_RXID_ANY;
 
        return sizeof(struct fc_rrq_s);
@@ -598,7 +598,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
+       memset(acc, 0, sizeof(struct fc_els_cmd_s));
        acc->els_code = FC_ELS_ACC;
 
        return sizeof(struct fc_els_cmd_s);
@@ -610,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
                u8 reason_code_expl)
 {
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+       memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
 
        ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
        ls_rjt->reason_code = reason_code;
@@ -626,7 +626,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 {
        fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
+       memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
 
        fchs->rx_id = rx_id;
 
@@ -641,7 +641,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
                u32 s_id, u16 ox_id)
 {
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+       memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
        els_cmd->els_code = FC_ELS_ACC;
 
        return sizeof(struct fc_els_cmd_s);
@@ -656,10 +656,10 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 
        if (els_code == FC_ELS_PRLO) {
                prlo = (struct fc_prlo_s *) (fc_frame + 1);
-               num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16;
+               num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
        } else {
                tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
-               num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+               num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
        }
        return num_pages;
 }
@@ -672,11 +672,11 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
+       memset(tprlo_acc, 0, (num_pages * 16) + 4);
        tprlo_acc->command = FC_ELS_ACC;
 
        tprlo_acc->page_len = 0x10;
-       tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+       tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
        for (page = 0; page < num_pages; page++) {
                tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
@@ -685,7 +685,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
                tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
                tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
        }
-       return bfa_os_ntohs(tprlo_acc->payload_len);
+       return be16_to_cpu(tprlo_acc->payload_len);
 }
 
 u16
@@ -696,10 +696,10 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
+       memset(prlo_acc, 0, (num_pages * 16) + 4);
        prlo_acc->command = FC_ELS_ACC;
        prlo_acc->page_len = 0x10;
-       prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+       prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
        for (page = 0; page < num_pages; page++) {
                prlo_acc->prlo_acc_params[page].opa_valid = 0;
@@ -709,7 +709,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
                prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
        }
 
-       return bfa_os_ntohs(prlo_acc->payload_len);
+       return be16_to_cpu(prlo_acc->payload_len);
 }
 
 u16
@@ -718,7 +718,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 {
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+       memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
 
        rnid->els_cmd.els_code = FC_ELS_RNID;
        rnid->node_id_data_format = data_format;
@@ -732,7 +732,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
                  struct fc_rnid_common_id_data_s *common_id_data,
                  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
-       bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+       memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
@@ -745,7 +745,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
        if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
                rnid_acc->specific_id_data_length =
                        sizeof(struct fc_rnid_general_topology_data_s);
-               bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
+               rnid_acc->gen_topology_data = *gen_topo_data;
                return sizeof(struct fc_rnid_acc_s);
        } else {
                return sizeof(struct fc_rnid_acc_s) -
@@ -760,7 +760,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
 {
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+       memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
 
        rpsc->els_cmd.els_code = FC_ELS_RPSC;
        return sizeof(struct fc_rpsc_cmd_s);
@@ -775,11 +775,11 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
 
        fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
 
-       bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+       memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
        rpsc2->els_cmd.els_code = FC_ELS_RPSC;
-       rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
-       rpsc2->num_pids  = bfa_os_htons(npids);
+       rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
+       rpsc2->num_pids  = cpu_to_be16(npids);
        for (i = 0; i < npids; i++)
                rpsc2->pid_list[i].pid = pid_list[i];
 
@@ -791,18 +791,18 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
                u32 d_id, u32 s_id, u16 ox_id,
                  struct fc_rpsc_speed_info_s *oper_speed)
 {
-       bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+       memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
 
        fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
        rpsc_acc->command = FC_ELS_ACC;
-       rpsc_acc->num_entries = bfa_os_htons(1);
+       rpsc_acc->num_entries = cpu_to_be16(1);
 
        rpsc_acc->speed_info[0].port_speed_cap =
-               bfa_os_htons(oper_speed->port_speed_cap);
+               cpu_to_be16(oper_speed->port_speed_cap);
 
        rpsc_acc->speed_info[0].port_op_speed =
-               bfa_os_htons(oper_speed->port_op_speed);
+               cpu_to_be16(oper_speed->port_op_speed);
 
        return sizeof(struct fc_rpsc_acc_s);
 }
@@ -830,12 +830,12 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 {
        struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
-       bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
+       memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
 
        pdisc->els_cmd.els_code = FC_ELS_PDISC;
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
+       pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
        pdisc->port_name = port_name;
        pdisc->node_name = node_name;
 
@@ -859,7 +859,7 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
        if (!pdisc->class3.class_valid)
                return FC_PARSE_NWWN_NOT_EQUAL;
 
-       if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
+       if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
                return FC_PARSE_RXSZ_INVAL;
 
        return FC_PARSE_OK;
@@ -873,10 +873,10 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
        int             page;
 
        fc_els_req_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
+       memset(prlo, 0, (num_pages * 16) + 4);
        prlo->command = FC_ELS_PRLO;
        prlo->page_len = 0x10;
-       prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+       prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
        for (page = 0; page < num_pages; page++) {
                prlo->prlo_params[page].type = FC_TYPE_FCP;
@@ -886,7 +886,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
                prlo->prlo_params[page].resp_process_assc = 0;
        }
 
-       return bfa_os_ntohs(prlo->payload_len);
+       return be16_to_cpu(prlo->payload_len);
 }
 
 u16
@@ -901,7 +901,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
        if (prlo->command != FC_ELS_ACC)
                return FC_PARSE_FAILURE;
 
-       num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
+       num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
 
        for (page = 0; page < num_pages; page++) {
                if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
@@ -931,10 +931,10 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
        int             page;
 
        fc_els_req_build(fchs, d_id, s_id, ox_id);
-       bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
+       memset(tprlo, 0, (num_pages * 16) + 4);
        tprlo->command = FC_ELS_TPRLO;
        tprlo->page_len = 0x10;
-       tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+       tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
        for (page = 0; page < num_pages; page++) {
                tprlo->tprlo_params[page].type = FC_TYPE_FCP;
@@ -950,7 +950,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
                }
        }
 
-       return bfa_os_ntohs(tprlo->payload_len);
+       return be16_to_cpu(tprlo->payload_len);
 }
 
 u16
@@ -965,7 +965,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
        if (tprlo->command != FC_ELS_ACC)
                return FC_PARSE_ACC_INVAL;
 
-       num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+       num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
 
        for (page = 0; page < num_pages; page++) {
                if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
@@ -1011,32 +1011,32 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 static void
 fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
 {
-       bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+       memset(cthdr, 0, sizeof(struct ct_hdr_s));
        cthdr->rev_id = CT_GS3_REVISION;
        cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
        cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
-       cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+       cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 static void
 fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
 {
-       bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+       memset(cthdr, 0, sizeof(struct ct_hdr_s));
        cthdr->rev_id = CT_GS3_REVISION;
        cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
        cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
-       cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+       cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 static void
 fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
                                         u8 sub_type)
 {
-       bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+       memset(cthdr, 0, sizeof(struct ct_hdr_s));
        cthdr->rev_id = CT_GS3_REVISION;
        cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
        cthdr->gs_sub_type = sub_type;
-       cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+       cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 u16
@@ -1050,7 +1050,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
 
-       bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
+       memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
        gidpn->port_name = port_name;
        return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
 }
@@ -1066,7 +1066,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
 
-       bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
+       memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
        gpnid->dap = port_id;
        return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
 }
@@ -1082,7 +1082,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
 
-       bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
+       memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
        gnnid->dap = port_id;
        return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
 }
@@ -1090,7 +1090,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 u16
 fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
 {
-       if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
+       if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
                if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
                        return FC_PARSE_BUSY;
                else
@@ -1108,7 +1108,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
 
        fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-       bfa_os_memset(scr, 0, sizeof(struct fc_scr_s));
+       memset(scr, 0, sizeof(struct fc_scr_s));
        scr->command = FC_ELS_SCR;
        scr->reg_func = FC_SCR_REG_FUNC_FULL;
        if (set_br_reg)
@@ -1129,7 +1129,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
        rscn->pagelen = sizeof(rscn->event[0]);
 
        payldlen = sizeof(u32) + rscn->pagelen;
-       rscn->payldlen = bfa_os_htons(payldlen);
+       rscn->payldlen = cpu_to_be16(payldlen);
 
        rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
        rscn->event[0].portid = s_id;
@@ -1149,14 +1149,14 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
 
-       bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+       memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
 
        rftid->dap = s_id;
 
        /* By default, FCP FC4 Type is registered */
        index = FC_TYPE_FCP >> 5;
        type_value = 1 << (FC_TYPE_FCP % 32);
-       rftid->fc4_type[index] = bfa_os_htonl(type_value);
+       rftid->fc4_type[index] = cpu_to_be32(type_value);
 
        return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
@@ -1172,10 +1172,10 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
 
-       bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+       memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
 
        rftid->dap = s_id;
-       bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
+       memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
                (bitmap_size < 32 ? bitmap_size : 32));
 
        return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
@@ -1192,7 +1192,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
 
-       bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
+       memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
 
        rffid->dap          = s_id;
        rffid->fc4ftr_bits  = fc4_ftrs;
@@ -1214,7 +1214,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
        fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
 
-       bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
+       memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
 
        rspnid->dap = s_id;
        rspnid->spn_len = (u8) strlen((char *)name);
@@ -1235,7 +1235,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
 
        fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
 
-       bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
+       memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
        gidft->fc4_type = fc4_type;
        gidft->domain_id = 0;
        gidft->area_id = 0;
@@ -1254,7 +1254,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, 0);
        fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
 
-       bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
+       memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
        rpnid->port_id = port_id;
        rpnid->port_name = port_name;
 
@@ -1272,7 +1272,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, 0);
        fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
 
-       bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
+       memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
        rnnid->port_id = port_id;
        rnnid->node_name = node_name;
 
@@ -1291,7 +1291,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, 0);
        fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
 
-       bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
+       memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
        rcsid->port_id = port_id;
        rcsid->cos = cos;
 
@@ -1309,7 +1309,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
        fc_gs_fchdr_build(fchs, d_id, s_id, 0);
        fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
 
-       bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
+       memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
        rptid->port_id = port_id;
        rptid->port_type = port_type;
 
@@ -1326,7 +1326,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
        fc_gs_fchdr_build(fchs, d_id, s_id, 0);
        fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
 
-       bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
+       memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
        ganxt->port_id = port_id;
 
        return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
@@ -1365,7 +1365,7 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
 
        index = fc4_type >> 5;
        type_value = 1 << (fc4_type % 32);
-       ptr[index] = bfa_os_htonl(type_value);
+       ptr[index] = cpu_to_be32(type_value);
 
 }
 
@@ -1383,7 +1383,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
        fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
                        CT_GSSUBTYPE_CFGSERVER);
 
-       bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
+       memset(gmal, 0, sizeof(fcgs_gmal_req_t));
        gmal->wwn = wwn;
 
        return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
@@ -1403,7 +1403,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
        fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
                        CT_GSSUBTYPE_CFGSERVER);
 
-       bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
+       memset(gfn, 0, sizeof(fcgs_gfn_req_t));
        gfn->wwn = wwn;
 
        return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
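
Editor's note: beyond the comment-marker sweep, the hunks in the preceding file replace the driver's private bfa_os_* portability wrappers with the standard kernel primitives: bfa_os_memset/bfa_os_memcpy become memset/memcpy, bfa_os_htons/bfa_os_htonl become cpu_to_be16/cpu_to_be32, bfa_os_ntohs becomes be16_to_cpu, and bfa_os_assign collapses to a plain structure assignment. The fragment below is a minimal sketch of that mapping, not driver code; struct example_wire_hdr and the example_* helpers are hypothetical stand-ins for the FC frame builders shown above.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Hypothetical on-wire header, illustration only. */
struct example_wire_hdr {
	__be16	payload_len;	/* big-endian on the wire */
	__be32	token;
};

static void example_fill_hdr(struct example_wire_hdr *hdr, u16 len, u32 token)
{
	memset(hdr, 0, sizeof(*hdr));		/* was bfa_os_memset() */
	hdr->payload_len = cpu_to_be16(len);	/* was bfa_os_htons()  */
	hdr->token = cpu_to_be32(token);	/* was bfa_os_htonl()  */
}

static u16 example_read_len(const struct example_wire_hdr *hdr)
{
	return be16_to_cpu(hdr->payload_len);	/* was bfa_os_ntohs()  */
}

A side benefit of the standard helpers is that the sparse-checked __be16/__be32 types let static analysis flag any place where a raw host-order value would be written to the wire.
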
index 33c8dd51f4748677fadc78b518fc51964a318859..135c4427801cf66abc97719c6e9fe78ea6a9a8d1 100644
@@ -26,7 +26,7 @@ BFA_MODULE(fcpim);
        (__l->__stats += __r->__stats)
 
 
-/**
+/*
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
@@ -72,7 +72,7 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
        }                                                               \
 } while (0)
 
-/**
+/*
  *  bfa_itnim_sm BFA itnim state machine
  */
 
@@ -89,7 +89,7 @@ enum bfa_itnim_event {
        BFA_ITNIM_SM_QRESUME = 9,       /*  queue space available */
 };
 
-/**
+/*
  *  BFA IOIM related definitions
  */
 #define bfa_ioim_move_to_comp_q(__ioim) do {                           \
@@ -107,11 +107,11 @@ enum bfa_itnim_event {
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
 } while (0)
-/**
+/*
  *  hal_ioim_sm
  */
 
-/**
+/*
  * IO state machine events
  */
 enum bfa_ioim_event {
@@ -136,11 +136,11 @@ enum bfa_ioim_event {
 };
 
 
-/**
+/*
  *  BFA TSKIM related definitions
  */
 
-/**
+/*
  * task management completion handling
  */
 #define bfa_tskim_qcomp(__tskim, __cbfn) do {                          \
@@ -165,7 +165,7 @@ enum bfa_tskim_event {
        BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion */
 };
 
-/**
+/*
  * forward declaration for BFA ITNIM functions
  */
 static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
@@ -183,7 +183,7 @@ static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
 static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
 static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
 
-/**
+/*
  * forward declaration of ITNIM state machine
  */
 static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
@@ -217,7 +217,7 @@ static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
 static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
                                        enum bfa_itnim_event event);
 
-/**
+/*
  * forward declaration for BFA IOIM functions
  */
 static bfa_boolean_t   bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
@@ -233,7 +233,7 @@ static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
 
 
-/**
+/*
  * forward declaration of BFA IO state machine
  */
 static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
@@ -261,7 +261,7 @@ static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
 static void    bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
 
-/**
+/*
  * forward declaration for BFA TSKIM functions
  */
 static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
@@ -276,7 +276,7 @@ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
 static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
 
 
-/**
+/*
  * forward declaration of BFA TSKIM state machine
  */
 static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
@@ -294,11 +294,11 @@ static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
 static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
 
-/**
+/*
  *  hal_fcpim_mod BFA FCP Initiator Mode module
  */
 
-/**
+/*
  *     Compute and return memory needed by FCP(im) module.
  */
 static void
@@ -307,7 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 {
        bfa_itnim_meminfo(cfg, km_len, dm_len);
 
-       /**
+       /*
         * IO memory
         */
        if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
@@ -320,7 +320,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 
        *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
 
-       /**
+       /*
         * task management command memory
         */
        if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
@@ -463,7 +463,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
        struct bfa_itnim_s *itnim;
 
        /* accumulate IO stats from itnim */
-       bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+       memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                if (itnim->rport->rport_info.lp_tag != lp_tag)
@@ -480,7 +480,7 @@ bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
        struct bfa_itnim_s *itnim;
 
        /* accumulate IO stats from itnim */
-       bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+       memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
        list_for_each_safe(qe, qen, &fcpim->itnim_q) {
                itnim = (struct bfa_itnim_s *) qe;
                bfa_fcpim_add_stats(modstats, &(itnim->stats));
@@ -560,7 +560,7 @@ bfa_fcpim_clr_modstats(struct bfa_s *bfa)
                itnim = (struct bfa_itnim_s *) qe;
                bfa_itnim_clear_stats(itnim);
        }
-       bfa_os_memset(&fcpim->del_itn_stats, 0,
+       memset(&fcpim->del_itn_stats, 0,
                sizeof(struct bfa_fcpim_del_itn_stats_s));
 
        return BFA_STATUS_OK;
@@ -604,11 +604,11 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
 
 
 
-/**
+/*
  *  BFA ITNIM module state machine functions
  */
 
-/**
+/*
  *     Beginning/unallocated state - no events expected.
  */
 static void
@@ -629,7 +629,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
 }
 
-/**
+/*
  *     Beginning state, only online event expected.
  */
 static void
@@ -660,7 +660,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
 }
 
-/**
+/*
  *     Waiting for itnim create response from firmware.
  */
 static void
@@ -732,7 +732,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Waiting for itnim create response from firmware, a delete is pending.
  */
 static void
@@ -760,7 +760,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Online state - normal parking state.
  */
 static void
@@ -802,7 +802,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
 }
 
-/**
+/*
  *     Second level error recovery need.
  */
 static void
@@ -833,7 +833,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
 }
 
-/**
+/*
  *     Going offline. Waiting for active IO cleanup.
  */
 static void
@@ -870,7 +870,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Deleting itnim. Waiting for active IO cleanup.
  */
 static void
@@ -898,7 +898,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
  */
 static void
@@ -955,7 +955,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Offline state.
  */
 static void
@@ -987,7 +987,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
 }
 
-/**
+/*
  *     IOC h/w failed state.
  */
 static void
@@ -1023,7 +1023,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Itnim is deleted, waiting for firmware response to delete.
  */
 static void
@@ -1068,7 +1068,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
        }
 }
 
-/**
+/*
  *     Initiate cleanup of all IOs on an IOC failure.
  */
 static void
@@ -1088,7 +1088,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
                bfa_ioim_iocdisable(ioim);
        }
 
-       /**
+       /*
         * For IO request in pending queue, we pretend an early timeout.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -1102,7 +1102,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
        }
 }
 
-/**
+/*
  *     IO cleanup completion
  */
 static void
@@ -1114,7 +1114,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg)
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
 }
 
-/**
+/*
  *     Initiate cleanup of all IOs.
  */
 static void
@@ -1129,7 +1129,7 @@ bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
 
-               /**
+               /*
                 * Move IO to a cleanup queue from active queue so that a later
                 * TM will not pickup this IO.
                 */
@@ -1176,7 +1176,7 @@ __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
                bfa_cb_itnim_sler(itnim->ditn);
 }
 
-/**
+/*
  * Call to resume any I/O requests waiting for room in request queue.
  */
 static void
@@ -1190,7 +1190,7 @@ bfa_itnim_qresume(void *cbarg)
 
 
 
-/**
+/*
  *  bfa_itnim_public
  */
 
@@ -1210,7 +1210,7 @@ void
 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                u32 *dm_len)
 {
-       /**
+       /*
         * ITN memory
         */
        *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
@@ -1229,7 +1229,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
        fcpim->itnim_arr = itnim;
 
        for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
-               bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
+               memset(itnim, 0, sizeof(struct bfa_itnim_s));
                itnim->bfa = bfa;
                itnim->fcpim = fcpim;
                itnim->reqq = BFA_REQQ_QOS_LO;
@@ -1264,7 +1264,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
 
        itnim->msg_no++;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1281,7 +1281,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
        m->msg_no = itnim->msg_no;
        bfa_stats(itnim, fw_create);
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq);
@@ -1293,7 +1293,7 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
 {
        struct bfi_itnim_delete_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1307,14 +1307,14 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
        m->fw_handle = itnim->rport->fw_handle;
        bfa_stats(itnim, fw_delete);
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(itnim->bfa, itnim->reqq);
        return BFA_TRUE;
 }
 
-/**
+/*
  * Cleanup all pending failed inflight requests.
  */
 static void
@@ -1329,7 +1329,7 @@ bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
        }
 }
 
-/**
+/*
  * Start all pending IO requests.
  */
 static void
@@ -1339,12 +1339,12 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
 
        bfa_itnim_iotov_stop(itnim);
 
-       /**
+       /*
         * Abort all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_FALSE);
 
-       /**
+       /*
         * Start all pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
@@ -1354,7 +1354,7 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * Fail all pending IO requests
  */
 static void
@@ -1362,12 +1362,12 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
 {
        struct bfa_ioim_s *ioim;
 
-       /**
+       /*
         * Fail all inflight IO requests in the queue
         */
        bfa_itnim_delayed_comp(itnim, BFA_TRUE);
 
-       /**
+       /*
         * Fail any pending IO requests.
         */
        while (!list_empty(&itnim->pending_q)) {
@@ -1377,7 +1377,7 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * IO TOV timer callback. Fail any pending IO requests.
  */
 static void
@@ -1392,7 +1392,7 @@ bfa_itnim_iotov(void *itnim_arg)
        bfa_cb_itnim_tov(itnim->ditn);
 }
 
-/**
+/*
  * Start IO TOV timer for failing back pending IO requests in offline state.
  */
 static void
@@ -1407,7 +1407,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * Stop IO TOV timer.
  */
 static void
@@ -1419,7 +1419,7 @@ bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * Stop IO TOV timer.
  */
 static void
@@ -1459,11 +1459,11 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
 
 
 
-/**
+/*
  *  bfa_itnim_public
  */
 
-/**
+/*
  *     Itnim interrupt processing.
  */
 void
@@ -1509,7 +1509,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
 
 
-/**
+/*
  *  bfa_itnim_api
  */
 
@@ -1552,7 +1552,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim)
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
 }
 
-/**
+/*
  * Return true if itnim is considered offline for holding off IO request.
  * IO is not held if itnim is being deleted.
  */
@@ -1597,17 +1597,17 @@ void
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
        int j;
-       bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
-       bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+       memset(&itnim->stats, 0, sizeof(itnim->stats));
+       memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
        for (j = 0; j < BFA_IOBUCKET_MAX; j++)
                itnim->ioprofile.io_latency.min[j] = ~0;
 }
 
-/**
+/*
  *  BFA IO module state machine functions
  */
 
-/**
+/*
  *     IO is not started (unallocated).
  */
 static void
@@ -1657,7 +1657,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
 
        case BFA_IOIM_SM_ABORT:
-               /**
+               /*
                 * IO in pending queue can get abort requests. Complete abort
                 * requests immediately.
                 */
@@ -1672,7 +1672,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     IO is waiting for SG pages.
  */
 static void
@@ -1719,7 +1719,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     IO is active.
  */
 static void
@@ -1803,7 +1803,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
 *      IO is retried with new tag.
 */
 static void
@@ -1844,7 +1844,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
 
        case BFA_IOIM_SM_ABORT:
-               /** in this state IO abort is done.
+               /* in this state IO abort is done.
                 * Waiting for IO tag resource free.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
@@ -1857,7 +1857,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     IO is being aborted, waiting for completion from firmware.
  */
 static void
@@ -1919,7 +1919,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  * IO is being cleaned up (implicit abort), waiting for completion from
  * firmware.
  */
@@ -1937,7 +1937,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
 
        case BFA_IOIM_SM_ABORT:
-               /**
+               /*
                 * IO is already being aborted implicitly
                 */
                ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -1969,7 +1969,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
 
        case BFA_IOIM_SM_CLEANUP:
-               /**
+               /*
                 * IO can be in cleanup state already due to TM command.
                 * 2nd cleanup request comes from ITN offline event.
                 */
@@ -1980,7 +1980,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     IO is waiting for room in request CQ
  */
 static void
@@ -2024,7 +2024,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     Active IO is being aborted, waiting for room in request CQ.
  */
 static void
@@ -2075,7 +2075,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  *     Active IO is being cleaned up, waiting for room in request CQ.
  */
 static void
@@ -2091,7 +2091,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
 
        case BFA_IOIM_SM_ABORT:
-               /**
+               /*
                 * IO is already being cleaned up implicitly
                 */
                ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -2125,7 +2125,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  * IO bfa callback is pending.
  */
 static void
@@ -2152,7 +2152,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  * IO bfa callback is pending. IO resource cannot be freed.
  */
 static void
@@ -2185,7 +2185,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
        }
 }
 
-/**
+/*
  * IO is completed, waiting resource free from firmware.
  */
 static void
@@ -2214,7 +2214,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
 
 
 
-/**
+/*
  *  hal_ioim_private
  */
 
@@ -2247,7 +2247,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
 
        m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
        if (m->io_status == BFI_IOIM_STS_OK) {
-               /**
+               /*
                 * setup sense information, if present
                 */
                if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
@@ -2256,15 +2256,15 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                        snsinfo = ioim->iosp->snsinfo;
                }
 
-               /**
+               /*
                 * setup residue value correctly for normal completions
                 */
                if (m->resid_flags == FCP_RESID_UNDER) {
-                       residue = bfa_os_ntohl(m->residue);
+                       residue = be32_to_cpu(m->residue);
                        bfa_stats(ioim->itnim, iocomp_underrun);
                }
                if (m->resid_flags == FCP_RESID_OVER) {
-                       residue = bfa_os_ntohl(m->residue);
+                       residue = be32_to_cpu(m->residue);
                        residue = -residue;
                        bfa_stats(ioim->itnim, iocomp_overrun);
                }
@@ -2327,7 +2327,7 @@ bfa_ioim_sgpg_alloced(void *cbarg)
        bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
 }
 
-/**
+/*
  * Send I/O request to firmware.
  */
 static bfa_boolean_t
@@ -2343,7 +2343,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
        struct scatterlist *sg;
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
@@ -2354,14 +2354,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
                return BFA_FALSE;
        }
 
-       /**
+       /*
         * build i/o request message next
         */
-       m->io_tag = bfa_os_htons(ioim->iotag);
+       m->io_tag = cpu_to_be16(ioim->iotag);
        m->rport_hdl = ioim->itnim->rport->fw_handle;
        m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
 
-       /**
+       /*
         * build inline IO SG element here
         */
        sge = &m->sges[0];
@@ -2387,18 +2387,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
        sge->flags = BFI_SGE_PGDLEN;
        bfa_sge_to_be(sge);
 
-       /**
+       /*
         * set up I/O command parameters
         */
-       bfa_os_assign(m->cmnd, cmnd_z0);
+       m->cmnd = cmnd_z0;
        m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
        m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
-       bfa_os_assign(m->cmnd.cdb,
-                       *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
+       m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
        fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
-       m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);
+       m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
 
-       /**
+       /*
         * set up I/O message header
         */
        switch (m->cmnd.iodir) {
@@ -2427,28 +2426,28 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
        m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
        m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
 
-       /**
+       /*
         * Handle large CDB (>16 bytes).
         */
        m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
                                        FCP_CMND_CDB_LEN) / sizeof(u32);
        if (m->cmnd.addl_cdb_len) {
-               bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
+               memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
                                bfa_cb_ioim_get_cdb(ioim->dio) + 1,
                                m->cmnd.addl_cdb_len * sizeof(u32));
                fcp_cmnd_fcpdl(&m->cmnd) =
-                               bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
+                               cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
        }
 #endif
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(ioim->bfa, ioim->reqq);
        return BFA_TRUE;
 }
 
-/**
+/*
  * Setup any additional SG pages needed. Inline SG element is setup
  * at queuing time.
  */
@@ -2459,7 +2458,7 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
 
        bfa_assert(ioim->nsges > BFI_SGE_INLINE);
 
-       /**
+       /*
         * allocate SG pages needed
         */
        nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
@@ -2508,7 +2507,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
                        sge->sg_len = sg_dma_len(sg);
                        pgcumsz += sge->sg_len;
 
-                       /**
+                       /*
                         * set flags
                         */
                        if (i < (nsges - 1))
@@ -2523,7 +2522,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
 
                sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
 
-               /**
+               /*
                 * set the link element of each page
                 */
                if (sgeid == ioim->nsges) {
@@ -2540,7 +2539,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
        } while (sgeid < ioim->nsges);
 }
 
-/**
+/*
  * Send I/O abort request to firmware.
  */
 static bfa_boolean_t
@@ -2549,14 +2548,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
        struct bfi_ioim_abort_req_s *m;
        enum bfi_ioim_h2i       msgop;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(ioim->bfa, ioim->reqq);
        if (!m)
                return BFA_FALSE;
 
-       /**
+       /*
         * build i/o request message next
         */
        if (ioim->iosp->abort_explicit)
@@ -2565,17 +2564,17 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
                msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
 
        bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
-       m->io_tag    = bfa_os_htons(ioim->iotag);
+       m->io_tag    = cpu_to_be16(ioim->iotag);
        m->abort_tag = ++ioim->abort_tag;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(ioim->bfa, ioim->reqq);
        return BFA_TRUE;
 }
 
-/**
+/*
  * Call to resume any I/O requests waiting for room in request queue.
  */
 static void
@@ -2591,7 +2590,7 @@ bfa_ioim_qresume(void *cbarg)
 static void
 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
 {
-       /**
+       /*
         * Move IO from itnim queue to fcpim global queue since itnim will be
         * freed.
         */
@@ -2624,13 +2623,13 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
        return BFA_TRUE;
 }
 
-/**
+/*
  *     or after the link comes back.
  */
 void
 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
 {
-       /**
+       /*
         * If path tov timer expired, failback with PATHTOV status - these
         * IO requests are not normally retried by IO stack.
         *
@@ -2645,7 +2644,7 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
        }
        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
 
-       /**
+       /*
         * Move IO to fcpim global queue since itnim will be
         * freed.
         */
@@ -2655,11 +2654,11 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
 
 
 
-/**
+/*
  *  hal_ioim_friend
  */
 
-/**
+/*
  * Memory allocation and initialization.
  */
 void
@@ -2671,7 +2670,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
        u8                      *snsinfo;
        u32             snsbufsz;
 
-       /**
+       /*
         * claim memory first
         */
        ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
@@ -2682,7 +2681,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
        fcpim->ioim_sp_arr = iosp;
        bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
 
-       /**
+       /*
         * Claim DMA memory for per IO sense data.
         */
        snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
@@ -2694,7 +2693,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
        snsinfo = fcpim->snsbase.kva;
        bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
 
-       /**
+       /*
         * Initialize ioim free queues
         */
        INIT_LIST_HEAD(&fcpim->ioim_free_q);
@@ -2706,7 +2705,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                /*
                 * initialize IOIM
                 */
-               bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
+               memset(ioim, 0, sizeof(struct bfa_ioim_s));
                ioim->iotag   = i;
                ioim->bfa     = fcpim->bfa;
                ioim->fcpim   = fcpim;
@@ -2723,7 +2722,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
        }
 }
 
-/**
+/*
  * Driver detach time call.
  */
 void
@@ -2740,7 +2739,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        u16     iotag;
        enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
 
-       iotag = bfa_os_ntohs(rsp->io_tag);
+       iotag = be16_to_cpu(rsp->io_tag);
 
        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
        bfa_assert(ioim->iotag == iotag);
@@ -2750,7 +2749,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        bfa_trc(ioim->bfa, rsp->reuse_io_tag);
 
        if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
-               bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
+               ioim->iosp->comp_rspmsg = *m;
 
        switch (rsp->io_status) {
        case BFI_IOIM_STS_OK:
@@ -2823,7 +2822,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        struct bfa_ioim_s *ioim;
        u16     iotag;
 
-       iotag = bfa_os_ntohs(rsp->io_tag);
+       iotag = be16_to_cpu(rsp->io_tag);
 
        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
        bfa_assert(ioim->iotag == iotag);
@@ -2837,7 +2836,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 void
 bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
 {
-       ioim->start_time = bfa_os_get_clock();
+       ioim->start_time = jiffies;
 }
 
 void
@@ -2845,7 +2844,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
 {
        u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
        u32 index = bfa_ioim_get_index(fcp_dl);
-       u64 end_time = bfa_os_get_clock();
+       u64 end_time = jiffies;
        struct bfa_itnim_latency_s *io_lat =
                        &(ioim->itnim->ioprofile.io_latency);
        u32 val = (u32)(end_time - ioim->start_time);
@@ -2859,7 +2858,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
                io_lat->max[index] : val;
        io_lat->avg[index] += val;
 }
-/**
+/*
  * Called by itnim to clean up IO while going offline.
  */
 void
@@ -2882,7 +2881,7 @@ bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
        bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
 }
 
-/**
+/*
  * IOC failure handling.
  */
 void
@@ -2893,7 +2892,7 @@ bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
        bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
 }
 
-/**
+/*
  * IO offline TOV popped. Fail the pending IO.
  */
 void
@@ -2905,11 +2904,11 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim)
 
 
 
-/**
+/*
  *  hal_ioim_api
  */
 
-/**
+/*
  * Allocate IOIM resource for initiator mode I/O request.
  */
 struct bfa_ioim_s *
@@ -2919,7 +2918,7 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct bfa_ioim_s *ioim;
 
-       /**
+       /*
         * allocate IOIM resource
         */
        bfa_q_deq(&fcpim->ioim_free_q, &ioim);
@@ -2970,7 +2969,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
 
        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
-       /**
+       /*
         * Obtain the queue over which this request has to be issued
         */
        ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
@@ -2980,7 +2979,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim)
        bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
 }
 
-/**
+/*
  * Driver I/O abort request.
  */
 bfa_status_t
@@ -2999,11 +2998,11 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim)
 }
 
 
-/**
+/*
  *  BFA TSKIM state machine functions
  */
 
-/**
+/*
  *     Task management command beginning state.
  */
 static void
@@ -3016,7 +3015,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
                bfa_sm_set_state(tskim, bfa_tskim_sm_active);
                bfa_tskim_gather_ios(tskim);
 
-               /**
+               /*
                 * If device is offline, do not send TM on wire. Just cleanup
                 * any pending IO requests and complete TM request.
                 */
@@ -3040,7 +3039,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
        }
 }
 
-/**
+/*
  * brief
  *     TM command is active, awaiting completion from firmware to
  *     cleanup IO requests in TM scope.
@@ -3077,7 +3076,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
        }
 }
 
-/**
+/*
  *     An active TM is being cleaned up since ITN is offline. Awaiting cleanup
  *     completion event from firmware.
  */
@@ -3088,7 +3087,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 
        switch (event) {
        case BFA_TSKIM_SM_DONE:
-               /**
+               /*
                 * Ignore and wait for ABORT completion from firmware.
                 */
                break;
@@ -3121,7 +3120,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
                break;
 
        case BFA_TSKIM_SM_CLEANUP:
-               /**
+               /*
                 * Ignore, TM command completed on wire.
                 * Notify TM completion on IO cleanup completion.
                 */
@@ -3138,7 +3137,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
        }
 }
 
-/**
+/*
  *     Task management command is waiting for room in request CQ
  */
 static void
@@ -3153,7 +3152,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
                break;
 
        case BFA_TSKIM_SM_CLEANUP:
-               /**
+               /*
                 * No need to send TM on wire since ITN is offline.
                 */
                bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
@@ -3173,7 +3172,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
        }
 }
 
-/**
+/*
 *     Task management command is active, waiting for room in request CQ
  *     to send clean up request.
  */
@@ -3186,7 +3185,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
        switch (event) {
        case BFA_TSKIM_SM_DONE:
                bfa_reqq_wcancel(&tskim->reqq_wait);
-               /**
+               /*
                 *
                 * Fall through !!!
                 */
@@ -3208,7 +3207,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
        }
 }
 
-/**
+/*
  *     BFA callback is pending
  */
 static void
@@ -3236,7 +3235,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
 
 
 
-/**
+/*
  *  hal_tskim_private
  */
 
@@ -3289,7 +3288,7 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
        return BFA_FALSE;
 }
 
-/**
+/*
  *     Gather affected IO requests and task management commands.
  */
 static void
@@ -3301,7 +3300,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
 
        INIT_LIST_HEAD(&tskim->io_q);
 
-       /**
+       /*
         * Gather any active IO requests first.
         */
        list_for_each_safe(qe, qen, &itnim->io_q) {
@@ -3313,7 +3312,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
                }
        }
 
-       /**
+       /*
         * Failback any pending IO requests immediately.
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -3327,7 +3326,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
        }
 }
 
-/**
+/*
  *     IO cleanup completion
  */
 static void
@@ -3339,7 +3338,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg)
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
 }
 
-/**
+/*
  *     Gather affected IO requests and task management commands.
  */
 static void
@@ -3359,7 +3358,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
        bfa_wc_wait(&tskim->wc);
 }
 
-/**
+/*
  *     Send task management request to firmware.
  */
 static bfa_boolean_t
@@ -3368,33 +3367,33 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
        struct bfa_itnim_s *itnim = tskim->itnim;
        struct bfi_tskim_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
        if (!m)
                return BFA_FALSE;
 
-       /**
+       /*
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
                        bfa_lpuid(tskim->bfa));
 
-       m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
+       m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
        m->itn_fhdl = tskim->itnim->rport->fw_handle;
        m->t_secs = tskim->tsecs;
        m->lun = tskim->lun;
        m->tm_flags = tskim->tm_cmnd;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(tskim->bfa, itnim->reqq);
        return BFA_TRUE;
 }
 
-/**
+/*
  *     Send abort request to cleanup an active TM to firmware.
  */
 static bfa_boolean_t
@@ -3403,29 +3402,29 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
        struct bfa_itnim_s      *itnim = tskim->itnim;
        struct bfi_tskim_abortreq_s     *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(tskim->bfa, itnim->reqq);
        if (!m)
                return BFA_FALSE;
 
-       /**
+       /*
         * build i/o request message next
         */
        bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
                        bfa_lpuid(tskim->bfa));
 
-       m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);
+       m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(tskim->bfa, itnim->reqq);
        return BFA_TRUE;
 }
 
-/**
+/*
  *     Call to resume task management cmnd waiting for room in request queue.
  */
 static void
@@ -3437,7 +3436,7 @@ bfa_tskim_qresume(void *cbarg)
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
 }
 
-/**
+/*
  * Cleanup IOs associated with a task management command on IOC failures.
  */
 static void
@@ -3454,11 +3453,11 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
 
 
 
-/**
+/*
  *  hal_tskim_friend
  */
 
-/**
+/*
  * Notification on completions from related ioim.
  */
 void
@@ -3467,7 +3466,7 @@ bfa_tskim_iodone(struct bfa_tskim_s *tskim)
        bfa_wc_down(&tskim->wc);
 }
 
-/**
+/*
  * Handle IOC h/w failure notification from itnim.
  */
 void
@@ -3478,7 +3477,7 @@ bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
 }
 
-/**
+/*
  * Cleanup TM command and associated IOs as part of ITNIM offline.
  */
 void
@@ -3489,7 +3488,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
        bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
 }
 
-/**
+/*
  *     Memory allocation and initialization.
  */
 void
@@ -3507,7 +3506,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
                /*
                 * initialize TSKIM
                 */
-               bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
+               memset(tskim, 0, sizeof(struct bfa_tskim_s));
                tskim->tsk_tag = i;
                tskim->bfa      = fcpim->bfa;
                tskim->fcpim    = fcpim;
@@ -3525,7 +3524,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
 void
 bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
 {
-       /**
+       /*
        * @todo
        */
 }
@@ -3536,14 +3535,14 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
        struct bfa_tskim_s *tskim;
-       u16     tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
+       u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
 
        tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
        bfa_assert(tskim->tsk_tag == tsk_tag);
 
        tskim->tsk_status = rsp->tsk_status;
 
-       /**
+       /*
         * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
         * requests. All other statuses are for normal completions.
         */
@@ -3558,7 +3557,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
 
 
-/**
+/*
  *  hal_tskim_api
  */
 
@@ -3585,7 +3584,7 @@ bfa_tskim_free(struct bfa_tskim_s *tskim)
        list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
 }
 
-/**
+/*
  *     Start a task management command.
  *
  * @param[in]  tskim   BFA task management command instance
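
The hunks in this file swap the driver's private byte-order wrappers (bfa_os_htons, bfa_os_ntohs, bfa_os_htonl) for the standard kernel helpers cpu_to_be16(), be16_to_cpu() and cpu_to_be32() when filling or parsing big-endian wire fields such as io_tag, tsk_tag, residue and fcp_dl. Below is a minimal userspace sketch of that pattern, not part of the patch; the struct and field names are invented, and htobe16()/be32toh() stand in for the kernel's cpu_to_be16()/be32_to_cpu().

/*
 * Illustrative only: host values are converted to big-endian before they
 * are placed in a wire-format message, and converted back when read.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct demo_io_req {            /* hypothetical wire-format message */
        uint16_t io_tag;        /* big-endian on the wire */
        uint32_t fcp_dl;        /* big-endian on the wire */
};

int main(void)
{
        struct demo_io_req m;
        uint16_t iotag = 42;
        uint32_t xfer_len = 4096;

        /* host to wire, analogous to cpu_to_be16()/cpu_to_be32() */
        m.io_tag = htobe16(iotag);
        m.fcp_dl = htobe32(xfer_len);

        /* wire to host, analogous to be16_to_cpu()/be32_to_cpu() */
        printf("io_tag=%u fcp_dl=%lu\n",
               (unsigned)be16toh(m.io_tag),
               (unsigned long)be32toh(m.fcp_dl));
        return 0;
}
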
index 3bf343160aacbcfc0a2188ad72b06ed9529bd143..db53717eeb4bb49912a728483c455acabed54d67 100644
@@ -104,7 +104,7 @@ struct bfa_fcpim_mod_s {
        bfa_fcpim_profile_t     profile_start;
 };
 
-/**
+/*
  * BFA IO (initiator mode)
  */
 struct bfa_ioim_s {
@@ -137,7 +137,7 @@ struct bfa_ioim_sp_s {
        struct bfa_tskim_s      *tskim;         /*  Relevant TM cmd     */
 };
 
-/**
+/*
  * BFA Task management command (initiator mode)
  */
 struct bfa_tskim_s {
@@ -160,7 +160,7 @@ struct bfa_tskim_s {
 };
 
 
-/**
+/*
  * BFA i-t-n (initiator mode)
  */
 struct bfa_itnim_s {
@@ -303,7 +303,7 @@ bfa_status_t        bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
                struct bfa_itnim_ioprofile_s *ioprofile);
 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
 
-/**
+/*
  *     BFA completion callback for bfa_itnim_online().
  *
  * @param[in]          itnim           FCS or driver itnim instance
@@ -312,7 +312,7 @@ bfa_status_t        bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
  */
 void   bfa_cb_itnim_online(void *itnim);
 
-/**
+/*
  *     BFA completion callback for bfa_itnim_offline().
  *
  * @param[in]          itnim           FCS or driver itnim instance
@@ -323,7 +323,7 @@ void        bfa_cb_itnim_offline(void *itnim);
 void   bfa_cb_itnim_tov_begin(void *itnim);
 void   bfa_cb_itnim_tov(void *itnim);
 
-/**
+/*
  *     BFA notification to FCS/driver for second level error recovery.
  *
  * At least one I/O request has timed out and target is unresponsive to
@@ -351,7 +351,7 @@ void                bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
                                      bfa_boolean_t iotov);
 
 
-/**
+/*
  *     I/O completion notification.
  *
  * @param[in]          dio                     driver IO structure
@@ -368,7 +368,7 @@ void        bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
                                  u8 scsi_status, int sns_len,
                                  u8 *sns_info, s32 residue);
 
-/**
+/*
  *     I/O good completion notification.
  *
  * @param[in]          dio                     driver IO structure
@@ -377,7 +377,7 @@ void        bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
  */
 void   bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
 
-/**
+/*
  *     I/O abort completion notification
  *
  * @param[in]          dio                     driver IO that was aborted
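
Earlier hunks in this diff (bfa_ioim_profile_start() and bfa_ioim_profile_comp()) record a start timestamp per I/O with jiffies and, on completion, fold the elapsed time into per-bucket min/max/avg counters. The sketch below shows the same accounting shape in plain userspace C; it is not the driver's code, and the bucket count and clock source are placeholders for the jiffies-based original.

/*
 * Illustrative latency accounting: record a start time per request,
 * then update min/max and running totals for the request's bucket on
 * completion.  CLOCK_MONOTONIC stands in for the kernel's jiffies here.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEMO_NBUCKETS 4

struct demo_lat {
        uint64_t min[DEMO_NBUCKETS];
        uint64_t max[DEMO_NBUCKETS];
        uint64_t sum[DEMO_NBUCKETS];
        uint64_t count[DEMO_NBUCKETS];
};

static uint64_t demo_now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void demo_lat_record(struct demo_lat *lat, int bucket, uint64_t start_us)
{
        uint64_t val = demo_now_us() - start_us;

        if (lat->count[bucket] == 0 || val < lat->min[bucket])
                lat->min[bucket] = val;
        if (val > lat->max[bucket])
                lat->max[bucket] = val;
        lat->sum[bucket] += val;
        lat->count[bucket]++;
}

int main(void)
{
        struct demo_lat lat = { { 0 } };
        uint64_t start = demo_now_us();

        demo_lat_record(&lat, 0, start);
        printf("bucket 0: n=%llu min=%llu us\n",
               (unsigned long long)lat.count[0],
               (unsigned long long)lat.min[0]);
        return 0;
}
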
index 9cebbe30a6782dbf8e8d9fedea033d7f19531d68..c94502dfac664f55fd42080695989d9e1cb1697b 100644
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_fcs.c BFA FCS main
  */
 
@@ -25,7 +25,7 @@
 
 BFA_TRC_FILE(FCS, FCS);
 
-/**
+/*
  * FCS sub-modules
  */
 struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
          bfa_fcs_fabric_modexit },
 };
 
-/**
+/*
  *  fcs_api BFA FCS API
  */
 
@@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
 
 
 
-/**
+/*
  *  fcs_api BFA FCS API
  */
 
-/**
+/*
  * fcs attach -- called once to initialize data structures at driver attach time
  */
 void
@@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
        }
 }
 
-/**
+/*
  * fcs initialization, called once after bfa initialization is complete
  */
 void
@@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
        }
 }
 
-/**
+/*
  * Start FCS operations.
  */
 void
@@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
        bfa_fcs_fabric_modstart(fcs);
 }
 
-/**
+/*
  *     brief
  *             FCS driver details initialization.
  *
@@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
        bfa_fcs_fabric_psymb_init(&fcs->fabric);
 }
 
-/**
+/*
  *     brief
  *             FCS FDMI Driver Parameter Initialization
  *
@@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
        fcs->fdmi_enabled = fdmi_enable;
 
 }
-/**
+/*
  *     brief
  *             FCS instance cleanup and exit.
  *
@@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
        bfa_wc_down(&fcs->wc);
 }
 
-/**
+/*
  * Fabric module implementation.
  */
 
@@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
                                         u32 rsp_len,
                                         u32 resid_len,
                                         struct fchs_s *rspfchs);
-/**
+/*
  *  fcs_fabric_sm fabric state machine functions
  */
 
-/**
+/*
  * Fabric state machine events
  */
 enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
                                           enum bfa_fcs_fabric_event event);
 static void    bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
                                           enum bfa_fcs_fabric_event event);
-/**
+/*
  *   Beginning state before fabric creation.
  */
 static void
@@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Beginning state before fabric creation.
  */
 static void
@@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Link is down, awaiting LINK UP event from port. This is also the
  *   first state at fabric creation.
  */
@@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   FLOGI is in progress, awaiting FLOGI reply.
  */
 static void
@@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Authentication is in progress, awaiting authentication results.
  */
 static void
@@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Authentication failed
  */
 static void
@@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Port is in loopback mode.
  */
 static void
@@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   There is no attached fabric - private loop or NPort-to-NPort topology.
  */
 static void
@@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Fabric is online - normal operating state.
  */
 static void
@@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   Exchanging virtual fabric parameters.
  */
 static void
@@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
+/*
  *   EVFP exchange complete and VFT tagging is enabled.
  */
 static void
@@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
        bfa_trc(fabric->fcs, event);
 }
 
-/**
+/*
  *   Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
  */
 static void
@@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
                fabric->event_arg.swp_vfid);
 }
 
-/**
+/*
  *   Fabric is being deleted, awaiting vport delete completions.
  */
 static void
@@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
 
 
 
-/**
+/*
  *  fcs_fabric_private fabric private functions
  */
 
@@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
        port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
 }
 
-/**
+/*
  * Port Symbolic Name Creation for base port.
  */
 void
@@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
        port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
 }
 
-/**
+/*
  * bfa lps login completion callback
  */
 void
@@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
        bfa_trc(fabric->fcs, fabric->is_npiv);
        bfa_trc(fabric->fcs, fabric->is_auth);
 }
-/**
+/*
  *             Allocate and send FLOGI.
  */
 static void
@@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
        bfa_fcs_fabric_set_opertype(fabric);
        fabric->stats.fabric_onlines++;
 
-       /**
+       /*
         * notify online event to base and then virtual ports
         */
        bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
        bfa_trc(fabric->fcs, fabric->fabric_name);
        fabric->stats.fabric_offlines++;
 
-       /**
+       /*
         * notify offline event first to vports and then base port.
         */
        list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
 }
 
-/**
+/*
  * Delete all vports and wait for vport delete completions.
  */
 static void
@@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
 }
 
-/**
+/*
  *  fcs_fabric_public fabric public functions
  */
 
-/**
+/*
  * Attach time initialization.
  */
 void
@@ -978,9 +978,9 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
        struct bfa_fcs_fabric_s *fabric;
 
        fabric = &fcs->fabric;
-       bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+       memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
 
-       /**
+       /*
         * Initialize base fabric.
         */
        fabric->fcs = fcs;
@@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
        fabric->lps = bfa_lps_alloc(fcs->bfa);
        bfa_assert(fabric->lps);
 
-       /**
+       /*
         * Initialize fabric delete completion handler. Fabric deletion is
         * complete when the last vport delete is complete.
         */
@@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
        bfa_trc(fcs, 0);
 }
 
-/**
+/*
  *   Module cleanup
  */
 void
@@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
 
        bfa_trc(fcs, 0);
 
-       /**
+       /*
         * Cleanup base fabric.
         */
        fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
 }
 
-/**
+/*
  * Fabric module start -- kick starts FCS actions
  */
 void
@@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
 }
 
-/**
+/*
  *   Suspend fabric activity as part of driver suspend.
  */
 void
@@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
        return fabric->oper_type;
 }
 
-/**
+/*
  *   Link up notification from BFA physical port module.
  */
 void
@@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
 }
 
-/**
+/*
  *   Link down notification from BFA physical port module.
  */
 void
@@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
        bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
 }
 
-/**
+/*
  *   A child vport is being created in the fabric.
  *
  *   Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@ void
 bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
                        struct bfa_fcs_vport_s *vport)
 {
-       /**
+       /*
         * - add vport to fabric's vport_q
         */
        bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
        bfa_wc_up(&fabric->wc);
 }
 
-/**
+/*
  *   A child vport is being deleted from fabric.
  *
  *   Vport is being deleted.
@@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
        bfa_wc_down(&fabric->wc);
 }
 
-/**
+/*
  *   Base port is deleted.
  */
 void
@@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
 }
 
 
-/**
+/*
  *    Check if fabric is online.
  *
  *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
        return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
 }
 
-/**
+/*
  *     brief
  *
  */
@@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Lookup for a vport within a fabric given its pwwn
  */
 struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
        return NULL;
 }
 
-/**
+/*
  *    In a given fabric, return the number of lports.
  *
  *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
 
        return oui;
 }
-/**
+/*
  *             Unsolicited frame receive handling.
  */
 void
@@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
        bfa_trc(fabric->fcs, len);
        bfa_trc(fabric->fcs, pid);
 
-       /**
+       /*
         * Look for our own FLOGI frames being looped back. This means an
         * external loopback cable is in place. Our own FLOGI frames are
         * sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
                return;
        }
 
-       /**
+       /*
         * FLOGI/EVFP exchanges should be consumed by base fabric.
         */
        if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
        }
 
        if (fabric->bport.pid == pid) {
-               /**
+               /*
                 * All authentication frames should be routed to auth
                 */
                bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
                return;
        }
 
-       /**
+       /*
         * look for a matching local port ID
         */
        list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
        bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
 }
 
-/**
+/*
  *             Unsolicited frames to be processed by fabric.
  */
 static void
@@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
        }
 }
 
-/**
+/*
  *     Process incoming FLOGI
  */
 static void
@@ -1329,7 +1329,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
                return;
        }
 
-       fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
+       fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
        bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
        bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
 
@@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
        struct fchs_s   fchs;
 
        fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
-       /**
+       /*
         * Do not expect this failure -- expect remote node to retry
         */
        if (!fcxp)
@@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
                      FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *   Flogi Acc completion callback.
  */
 static void
@@ -1417,130 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
        }
 }
 
-/**
- *  fcs_vf_api virtual fabrics API
- */
-
-/**
- * Enable VF mode.
- *
- * @param[in]          fcs             fcs module instance
- * @param[in]          vf_id           default vf_id of port, FC_VF_ID_NULL
- *                                     to use standard default vf_id of 1.
- *
- * @retval     BFA_STATUS_OK           vf mode is enabled
- * @retval     BFA_STATUS_BUSY         Port is active. Port must be disabled
- *                                     before VF mode can be enabled.
- */
-bfa_status_t
-bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
-{
-       return BFA_STATUS_OK;
-}
-
-/**
- * Disable VF mode.
- *
- * @param[in]          fcs             fcs module instance
- *
- * @retval     BFA_STATUS_OK           vf mode is disabled
- * @retval     BFA_STATUS_BUSY         VFs are present and being used. All
- *                                     VFs must be deleted before disabling
- *                                     VF mode.
- */
-bfa_status_t
-bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
-{
-       return BFA_STATUS_OK;
-}
-
-/**
- *  Create a new VF instance.
- *
- *  A new VF is created using the given VF configuration. A VF is identified
- *  by VF id. No duplicate VF creation is allowed with the same VF id. Once
- *  a VF is created, VF is automatically started after link initialization
- *  and EVFP exchange is completed.
- *
- *     param[in] vf     -      FCS vf data structure. Memory is
- *                             allocated by caller (driver)
- *     param[in] fcs    -      FCS module
- *     param[in] vf_cfg -      VF configuration
- *     param[in] vf_drv -      Opaque handle back to the driver's
- *                             virtual vf structure
- *
- *     retval BFA_STATUS_OK VF creation is successful
- *     retval BFA_STATUS_FAILED VF creation failed
- *     retval BFA_STATUS_EEXIST A VF exists with the given vf_id
- */
-bfa_status_t
-bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
-                 struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
-       bfa_trc(fcs, vf_id);
-       return BFA_STATUS_OK;
-}
-
-/**
- *     Use this function to delete a BFA VF object. VF object should
- *     be stopped before this function call.
- *
- *     param[in] vf - pointer to bfa_vf_t.
- *
- *     retval BFA_STATUS_OK    On vf deletion success
- *     retval BFA_STATUS_BUSY VF is not in a stopped state
- *     retval BFA_STATUS_INPROGRESS VF deletion in in progress
- */
-bfa_status_t
-bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
-{
-       bfa_trc(vf->fcs, vf->vf_id);
-       return BFA_STATUS_OK;
-}
-
-
-/**
- *     Returns attributes of the given VF.
- *
- *     param[in]       vf      pointer to bfa_vf_t.
- *     param[out] vf_attr      vf attributes returned
- *
- *     return None
- */
-void
-bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
-{
-       bfa_trc(vf->fcs, vf->vf_id);
-}
-
-/**
- *     Return statistics associated with the given vf.
- *
- *     param[in] vf            pointer to bfa_vf_t.
- *     param[out] vf_stats     vf statistics returned
- *
- *     @return None
- */
-void
-bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
-{
-       bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
- *     clear statistics associated with the given vf.
- *
- *     param[in]       vf      pointer to bfa_vf_t.
- *
- *     @return None
- */
-void
-bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
-{
-       bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
+/*
  *     Returns FCS vf structure for a given vf_id.
  *
  *     param[in]       vf_id - VF_ID
@@ -1558,81 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
        return NULL;
 }
 
-/**
- *     Return the list of VFs configured.
- *
- *     param[in]       fcs     fcs module instance
- *     param[out]      vf_ids  returned list of vf_ids
- *     param[in,out]   nvfs    in:size of vf_ids array,
- *                             out:total elements present,
- *                             actual elements returned is limited by the size
- *
- *     return Driver VF structure
- */
-void
-bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
-       bfa_trc(fcs, *nvfs);
-}
-
-/**
- *     Return the list of all VFs visible from fabric.
- *
- *     param[in]       fcs     fcs module instance
- *     param[out]      vf_ids  returned list of vf_ids
- *     param[in,out]   nvfs    in:size of vf_ids array,
- *                             out:total elements present,
- *                             actual elements returned is limited by the size
- *
- *     return Driver VF structure
- */
-void
-bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
-       bfa_trc(fcs, *nvfs);
-}
-
-/**
- *     Return the list of local logical ports present in the given VF.
- *
- *     param[in]       vf      vf for which logical ports are returned
- *     param[out]      lpwwn   returned logical port wwn list
- *     param[in,out]   nlports in:size of lpwwn list;
- *                             out:total elements present,
- *                             actual elements returned is limited by the size
- */
-void
-bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
-{
-       struct list_head        *qe;
-       struct bfa_fcs_vport_s *vport;
-       int     i;
-       struct bfa_fcs_s      *fcs;
-
-       if (vf == NULL || lpwwn == NULL || *nlports == 0)
-               return;
-
-       fcs = vf->fcs;
-
-       bfa_trc(fcs, vf->vf_id);
-       bfa_trc(fcs, (u32) *nlports);
-
-       i = 0;
-       lpwwn[i++] = vf->bport.port_cfg.pwwn;
-
-       list_for_each(qe, &vf->vport_q) {
-               if (i >= *nlports)
-                       break;
-
-               vport = (struct bfa_fcs_vport_s *) qe;
-               lpwwn[i++] = vport->lport.port_cfg.pwwn;
-       }
-
-       bfa_trc(fcs, i);
-       *nlports = i;
-}
-
-/**
+/*
  * BFA FCS PPORT ( physical port)
  */
 static void
@@ -1662,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
        bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
 }
 
-/**
+/*
  * BFA FCS UF ( Unsolicited Frames)
  */
 
-/**
+/*
  *             BFA callback for unsolicited frame receive handler.
  *
  * @param[in]          cbarg           callback arg for receive handler
@@ -1683,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
        struct fc_vft_s *vft;
        struct bfa_fcs_fabric_s *fabric;
 
-       /**
+       /*
         * check for VFT header
         */
        if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1695,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
                else
                        fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
 
-               /**
+               /*
                 * drop frame if vfid is unknown
                 */
                if (!fabric) {
@@ -1705,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
                        return;
                }
 
-               /**
+               /*
                 * skip vft header
                 */
                fchs = (struct fchs_s *) (vft + 1);
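
Most of the bfa_fcs.c hunks touch fabric state machine handlers (bfa_fcs_fabric_sm_*), which follow a function-pointer dispatch pattern: bfa_sm_set_state() stores the current handler and bfa_sm_send_event() invokes it with an event. The snippet below is a tiny standalone sketch of that pattern with invented names; it is not taken from the driver.

/*
 * Illustrative state machine: the current state is a pointer to the
 * handler function, and sending an event means calling that handler.
 */
#include <stdio.h>

struct demo_fabric;
enum demo_event { DEMO_SM_CREATE, DEMO_SM_DELETE };

typedef void (*demo_sm_t)(struct demo_fabric *fab, enum demo_event event);

struct demo_fabric {
        demo_sm_t sm;   /* current state, in the spirit of bfa_sm_set_state() */
};

static void demo_sm_created(struct demo_fabric *fab, enum demo_event event);

static void demo_sm_uninit(struct demo_fabric *fab, enum demo_event event)
{
        if (event == DEMO_SM_CREATE)
                fab->sm = demo_sm_created;
}

static void demo_sm_created(struct demo_fabric *fab, enum demo_event event)
{
        if (event == DEMO_SM_DELETE)
                fab->sm = demo_sm_uninit;
}

int main(void)
{
        struct demo_fabric fab = { .sm = demo_sm_uninit };

        /* in the spirit of bfa_sm_send_event(&fab, DEMO_SM_CREATE) */
        fab.sm(&fab, DEMO_SM_CREATE);
        printf("reached created state: %s\n",
               fab.sm == demo_sm_created ? "yes" : "no");
        return 0;
}
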
index d75045df1e7e2202f817eb20cf8f70ee5423c14f..9cb6a55977c3ad932642a80a5eecff8cb803fe24 100644
@@ -196,7 +196,7 @@ struct bfa_fcs_fabric_s {
 #define bfa_fcs_fabric_is_switched(__f)                        \
        ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
 
-/**
+/*
  *   The design calls for a single implementation of base fabric and vf.
  */
 #define bfa_fcs_vf_t struct bfa_fcs_fabric_s
@@ -216,7 +216,7 @@ struct bfa_fcs_fabric_s;
 
 #define bfa_fcs_lport_t struct bfa_fcs_lport_s
 
-/**
+/*
  * Symbolic Name related defines
  *  Total bytes 255.
  *  Physical Port's symbolic name 128 bytes.
@@ -239,7 +239,7 @@ struct bfa_fcs_fabric_s;
 #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ                        48
 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ               16
 
-/**
+/*
  * Get FC port ID for a logical port.
  */
 #define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid)
@@ -262,7 +262,7 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
 #define bfa_fcs_lport_get_fabric_ipaddr(_lport)                \
                ((_lport)->fabric->fabric_ip_addr)
 
-/**
+/*
  * bfa fcs port public functions
  */
 
@@ -342,7 +342,7 @@ struct bfa_fcs_vport_s {
 #define bfa_fcs_vport_get_port(vport)                  \
        ((struct bfa_fcs_lport_s  *)(&vport->port))
 
-/**
+/*
  * bfa fcs vport public functions
  */
 bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
@@ -393,7 +393,7 @@ struct bfa_fcs_rpf_s {
        enum bfa_port_speed     rpsc_speed;
        /*  Current Speed from RPSC. O if RPSC fails */
        enum bfa_port_speed     assigned_speed;
-       /**
+       /*
         * Speed assigned by the user.  will be used if RPSC is
         * not supported by the rport.
         */
@@ -434,7 +434,7 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
        return rport->bfa_rport;
 }
 
-/**
+/*
  * bfa fcs rport API functions
  */
 bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
@@ -573,7 +573,7 @@ bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
        return itnim->bfa_itnim;
 }
 
-/**
+/*
  * bfa fcs FCP Initiator mode API functions
  */
 void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
@@ -677,22 +677,9 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
 void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
 void           bfa_fcs_start(struct bfa_fcs_s *fcs);
 
-/**
+/*
  * bfa fcs vf public functions
  */
-bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
-bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
-bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
-                              u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
-                              struct bfad_vf_s *vf_drv);
-bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
-void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
-                         struct bfa_vf_stats_s *vf_stats);
-void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
 u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 
@@ -729,11 +716,11 @@ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
 
-/**
+/*
  * BFA FCS callback interfaces
  */
 
-/**
+/*
  * fcb Main fcs callbacks
  */
 
@@ -742,7 +729,7 @@ struct bfad_vf_s;
 struct bfad_vport_s;
 struct bfad_rport_s;
 
-/**
+/*
  * lport callbacks
  */
 struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
@@ -754,19 +741,19 @@ void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
                          struct bfad_vf_s *vf_drv,
                          struct bfad_vport_s *vp_drv);
 
-/**
+/*
  * vport callbacks
  */
 void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
 
-/**
+/*
  * rport callbacks
  */
 bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
                                 struct bfa_fcs_rport_s **rport,
                                 struct bfad_rport_s **rport_drv);
 
-/**
+/*
  * itnim callbacks
  */
 void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
index 569dfefab70d7a1ae71143895e08571d0b015b5c..9662bcdeb41d64c50ab7530fd34ab17d543fc44c 100644
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
 
@@ -38,7 +38,7 @@ static void   bfa_fcs_itnim_prli_response(void *fcsarg,
                            bfa_status_t req_status, u32 rsp_len,
                            u32 resid_len, struct fchs_s *rsp_fchs);
 
-/**
+/*
  *  fcs_itnim_sm FCS itnim state machine events
  */
 
@@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
        {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
 };
 
-/**
+/*
  *  fcs_itnim_sm FCS itnim state machine
  */
 
@@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
 
 
 
-/**
+/*
  *  itnim_public FCS ITNIM public interfaces
  */
 
-/**
+/*
  *     Called by rport when a new rport is created.
  *
  * @param[in] rport    -  remote port.
@@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
        return itnim;
 }
 
-/**
+/*
  *     Called by rport to delete  the instance of FCPIM.
  *
  * @param[in] rport    -  remote port.
@@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
        bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
 }
 
-/**
+/*
  * Notification from rport that PLOGI is complete to initiate FC-4 session.
  */
 void
@@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * Called by rport to handle a remote device offline.
  */
 void
@@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim)
        bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
 }
 
-/**
+/*
  * Called by rport when remote port is known to be an initiator from
  * PRLI received.
  */
@@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
        bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
 }
 
-/**
+/*
  * Called by rport to check if the itnim is online.
  */
 bfa_status_t
@@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
        }
 }
 
-/**
+/*
  * BFA completion callback for bfa_itnim_online().
  */
 void
@@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg)
        bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
 }
 
-/**
+/*
  * BFA completion callback for bfa_itnim_offline().
  */
 void
@@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg)
        bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
 }
 
-/**
+/*
  * Mark the beginning of PATH TOV handling. IO completion callbacks
  * are still pending.
  */
@@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
        bfa_trc(itnim->fcs, itnim->rport->pwwn);
 }
 
-/**
+/*
  * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
  */
 void
@@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg)
        itnim_drv->state = ITNIM_STATE_TIMEOUT;
 }
 
-/**
+/*
  *             BFA notification to FCS/driver for second level error recovery.
  *
  * At least one I/O request has timed out and the target is unresponsive to
@@ -736,7 +736,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
        if (itnim == NULL)
                return BFA_STATUS_NO_FCPIM_NEXUS;
 
-       bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
+       memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
 
        return BFA_STATUS_OK;
 }
@@ -753,7 +753,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
        if (itnim == NULL)
                return BFA_STATUS_NO_FCPIM_NEXUS;
 
-       bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
+       memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
        return BFA_STATUS_OK;
 }
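The fcpim.c hunks above also replace the driver-private bfa_os_memcpy()/bfa_os_memset() wrappers with the standard memcpy()/memset(). The sketch below shows the assumed shape of such OS-abstraction wrappers (illustrative only, not copied from this commit); on Linux they add nothing over the standard routines, which is why every call site converts one-for-one:

	#include <linux/string.h>

	/* Hypothetical wrapper definitions of the kind being retired. */
	#define bfa_os_memset(s, c, n)   memset((s), (c), (n))
	#define bfa_os_memcpy(d, s, n)   memcpy((d), (s), (n))
	#define bfa_os_assign(lhs, rhs)  ((lhs) = (rhs))	/* plain struct assignment */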
 
index b522bf30247a62d2f63be931b2c46dc73df4a583..377cbfff6f2ec88a467b6e8014d27c91d3b4a0c3 100644 (file)
  * General Public License for more details.
  */
 
-/**
- *  bfa_fcs_lport.c BFA FCS port
- */
-
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
 
 BFA_TRC_FILE(FCS, PORT);
 
-/**
- * Forward declarations
- */
-
 static void     bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
                                         struct fchs_s *rx_fchs, u8 reason_code,
                                         u8 reason_code_expl);
@@ -72,7 +64,7 @@ static struct {
                        bfa_fcs_lport_n2n_offline},
        };
 
-/**
+/*
  *  fcs_port_sm FCS logical port state machine
  */
 
@@ -240,7 +232,7 @@ bfa_fcs_lport_sm_deleting(
        }
 }
 
-/**
+/*
  *  fcs_port_pvt
  */
 
@@ -272,7 +264,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
                          FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  * Process incoming plogi from a remote port.
  */
 static void
@@ -303,7 +295,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
                return;
        }
 
-       /**
+       /*
         * Direct Attach P2P mode : verify address assigned by the r-port.
         */
        if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -319,12 +311,12 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
                port->pid  = rx_fchs->d_id;
        }
 
-       /**
+       /*
         * First, check if we know the device by pwwn.
         */
        rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
        if (rport) {
-               /**
+               /*
                 * Direct Attach P2P mode : handle address assigned by r-port.
                 */
                if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -337,37 +329,37 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
                return;
        }
 
-       /**
+       /*
         * Next, lookup rport by PID.
         */
        rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
        if (!rport) {
-               /**
+               /*
                 * Inbound PLOGI from a new device.
                 */
                bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
                return;
        }
 
-       /**
+       /*
         * Rport is known only by PID.
         */
        if (rport->pwwn) {
-               /**
+               /*
                 * This is a different device with the same pid. Old device
                 * disappeared. Send implicit LOGO to old device.
                 */
                bfa_assert(rport->pwwn != plogi->port_name);
                bfa_fcs_rport_logo_imp(rport);
 
-               /**
+               /*
                 * Inbound PLOGI from a new device (with old PID).
                 */
                bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
                return;
        }
 
-       /**
+       /*
         * PLOGI crossing each other.
         */
        bfa_assert(rport->pwwn == WWN_NULL);
@@ -479,12 +471,12 @@ static void
 bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
                        struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
-       bfa_os_memset(gen_topo_data, 0,
+       memset(gen_topo_data, 0,
                      sizeof(struct fc_rnid_general_topology_data_s));
 
-       gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST);
+       gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
        gen_topo_data->phy_port_num = 0;        /* @todo */
-       gen_topo_data->num_attached_nodes = bfa_os_htonl(1);
+       gen_topo_data->num_attached_nodes = cpu_to_be32(1);
 }
 
 static void
@@ -598,10 +590,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
 
 
 
-/**
+/*
  *  fcs_lport_api BFA FCS port API
  */
-/**
+/*
  *   Module initialization
  */
 void
@@ -610,7 +602,7 @@ bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
 
 }
 
-/**
+/*
  *   Module cleanup
  */
 void
@@ -619,7 +611,7 @@ bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
        bfa_fcs_modexit_comp(fcs);
 }
 
-/**
+/*
  * Unsolicited frame receive handling.
  */
 void
@@ -637,7 +629,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
                return;
        }
 
-       /**
+       /*
         * First, handle ELSs that do not require a login.
         */
        /*
@@ -673,7 +665,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
                        bfa_fcs_lport_abts_acc(lport, fchs);
                return;
        }
-       /**
+       /*
         * look for a matching remote port ID
         */
        rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
@@ -686,7 +678,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
                return;
        }
 
-       /**
+       /*
         * Only handles ELS frames for now.
         */
        if (fchs->type != FC_TYPE_ELS) {
@@ -702,20 +694,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
        }
 
        if (els_cmd->els_code == FC_ELS_LOGO) {
-               /**
+               /*
                 * @todo Handle LOGO frames received.
                 */
                return;
        }
 
        if (els_cmd->els_code == FC_ELS_PRLI) {
-               /**
+               /*
                 * @todo Handle PRLI frames received.
                 */
                return;
        }
 
-       /**
+       /*
         * Unhandled ELS frames. Send a LS_RJT.
         */
        bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
@@ -723,7 +715,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
 
 }
 
-/**
+/*
  *   PID based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -742,7 +734,7 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
        return NULL;
 }
 
-/**
+/*
  *   PWWN based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -761,7 +753,7 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
        return NULL;
 }
 
-/**
+/*
  *   NWWN based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -780,7 +772,7 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
        return NULL;
 }
 
-/**
+/*
  * Called by rport module when new rports are discovered.
  */
 void
@@ -792,7 +784,7 @@ bfa_fcs_lport_add_rport(
        port->num_rports++;
 }
 
-/**
+/*
  * Called by rport module when rports are deleted.
  */
 void
@@ -807,7 +799,7 @@ bfa_fcs_lport_del_rport(
        bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
 }
 
-/**
+/*
  * Called by fabric for base port when fabric login is complete.
  * Called by vport for virtual ports when FDISC is complete.
  */
@@ -817,7 +809,7 @@ bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
        bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
 }
 
-/**
+/*
  * Called by fabric for base port when fabric goes offline.
  * Called by vport for virtual ports when virtual port becomes offline.
  */
@@ -827,7 +819,7 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
        bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
 }
 
-/**
+/*
  * Called by fabric to delete base lport and associated resources.
  *
  * Called by vport to delete lport and associated resources. Should call
@@ -839,7 +831,7 @@ bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
        bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
 }
 
-/**
+/*
  * Return TRUE if port is online, else return FALSE
  */
 bfa_boolean_t
@@ -848,7 +840,7 @@ bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
        return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
 }
 
-/**
+/*
   * Attach time initialization of logical ports.
  */
 void
@@ -865,7 +857,7 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
        lport->num_rports = 0;
 }
 
-/**
+/*
  * Logical port initialization of base or virtual port.
  * Called by fabric for base port or by vport for virtual ports.
  */
@@ -878,7 +870,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
        struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
        char    lpwwn_buf[BFA_STRING_32];
 
-       bfa_os_assign(lport->port_cfg, *port_cfg);
+       lport->port_cfg = *port_cfg;
 
        lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
                                        lport->port_cfg.roles,
@@ -894,7 +886,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
        bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
 }
 
-/**
+/*
  *  fcs_lport_api
  */
 
@@ -934,11 +926,11 @@ bfa_fcs_lport_get_attr(
        }
 }
 
-/**
+/*
  *  bfa_fcs_lport_fab port fab functions
  */
 
-/**
+/*
  *   Called by port to initialize fabric services of the base port.
  */
 static void
@@ -949,7 +941,7 @@ bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
        bfa_fcs_lport_ms_init(port);
 }
 
-/**
+/*
  *   Called by port to notify transition to online state.
  */
 static void
@@ -959,7 +951,7 @@ bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
        bfa_fcs_lport_scn_online(port);
 }
 
-/**
+/*
  *   Called by port to notify transition to offline state.
  */
 static void
@@ -970,11 +962,11 @@ bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
        bfa_fcs_lport_ms_offline(port);
 }
 
-/**
+/*
  *  bfa_fcs_lport_n2n  functions
  */
 
-/**
+/*
  *   Called by fcs/port to initialize N2N topology.
  */
 static void
@@ -982,7 +974,7 @@ bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
 {
 }
 
-/**
+/*
  *   Called by fcs/port to notify transition to online state.
  */
 static void
@@ -1006,7 +998,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
            ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
             sizeof(wwn_t)) > 0) {
                port->pid = N2N_LOCAL_PID;
-               /**
+               /*
                 * First, check if we know the device by pwwn.
                 */
                rport = bfa_fcs_lport_get_rport_by_pwwn(port,
@@ -1035,7 +1027,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
        }
 }
 
-/**
+/*
  *   Called by fcs/port to notify transition to offline state.
  */
 static void
@@ -1094,11 +1086,11 @@ static void     bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
 static void    bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                  struct bfa_fcs_fdmi_port_attr_s *port_attr);
-/**
+/*
  *  fcs_fdmi_sm FCS FDMI state machine
  */
 
-/**
+/*
  *  FDMI State Machine events
  */
 enum port_fdmi_event {
@@ -1143,7 +1135,7 @@ static void     bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
 static void     bfa_fcs_lport_fdmi_sm_disabled(
                                struct bfa_fcs_lport_fdmi_s *fdmi,
                                enum port_fdmi_event event);
-/**
+/*
  *     Start in offline state - awaiting MS to send start.
  */
 static void
@@ -1510,7 +1502,7 @@ bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
                bfa_sm_fault(port->fcs, event);
        }
 }
-/**
+/*
  *  FDMI is disabled state.
  */
 static void
@@ -1525,7 +1517,7 @@ bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
        /* No op State. It can only be enabled at Driver Init. */
 }
 
-/**
+/*
  *  RHBA : Register HBA Attributes.
  */
 static void
@@ -1549,7 +1541,7 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        fdmi->fcxp = fcxp;
 
        pyld = bfa_fcxp_get_reqbuf(fcxp);
-       bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+       memset(pyld, 0, FC_MAX_PDUSZ);
 
        len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
                                   FDMI_RHBA);
@@ -1584,7 +1576,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
        bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
 
        rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
-       rhba->port_list.num_ports = bfa_os_htonl(1);
+       rhba->port_list.num_ports = cpu_to_be32(1);
        rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
 
        len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
@@ -1601,86 +1593,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         * Node Name
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
        attr->len = sizeof(wwn_t);
        memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Manufacturer
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
        attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
        memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                                *fields need
-                                                                *to be 4 byte
-                                                                *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Serial Number
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
        attr->len = (u16) strlen(fcs_hba_attr->serial_num);
        memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                                *fields need
-                                                                *to be 4 byte
-                                                                *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Model
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
        attr->len = (u16) strlen(fcs_hba_attr->model);
        memcpy(attr->value, fcs_hba_attr->model, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                                *fields need
-                                                                *to be 4 byte
-                                                                *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Model Desc
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
        attr->len = (u16) strlen(fcs_hba_attr->model_desc);
        memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                        *fields need
-                                                        *to be 4 byte
-                                                        *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
@@ -1688,18 +1663,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         */
        if (fcs_hba_attr->hw_version[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
-               attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
+               attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
                attr->len = (u16) strlen(fcs_hba_attr->hw_version);
                memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
-               attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                                *fields need
-                                                                *to be 4 byte
-                                                                *aligned */
+               attr->len = fc_roundup(attr->len, sizeof(u32));
                curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
                len += attr->len;
                count++;
-               attr->len =
-                       bfa_os_htons(attr->len + sizeof(attr->type) +
+               attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                                         sizeof(attr->len));
        }
 
@@ -1707,18 +1678,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         * Driver Version
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
        attr->len = (u16) strlen(fcs_hba_attr->driver_version);
        memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                        *fields need
-                                                        *to be 4 byte
-                                                        *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
@@ -1726,18 +1693,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         */
        if (fcs_hba_attr->option_rom_ver[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
-               attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
+               attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
                attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
                memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
-               attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                                *fields need
-                                                                *to be 4 byte
-                                                                *aligned */
+               attr->len = fc_roundup(attr->len, sizeof(u32));
                curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
                len += attr->len;
                count++;
-               attr->len =
-                       bfa_os_htons(attr->len + sizeof(attr->type) +
+               attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                                         sizeof(attr->len));
        }
 
@@ -1745,18 +1708,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         * f/w Version = driver version
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
        attr->len = (u16) strlen(fcs_hba_attr->driver_version);
        memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-       attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                        *fields need
-                                                        *to be 4 byte
-                                                        *aligned */
+       attr->len = fc_roundup(attr->len, sizeof(u32));
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
@@ -1764,18 +1723,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         */
        if (fcs_hba_attr->os_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
-               attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
+               attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
                attr->len = (u16) strlen(fcs_hba_attr->os_name);
                memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
-               attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                            *fields need
-                                                            *to be 4 byte
-                                                            *aligned */
+               attr->len = fc_roundup(attr->len, sizeof(u32));
                curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
                len += attr->len;
                count++;
-               attr->len =
-                       bfa_os_htons(attr->len + sizeof(attr->type) +
+               attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                                        sizeof(attr->len));
        }
 
@@ -1783,22 +1738,20 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
         * MAX_CT_PAYLOAD
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
+       attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
        attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
        memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
        len += attr->len;
        count++;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Update size of payload
         */
-       len += ((sizeof(attr->type) +
-                sizeof(attr->len)) * count);
+       len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 
-       rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
+       rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
        return len;
 }
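The RHBA payload builder above repeats one pattern per attribute: cast the cursor to struct fdmi_attr_s, store the big-endian attribute type, copy the value, round variable-length values up to a 4-byte boundary, advance the cursor past the type/length/value triple, and rewrite the length field as the big-endian size of the whole triple. A hedged consolidation of that pattern is sketched below; the helper name, the example_fdmi_attr layout, the single running payload total, and the use of the kernel's roundup() in place of the driver's fc_roundup() are assumptions for illustration, not part of this commit:

	#include <linux/types.h>
	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <asm/byteorder.h>

	struct example_fdmi_attr {
		__be16	type;		/* attribute type, big-endian on the wire */
		__be16	len;		/* length of the whole TLV, big-endian */
		u8	value[];	/* payload, padded to a 4-byte boundary */
	};

	/* Append one FDMI attribute TLV at *curr and return the advanced cursor.
	 * Assumes the payload buffer was zeroed beforehand (as the builder does
	 * with memset), so the alignment padding is already zero. */
	static u8 *
	example_append_attr(u8 *curr, u16 type, const void *val, u16 vlen,
			    u16 *pyld_len)
	{
		struct example_fdmi_attr *attr = (struct example_fdmi_attr *)curr;
		u16 padded = roundup(vlen, sizeof(u32));
		u16 tlv_len = sizeof(attr->type) + sizeof(attr->len) + padded;

		attr->type = cpu_to_be16(type);
		attr->len = cpu_to_be16(tlv_len);
		memcpy(attr->value, val, vlen);

		*pyld_len += tlv_len;
		return curr + tlv_len;
	}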
 
@@ -1825,7 +1778,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -1837,7 +1790,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  *  RPRT : Register Port
  */
 static void
@@ -1861,7 +1814,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        fdmi->fcxp = fcxp;
 
        pyld = bfa_fcxp_get_reqbuf(fcxp);
-       bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+       memset(pyld, 0, FC_MAX_PDUSZ);
 
        len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
                                   FDMI_RPRT);
@@ -1879,7 +1832,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
 }
 
-/**
+/*
  * This routine builds the Port Attribute Block that is used in RPA and RPRT commands.
  */
 static          u16
@@ -1909,56 +1862,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
         * FC4 Types
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
+       attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
        attr->len = sizeof(fcs_port_attr.supp_fc4_types);
        memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        ++count;
        attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+               cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * Supported Speed
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
+       attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
        attr->len = sizeof(fcs_port_attr.supp_speed);
        memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        ++count;
        attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+               cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * current Port Speed
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
+       attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
        attr->len = sizeof(fcs_port_attr.curr_speed);
        memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        ++count;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
         * max frame size
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
-       attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
+       attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
        attr->len = sizeof(fcs_port_attr.max_frm_size);
        memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
        curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
        len += attr->len;
        ++count;
-       attr->len =
-               bfa_os_htons(attr->len + sizeof(attr->type) +
+       attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                             sizeof(attr->len));
 
        /*
@@ -1966,18 +1917,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
         */
        if (fcs_port_attr.os_device_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
-               attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
+               attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
                attr->len = (u16) strlen(fcs_port_attr.os_device_name);
                memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
-               attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                            *fields need
-                                                            *to be 4 byte
-                                                            *aligned */
+               attr->len = fc_roundup(attr->len, sizeof(u32));
                curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
                len += attr->len;
                ++count;
-               attr->len =
-                       bfa_os_htons(attr->len + sizeof(attr->type) +
+               attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                                        sizeof(attr->len));
        }
        /*
@@ -1985,27 +1932,22 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
         */
        if (fcs_port_attr.host_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
-               attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
+               attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
                attr->len = (u16) strlen(fcs_port_attr.host_name);
                memcpy(attr->value, fcs_port_attr.host_name, attr->len);
-               attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable
-                                                            *fields need
-                                                            *to be 4 byte
-                                                            *aligned */
+               attr->len = fc_roundup(attr->len, sizeof(u32));
                curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
                len += attr->len;
                ++count;
-               attr->len =
-                       bfa_os_htons(attr->len + sizeof(attr->type) +
+               attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
                                sizeof(attr->len));
        }
 
        /*
         * Update size of payload
         */
-       port_attrib->attr_count = bfa_os_htonl(count);
-       len += ((sizeof(attr->type) +
-                sizeof(attr->len)) * count);
+       port_attrib->attr_count = cpu_to_be32(count);
+       len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
        return len;
 }
 
@@ -2050,7 +1992,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2062,7 +2004,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  *  RPA : Register Port Attributes.
  */
 static void
@@ -2086,15 +2028,13 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        fdmi->fcxp = fcxp;
 
        pyld = bfa_fcxp_get_reqbuf(fcxp);
-       bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+       memset(pyld, 0, FC_MAX_PDUSZ);
 
        len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
                                   FDMI_RPA);
 
-       attr_len =
-               bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
-                                        (u8 *) ((struct ct_hdr_s *) pyld
-                                                     + 1));
+       attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+                               (u8 *) ((struct ct_hdr_s *) pyld + 1));
 
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len + attr_len, &fchs,
@@ -2143,7 +2083,7 @@ bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2170,7 +2110,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
        struct bfa_fcs_lport_s *port = fdmi->ms->port;
        struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
 
-       bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
+       memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
 
        bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
                                        hba_attr->manufacturer);
@@ -2204,7 +2144,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                                sizeof(driver_info->host_os_patch));
        }
 
-       hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
+       hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
 }
 
 void
@@ -2215,7 +2155,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
        struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
        struct bfa_port_attr_s pport_attr;
 
-       bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
+       memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
 
        /*
         * get pport attributes from hal
@@ -2230,17 +2170,17 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
        /*
         * Supported Speeds
         */
-       port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+       port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
 
        /*
         * Current Speed
         */
-       port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
+       port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
 
        /*
         * Max PDU Size.
         */
-       port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
+       port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
 
        /*
         * OS device Name
@@ -2321,11 +2261,11 @@ static void     bfa_fcs_lport_ms_gfn_response(void *fcsarg,
                                               u32 rsp_len,
                                               u32 resid_len,
                                               struct fchs_s *rsp_fchs);
-/**
+/*
  *  fcs_ms_sm FCS MS state machine
  */
 
-/**
+/*
  *  MS State Machine events
  */
 enum port_ms_event {
@@ -2360,7 +2300,7 @@ static void     bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
                                               enum port_ms_event event);
 static void     bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
                                          enum port_ms_event event);
-/**
+/*
  *     Start in offline state - awaiting NS to send start.
  */
 static void
@@ -2432,7 +2372,7 @@ bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
                 */
                bfa_fcs_lport_fdmi_online(ms);
 
-               /**
+               /*
                 * if this is a Vport, go to online state.
                 */
                if (ms->port->vport) {
@@ -2595,7 +2535,7 @@ bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
                bfa_sm_fault(ms->port->fcs, event);
        }
 }
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -2657,12 +2597,12 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
 
-               num_entries = bfa_os_ntohl(gmal_resp->ms_len);
+               num_entries = be32_to_cpu(gmal_resp->ms_len);
                if (num_entries == 0) {
                        bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
                        return;
@@ -2795,7 +2735,7 @@ bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
                bfa_sm_fault(ms->port->fcs, event);
        }
 }
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -2853,7 +2793,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                gfn_resp = (wwn_t *)(cthdr + 1);
@@ -2871,7 +2811,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -3017,7 +2957,7 @@ bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
                bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
 }
 
-/**
+/*
  * @page ns_sm_info VPORT NS State Machine
  *
  * @section ns_sm_interactions VPORT NS State Machine Interactions
@@ -3080,11 +3020,11 @@ static void     bfa_fcs_lport_ns_process_gidft_pids(
                                u32 *pid_buf, u32 n_pids);
 
 static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
-/**
+/*
  *  fcs_ns_sm FCS nameserver interface state machine
  */
 
-/**
+/*
  * VPort NS State Machine events
  */
 enum vport_ns_event {
@@ -3139,7 +3079,7 @@ static void     bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
                                                enum vport_ns_event event);
 static void     bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
                                          enum vport_ns_event event);
-/**
+/*
  *     Start in offline state - awaiting linkup
  */
 static void
@@ -3628,7 +3568,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
 
 
 
-/**
+/*
  *  ns_pvt Nameserver local functions
  */
 
@@ -3724,7 +3664,7 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 }
 
-/**
+/*
  * Register the symbolic port name.
  */
 static void
@@ -3738,7 +3678,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        u8         symbl[256];
        u8         *psymbl = &symbl[0];
 
-       bfa_os_memset(symbl, 0, sizeof(symbl));
+       memset(symbl, 0, sizeof(symbl));
 
        bfa_trc(port->fcs, port->port_cfg.pwwn);
 
@@ -3755,7 +3695,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
         * for V-Port, form a Port Symbolic Name
         */
        if (port->vport) {
-               /**
+               /*
                 * For Vports, we append the vport's port symbolic name
                 * to that of the base port.
                 */
@@ -3815,7 +3755,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                port->stats.ns_rspnid_accepts++;
@@ -3829,7 +3769,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  * Register FC4-Types
  */
 static void
@@ -3887,7 +3827,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                port->stats.ns_rftid_accepts++;
@@ -3901,7 +3841,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  * Register FC4-Features : Should be done after RFT_ID
  */
 static void
@@ -3964,7 +3904,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                port->stats.ns_rffid_accepts++;
@@ -3982,7 +3922,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        } else
                bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
-/**
+/*
  * Query Fabric for FC4-Types Devices.
  *
  * TBD : Need to use a local (FCS private) response buffer, since the response
@@ -4058,7 +3998,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        switch (cthdr->cmd_rsp_code) {
 
@@ -4102,7 +4042,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
        }
 }
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *     param[in]       port - pointer to bfa_fcs_lport_t.
@@ -4166,7 +4106,7 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
        }
 }
 
-/**
+/*
  *  fcs_ns_public FCS nameserver public interfaces
  */
 
@@ -4227,7 +4167,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
        }
 }
 
-/**
+/*
  * FCS SCN
  */
 
@@ -4250,11 +4190,11 @@ static void     bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
                                             struct fchs_s *rx_fchs);
 static void     bfa_fcs_lport_scn_timeout(void *arg);
 
-/**
+/*
  *  fcs_scm_sm FCS SCN state machine
  */
 
-/**
+/*
  * VPort SCN State Machine events
  */
 enum port_scn_event {
@@ -4278,7 +4218,7 @@ static void     bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
 static void     bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
                                           enum port_scn_event event);
 
-/**
+/*
  *     Starting state - awaiting link up.
  */
 static void
@@ -4382,11 +4322,11 @@ bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
 
 
 
-/**
+/*
  *  fcs_scn_private FCS SCN private functions
  */
 
-/**
+/*
  * This routine will be called to send a SCR command.
  */
 static void
@@ -4499,7 +4439,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
                          FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *     param[in]       vport           - pointer to bfa_fcs_lport_t.
@@ -4522,7 +4462,7 @@ bfa_fcs_lport_scn_timeout(void *arg)
 
 
 
-/**
+/*
  *  fcs_scn_public FCS state change notification public interfaces
  */
 
@@ -4563,7 +4503,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
 
        bfa_trc(port->fcs, rpid);
 
-       /**
+       /*
         * If this is an unknown device, then it just came online.
         * Otherwise let rport handle the RSCN event.
         */
@@ -4579,7 +4519,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
                bfa_fcs_rport_scn(rport);
 }
 
-/**
+/*
  * rscn format based PID comparison
  */
 #define __fc_pid_match(__c0, __c1, __fmt)              \
@@ -4624,7 +4564,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
        int             i = 0, j;
 
        num_entries =
-               (bfa_os_ntohs(rscn->payldlen) -
+               (be16_to_cpu(rscn->payldlen) -
                 sizeof(u32)) / sizeof(rscn->event[0]);
 
        bfa_trc(port->fcs, num_entries);
@@ -4691,18 +4631,18 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
                }
        }
 
-       /**
-        * If any of area, domain or fabric RSCN is received, do a fresh discovery
-        * to find new devices.
+       /*
+        * If any of area, domain or fabric RSCN is received, do a fresh
+        * discovery to find new devices.
         */
        if (nsquery)
                bfa_fcs_lport_ns_query(port);
 }
 
-/**
+/*
  * BFA FCS port
  */
-/**
+/*
  *  fcs_port_api BFA FCS port API
  */
 struct bfa_fcs_lport_s *
@@ -4943,10 +4883,10 @@ bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
 void
 bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
 {
-       bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
+       memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
 }
 
-/**
+/*
  * FCS virtual port state machine
  */
 
@@ -4967,11 +4907,11 @@ static void     bfa_fcs_vport_timeout(void *vport_arg);
 static void     bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
 static void     bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
 
-/**
+/*
  *  fcs_vport_sm FCS virtual port state machine
  */
 
-/**
+/*
  * VPort State Machine events
  */
 enum bfa_fcs_vport_event {
@@ -5024,7 +4964,7 @@ static struct bfa_sm_table_s  vport_sm_table[] = {
        {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
 };
 
-/**
+/*
  * Beginning state.
  */
 static void
@@ -5045,7 +4985,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Created state - a start event is required to start up the state machine.
  */
 static void
@@ -5062,7 +5002,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
                        bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
                        bfa_fcs_vport_do_fdisc(vport);
                } else {
-                       /**
+                       /*
                         * Fabric is offline or not NPIV capable, stay in
                         * offline state.
                         */
@@ -5078,7 +5018,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
 
        case BFA_FCS_VPORT_SM_ONLINE:
        case BFA_FCS_VPORT_SM_OFFLINE:
-               /**
+               /*
                 * Ignore ONLINE/OFFLINE events from fabric
                 * till vport is started.
                 */
@@ -5089,7 +5029,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Offline state - awaiting ONLINE event from fabric SM.
  */
 static void
@@ -5127,7 +5067,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
 }
 
 
-/**
+/*
  * FDISC is sent and awaiting reply from fabric.
  */
 static void
@@ -5174,7 +5114,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * FDISC attempt failed - a timer is active to retry FDISC.
  */
 static void
@@ -5208,7 +5148,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Vport is online (FDISC is complete).
  */
 static void
@@ -5235,7 +5175,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Vport is being deleted - awaiting lport delete completion to send
  * LOGO to fabric.
  */
@@ -5264,7 +5204,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Error State.
  * This state will be set when the Vport Creation fails due
  * to errors like Dup WWN. In this state the only operation allowed
@@ -5288,7 +5228,7 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * Lport cleanup is in progress since vport is being deleted. Fabric is
  * offline, so no LOGO is needed to complete vport deletion.
  */
@@ -5313,7 +5253,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
        }
 }
 
-/**
+/*
  * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
  * is done.
  */
@@ -5347,10 +5287,10 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
 
 
 
-/**
+/*
  *  fcs_vport_private FCS virtual port private functions
  */
-/**
+/*
  * This routine will be called to send a FDISC command.
  */
 static void
@@ -5397,7 +5337,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
        }
 }
 
-/**
+/*
  *     Called to send a logout to the fabric. Used when a V-Port is
  *     deleted/stopped.
  */
@@ -5411,7 +5351,7 @@ bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
 }
 
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *     param[in]       vport           - pointer to bfa_fcs_vport_t.
@@ -5449,11 +5389,11 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
 
 
 
-/**
+/*
  *  fcs_vport_public FCS virtual port public interfaces
  */
 
-/**
+/*
  * Online notification from fabric SM.
  */
 void
@@ -5463,7 +5403,7 @@ bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
 }
 
-/**
+/*
  * Offline notification from fabric SM.
  */
 void
@@ -5473,7 +5413,7 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
 }
 
-/**
+/*
  * Cleanup notification from fabric SM on link timer expiry.
  */
 void
@@ -5481,7 +5421,7 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
 {
        vport->vport_stats.fab_cleanup++;
 }
-/**
+/*
  * delete notification from fabric SM. To be invoked from within FCS.
  */
 void
@@ -5490,7 +5430,7 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
 }
 
-/**
+/*
  * Delete completion callback from associated lport
  */
 void
@@ -5501,11 +5441,11 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
 
 
 
-/**
+/*
  *  fcs_vport_api Virtual port API
  */
 
-/**
+/*
  *     Use this function to instantiate a new FCS vport object. This
  *     function will not trigger any HW initialization process (which will be
  *     done in vport_start() call)
@@ -5555,7 +5495,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  *     Use this function to instantiate a new FCS PBC vport object. This
  *     function will not trigger any HW initialization process (which will be
  *     done in vport_start() call)
@@ -5585,7 +5525,7 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
        return rc;
 }
 
-/**
+/*
  *     Use this function to find out if this is a pbc vport or not.
  *
  * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5603,7 +5543,7 @@ bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
 
 }
 
-/**
+/*
  * Use this function to initialize the vport.
  *
  * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5618,7 +5558,7 @@ bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  *     Use this function to quiesce the vport object. This function returns
  *     immediately; when the vport is actually stopped, the
  *     bfa_drv_vport_stop_cb() callback will be called.
@@ -5635,7 +5575,7 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  *     Use this function to delete a vport object. Fabric object should
  *     be stopped before this function call.
  *
@@ -5657,7 +5597,7 @@ bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  *     Use this function to get vport's current status info.
  *
  *     param[in] vport         pointer to bfa_fcs_vport_t.
@@ -5672,13 +5612,13 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
        if (vport == NULL || attr == NULL)
                return;
 
-       bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+       memset(attr, 0, sizeof(struct bfa_vport_attr_s));
 
        bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
        attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
 }
 
-/**
+/*
  *     Use this function to get vport's statistics.
  *
  *     param[in]       vport   pointer to bfa_fcs_vport_t.
@@ -5693,7 +5633,7 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
        *stats = vport->vport_stats;
 }
 
-/**
+/*
  *     Use this function to clear vport's statistics.
  *
  *     param[in]       vport   pointer to bfa_fcs_vport_t.
@@ -5703,10 +5643,10 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
 void
 bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
 {
-       bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+       memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
 }
 
-/**
+/*
  *     Lookup a virtual port. Excludes base port from lookup.
  */
 struct bfa_fcs_vport_s *
@@ -5728,7 +5668,7 @@ bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
        return vport;
 }
 
-/**
+/*
  * FDISC Response
  */
 void
@@ -5784,7 +5724,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
        }
 }
 
-/**
+/*
  * LOGO response
  */
 void
@@ -5794,7 +5734,7 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
        bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 }
 
-/**
+/*
  * Received clear virtual link
  */
 void
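Note on the byte-order churn in the lport/vport code above: the driver-private bfa_os_htons/htonl and bfa_os_ntohs/ntohl wrappers are replaced by the kernel's generic big-endian helpers, since Fibre Channel headers, CT responses, and FDMI fields are big-endian on the wire. A small hedged illustration of the helpers the commit switches to (the values are examples only):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void example_endian_usage(void)
	{
		__be16 wire16 = cpu_to_be16(0x0001);	/* host -> wire, e.g. an attr->type */
		__be32 wire32 = cpu_to_be32(1);		/* e.g. num_attached_nodes */
		u16 host16 = be16_to_cpu(wire16);	/* wire -> host, e.g. cmd_rsp_code */
		u32 host32 = be32_to_cpu(wire32);	/* e.g. gmal_resp->ms_len */

		(void)host16;
		(void)host32;
	}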
index 635f0cd887145deeb96b2f24914a8622269b3319..47f35c0ef29a0c69f5d6842a6db0d173a0195e84 100644 (file)
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  rport.c Remote port implementation.
  */
 
@@ -75,7 +75,7 @@ static void   bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
 static void    bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
                                struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/**
+/*
  *  fcs_rport_sm FCS rport state machine events
  */
 
@@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
        {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
 };
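rport_sm_table above, like the itnim and vport tables earlier in this commit, pairs each state-handler function with an enum value so the current state, which is held as a function pointer, can be reported through bfa_sm_to_state() in the query paths. The sketch below shows how such a lookup typically works; the structure layout and helper are simplified assumptions, not the driver's actual definitions:

	/* Illustrative function-pointer state machine with an enum lookup table. */
	typedef void (*example_sm_t)(void *obj, int event);

	struct example_sm_table {
		example_sm_t	sm;	/* state handler */
		int		state;	/* enum value reported for that handler */
	};

	static int
	example_sm_to_state(const struct example_sm_table *tbl, int entries,
			    example_sm_t sm)
	{
		int i;

		for (i = 0; i < entries; i++)
			if (tbl[i].sm == sm)
				return tbl[i].state;

		return -1;	/* unknown handler */
	}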
 
-/**
+/*
  *             Beginning state.
  */
 static void
@@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *             PLOGI is being sent.
  */
 static void
@@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             PLOGI ACC is being sent.
  */
 static void
@@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
 
        case RPSM_EVENT_PLOGI_RCVD:
        case RPSM_EVENT_SCN:
-               /**
+               /*
                 * Ignore, SCN is possibly online notification.
                 */
                break;
@@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_HCB_OFFLINE:
-               /**
+               /*
                 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
                 */
                break;
@@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             PLOGI is sent.
  */
 static void
@@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             PLOGI is sent.
  */
 static void
@@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *             PLOGI is complete. Awaiting BFA rport online callback. FC-4s
  *             are offline.
  */
@@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_SCN:
-               /**
+               /*
                 * @todo
                 * Ignore SCN - PLOGI just completed, FC-4 login should detect
                 * device failures.
@@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is ONLINE. FC-4s active.
  */
 static void
@@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *             An SCN event is received in ONLINE state. NS query is being sent
  *             prior to ADISC authentication with rport. FC-4s are paused.
  */
@@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
                break;
 
        case RPSM_EVENT_SCN:
-               /**
+               /*
                 * ignore SCN, wait for response to query itself
                 */
                break;
@@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *     An SCN event is received in ONLINE state. NS query is sent to rport.
  *     FC-4s are paused.
  */
@@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *     An SCN event is received in ONLINE state. ADISC is being sent for
  *     authenticating with rport. FC-4s are paused.
  */
@@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             An SCN event is received in ONLINE state. ADISC is sent to rport.
  *             FC-4s are paused.
  */
@@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
                break;
 
        case RPSM_EVENT_PLOGI_RCVD:
-               /**
+               /*
                 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
                 * At least go offline when a PLOGI is received.
                 */
@@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
                break;
 
        case RPSM_EVENT_SCN:
-               /**
+               /*
                 * already processing RSCN
                 */
                break;
@@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *             Rport has sent LOGO. Awaiting FC-4 offline completion callback.
  */
 static void
@@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             LOGO needs to be sent to rport. Awaiting FC-4 offline completion
  *             callback.
  */
@@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *     Rport is going offline. Awaiting FC-4 offline completion callback.
  */
 static void
@@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
        case RPSM_EVENT_ADDRESS_CHANGE:
-               /**
+               /*
                 * rport is already going offline.
                 * SCN - ignore and wait till transitioning to offline state
                 */
@@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is offline. FC-4s are offline. Awaiting BFA rport offline
  *             callback.
  */
@@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
        case RPSM_EVENT_SCN:
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
-               /**
+               /*
                 * Ignore, already offline.
                 */
                break;
@@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is offline. FC-4s are offline. Awaiting BFA rport offline
  *             callback to send LOGO accept.
  */
@@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
 
        case RPSM_EVENT_LOGO_RCVD:
        case RPSM_EVENT_PRLO_RCVD:
-               /**
+               /*
                 * Ignore - already processing a LOGO.
                 */
                break;
@@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is being deleted. FC-4s are offline.
  *             Awaiting BFA rport offline
  *             callback to send LOGO.
@@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is being deleted. FC-4s are offline. LOGO is being sent.
  */
 static void
@@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport is offline. FC-4s are offline. BFA rport is offline.
  *             Timer active to delete stale rport.
  */
@@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
        }
 }
 
-/**
+/*
  *     Rport address has changed. Nameserver discovery request is being sent.
  */
 static void
@@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Nameserver discovery failed. Waiting for timeout to retry.
  */
 static void
@@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *             Rport address has changed. Nameserver discovery request is sent.
  */
 static void
@@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
                bfa_fcs_rport_send_prlo_acc(rport);
                break;
        case RPSM_EVENT_SCN:
-               /**
+               /*
                 * ignore, wait for NS query response
                 */
                break;
 
        case RPSM_EVENT_LOGO_RCVD:
-               /**
+               /*
                 * Not logged-in yet. Accept LOGO.
                 */
                bfa_fcs_rport_send_logo_acc(rport);
@@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
 
 
 
-/**
+/*
  *  fcs_rport_private FCS RPORT private functions
  */
 
@@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 
        plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
 
-       /**
+       /*
         * Check for failure first.
         */
        if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                return;
        }
 
-       /**
+       /*
         * PLOGI is complete. Make sure this device is not one of the known
         * device with a new FC port address.
         */
@@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                }
        }
 
-       /**
+       /*
         * Normal login path -- no evil twins.
         */
        rport->stats.plogi_accs++;
@@ -1621,7 +1621,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
        bfa_trc(rport->fcs, rport->pwwn);
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                /* Check if the pid is the same as before. */
@@ -1691,7 +1691,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
        bfa_trc(rport->fcs, rport->pwwn);
 
        cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-       cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+       cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
        if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
                bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
@@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
        }
 }
 
-/**
+/*
  *     Called to send a logout to the rport.
  */
 static void
@@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
        bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
 }
 
-/**
+/*
  *     Send ACC for a LOGO received.
  */
 static void
@@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
                        FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *     brief
  *     This routine will be called by bfa_timer on timer timeouts.
  *
@@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
        struct bfa_fcs_rport_s *rport;
        struct bfad_rport_s     *rport_drv;
 
-       /**
+       /*
         * allocate rport
         */
        if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
        rport->pid = rpid;
        rport->pwwn = pwwn;
 
-       /**
+       /*
         * allocate BFA rport
         */
        rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
                return NULL;
        }
 
-       /**
+       /*
         * allocate FC-4s
         */
        bfa_assert(bfa_fcs_lport_is_initiator(port));
@@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
 {
        struct bfa_fcs_lport_s *port = rport->port;
 
-       /**
+       /*
         * - delete FC-4s
         * - delete BFA rport
         * - remove from queue of rports
@@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
        }
 }
 
-/**
+/*
  * Update rport parameters from PLOGI or PLOGI accept.
  */
 static void
@@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
 {
        bfa_fcs_lport_t *port = rport->port;
 
-       /**
+       /*
         * - port name
         * - node name
         */
        rport->pwwn = plogi->port_name;
        rport->nwwn = plogi->node_name;
 
-       /**
+       /*
         * - class of service
         */
        rport->fc_cos = 0;
@@ -2118,16 +2118,16 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
        if (plogi->class2.class_valid)
                rport->fc_cos |= FC_CLASS_2;
 
-       /**
+       /*
         * - CISC
         * - MAX receive frame size
         */
        rport->cisc = plogi->csp.cisc;
-       rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz);
+       rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
 
-       bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+       bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
        bfa_trc(port->fcs, port->fabric->bb_credit);
-       /**
+       /*
         * Direct Attach P2P mode :
         * This is to handle a bug (233476) in IBM targets in Direct Attach
         *  Mode. Basically, in FLOGI Accept the target would have
@@ -2136,19 +2136,19 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
         * in PLOGI.
         */
        if ((!bfa_fcs_fabric_is_switched(port->fabric))  &&
-               (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
+               (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
 
-               bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+               bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
                bfa_trc(port->fcs, port->fabric->bb_credit);
 
-               port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
+               port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
                bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
                                          port->fabric->bb_credit);
        }
 
 }
 
-/**
+/*
  *     Called to handle LOGO received from an existing remote port.
  */
 static void
@@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
 
 
 
-/**
+/*
  *  fcs_rport_public FCS rport public interfaces
  */
 
-/**
+/*
  *     Called by bport/vport to create a remote port instance for a discovered
  *     remote device.
  *
@@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
        return rport;
 }
 
-/**
+/*
  * Called to create a rport for which only the wwn is known.
  *
  * @param[in] port     - base port
@@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
        bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
        return rport;
 }
-/**
+/*
  * Called by bport in private loop topology to indicate that a
  * rport has been discovered and plogi has been completed.
  *
@@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
 }
 
-/**
+/*
  *     Called by bport/vport to handle PLOGI received from a new remote port.
  *     If an existing rport does a plogi, it will be handled separately.
  */
@@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2)
        return 0;
 }
 
-/**
+/*
  *     Called by bport/vport to handle PLOGI received from an existing
  *      remote port.
  */
@@ -2280,7 +2280,7 @@ void
 bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
                        struct fc_logi_s *plogi)
 {
-       /**
+       /*
         * @todo Handle P2P and initiator-initiator.
         */
 
@@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
        rport->reply_oxid = rx_fchs->ox_id;
        bfa_trc(rport->fcs, rport->reply_oxid);
 
-       /**
+       /*
         * In Switched fabric topology,
         * PLOGI to each other. If our pwwn is smaller, ignore it,
         * if it is not a well known address.
@@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-/**
+/*
  * Called by bport/vport to delete a remote port instance.
  *
  * Rport delete is called under the following conditions:
@@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 }
 
-/**
+/*
  * Called by bport/vport when a target goes offline.
  *
  */
@@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 }
 
-/**
+/*
  * Called by bport in n2n when a target (attached port) becomes online.
  *
  */
@@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport)
 {
        bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
 }
-/**
+/*
  *     Called by bport/vport to notify SCN for the remote port
  */
 void
@@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-/**
+/*
  *     Called by fcpim to notify that the ITN cleanup is done.
  */
 void
@@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
 }
 
-/**
+/*
  *     Called by fcptm to notify that the ITN cleanup is done.
  */
 void
@@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
 }
 
-/**
+/*
  *     brief
  *     This routine is the BFA callback for the bfa_rport_online() call.
  *
@@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg)
        bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
 }
 
-/**
+/*
  *     brief
  *     This routine is the BFA callback for the bfa_rport_offline() call.
  *
@@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg)
        bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
 }
 
-/**
+/*
  *     brief
  *     This routine is a static BFA callback when there is a QoS flow_id
  *     change notification
@@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
        bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/**
+/*
  *     brief
  *     This routine is a static BFA callback when there is a QoS priority
  *     change notification
@@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
        bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/**
+/*
  *             Called to process any unsolicited frames from this remote port
  */
 void
@@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport)
        bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 }
 
-/**
+/*
  *             Called to process any unsolicited frames from this remote port
  */
 void
@@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
                        FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  * Return state of rport.
  */
 int
@@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
        return bfa_sm_to_state(rport_sm_table, rport->sm);
 }
 
-/**
+/*
  *     brief
  *              Called by the Driver to set rport delete/ageout timeout
  *
@@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id)
 
 
 
-/**
+/*
  * Remote port implementation.
  */
 
-/**
+/*
  *  fcs_rport_api FCS rport API.
  */
 
-/**
+/*
  *     Direct API to add a target by port wwn. This interface is used, for
  *     example, by bios when target pwwn is known from boot lun configuration.
  */
@@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  *     Direct API to remove a target and its associated resources. This
  *     interface is used, for example, by driver to remove target
  *     ports from the target list for a VM.
@@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in)
 
 }
 
-/**
+/*
  *     Remote device status for display/debug.
  */
 void
@@ -2674,7 +2674,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
        bfa_fcs_lport_t *port = rport->port;
        bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
 
-       bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+       memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
 
        rport_attr->pid = rport->pid;
        rport_attr->pwwn = rport->pwwn;
@@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
        }
 }
 
-/**
+/*
  *     Per remote device statistics.
  */
 void
@@ -2717,7 +2717,7 @@ bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
 void
 bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
 {
-       bfa_os_memset((char *)&rport->stats, 0,
+       memset((char *)&rport->stats, 0,
                        sizeof(struct bfa_rport_stats_s));
 }
 
@@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed)
 
 
 
-/**
+/*
  * Remote port features (RPF) implementation.
  */
 
@@ -2786,7 +2786,7 @@ static void     bfa_fcs_rpf_rpsc2_response(void *fcsarg,
 
 static void     bfa_fcs_rpf_timeout(void *arg);
 
-/**
+/*
  *  fcs_rport_ftrs_sm FCS rport state machine events
  */
 
@@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
                bfa_sm_fault(rport->fcs, event);
        }
 }
-/**
+/*
  * Called when Rport is created.
  */
 void
@@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
        bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
 }
 
-/**
+/*
  * Called when Rport becomes online
  */
 void
@@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
                bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
 }
 
-/**
+/*
  * Called when Rport becomes offline
  */
 void
@@ -3090,16 +3090,16 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
        rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
        if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
                rport->stats.rpsc_accs++;
-               num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
+               num_ents = be16_to_cpu(rpsc2_acc->num_pids);
                bfa_trc(rport->fcs, num_ents);
                if (num_ents > 0) {
                        bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
                        bfa_trc(rport->fcs,
-                               bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
+                               be16_to_cpu(rpsc2_acc->port_info[0].pid));
                        bfa_trc(rport->fcs,
-                               bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+                               be16_to_cpu(rpsc2_acc->port_info[0].speed));
                        bfa_trc(rport->fcs,
-                               bfa_os_ntohs(rpsc2_acc->port_info[0].index));
+                               be16_to_cpu(rpsc2_acc->port_info[0].index));
                        bfa_trc(rport->fcs,
                                rpsc2_acc->port_info[0].type);
 
@@ -3109,7 +3109,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
                        }
 
                        rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
-                               bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+                               be16_to_cpu(rpsc2_acc->port_info[0].speed));
 
                        bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
                }
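The rport.c hunks above swap the driver's bfa_os_ntohs() for the kernel's be16_to_cpu(), which converts a big-endian wire field (Fibre Channel is big-endian on the wire) to host order and lets sparse check the __be16 annotation. A small sketch under those assumptions, with a hypothetical field name:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical: receive-size field as carried big-endian in a PLOGI payload. */
static inline u16 plogi_rx_size(__be16 rxsz_wire)
{
	/* was: bfa_os_ntohs(rxsz_wire) -- same value, standard helper */
	return be16_to_cpu(rxsz_wire);
}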
index c787d3af0886e2172d6180d792c5202c695b9a30..d8464ae60070438b01ed785d2bd667042eb1160d 100644 (file)
@@ -22,7 +22,7 @@ void
 bfa_hwcb_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
-       bfa_os_addr_t           kva = bfa_ioc_bar0(&bfa->ioc);
+       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
        int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
@@ -60,8 +60,8 @@ bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
 static void
 bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
 {
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
-               __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
+       writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
+                       bfa->iocfc.bfa_regs.intr_status);
 }
 
 void
@@ -72,8 +72,8 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
 static void
 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
 {
-       bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
-               __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
+       writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+                       bfa->iocfc.bfa_regs.intr_status);
 }
 
 void
@@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
        *num_vecs = __HFN_NUMINTS;
 }
 
-/**
+/*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
@@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
                bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/**
+/*
  * Crossbow -- dummy, interrupts are masked
  */
 void
@@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 {
 }
 
-/**
+/*
  * No special enable/disable -- vector assignments are implicit.
  */
 void
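bfa_hw_cb.c now uses the kernel MMIO accessors directly; note that the argument order flips, because writel() takes the value first and the __iomem address second, whereas bfa_reg_write() apparently took the address first. A sketch of the interrupt-acknowledge pattern from the hunks above, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical queue-interrupt ack, mirroring the reqq/rspq ack hunks. */
static inline void ack_queue_intr(void __iomem *intr_status, u32 qbit)
{
	/* was: bfa_reg_write(intr_status, qbit) -- value now comes first */
	writel(qbit, intr_status);
}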
index c97ebafec5ea980ec0a4597aa9ac4719001184fd..b0efbc713ffe7c12a83bffe2172876d11baff9da 100644 (file)
@@ -31,15 +31,15 @@ static void
 bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
 {
        int fn = bfa_ioc_pcifn(&bfa->ioc);
-       bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
 
        if (msix)
-               bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
+               writel(vec, kva + __ct_msix_err_vec_reg[fn]);
        else
-               bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
+               writel(0, kva + __ct_msix_err_vec_reg[fn]);
 }
 
-/**
+/*
  * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
  */
 static void
@@ -51,7 +51,7 @@ void
 bfa_hwct_reginit(struct bfa_s *bfa)
 {
        struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
-       bfa_os_addr_t           kva = bfa_ioc_bar0(&bfa->ioc);
+       void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
        int                     i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
        if (fn == 0) {
@@ -88,8 +88,8 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
 {
        u32     r32;
 
-       r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
-       bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
+       r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+       writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
 }
 
 void
@@ -97,8 +97,8 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
 {
        u32     r32;
 
-       r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
-       bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
+       r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+       writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
 }
 
 void
@@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
        *num_vecs = BFA_MSIX_CT_MAX;
 }
 
-/**
+/*
  * Setup MSI-X vector for catapult
  */
 void
@@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
                bfa->msix.handler[i] = bfa_hwct_msix_dummy;
 }
 
-/**
+/*
  * Enable MSI-X vectors
  */
 void
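bfa_hw_ct.c additionally retypes register base addresses from bfa_os_addr_t to void __iomem *, the annotation sparse uses to ensure MMIO pointers are only dereferenced through readl()/writel() and friends. A minimal sketch, with a hypothetical register offset:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_FWSTATE_OFF	0x0	/* hypothetical offset, not from the patch */

static inline u32 read_fwstate(void __iomem *bar0)
{
	/* an __iomem pointer is never dereferenced directly, only via accessors */
	return readl(bar0 + EXAMPLE_FWSTATE_OFF);
}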
index 6795b247791a5d48f8b4572997f5f68afbd7ec95..54475b53a5ab1494490f2561fdc0c9d7884c0a55 100644 (file)
@@ -23,7 +23,7 @@
 
 BFA_TRC_FILE(CNA, IOC);
 
-/**
+/*
  * IOC local definitions
  */
 #define BFA_IOC_TOV            3000    /* msecs */
@@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC);
          BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
 
-/**
+/*
  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  */
 
@@ -73,7 +73,7 @@ BFA_TRC_FILE(CNA, IOC);
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)                \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
-                       bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
+                       readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 
@@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
-/**
+/*
  *  hal_ioc_sm
  */
 
-/**
+/*
  * IOC state machine definitions/declarations
  */
 enum ioc_event {
@@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 };
 
-/**
+/*
  * IOCPF state machine definitions/declarations
  */
 
@@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
 
-/**
+/*
  * IOCPF state machine events
  */
 enum iocpf_event {
@@ -191,7 +191,7 @@ enum iocpf_event {
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
 };
 
-/**
+/*
  * IOCPF states
  */
 enum bfa_iocpf_state {
@@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 };
 
-/**
+/*
  * IOC State Machine
  */
 
-/**
+/*
  * Beginning state. IOC uninit state.
  */
 
@@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
 {
 }
 
-/**
+/*
  * IOC is in uninit state.
  */
 static void
@@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
                bfa_sm_fault(ioc, event);
        }
 }
-/**
+/*
  * Reset entry actions -- initialize state machine
  */
 static void
@@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 }
 
-/**
+/*
  * IOC is in reset state.
  */
 static void
@@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
        bfa_iocpf_enable(ioc);
 }
 
-/**
+/*
  * Host IOC function is being enabled, awaiting response from firmware.
  * Semaphore is acquired.
  */
@@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
        bfa_ioc_send_getattr(ioc);
 }
 
-/**
+/*
  * IOC configuration in progress. Timer is active.
  */
 static void
@@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
        BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
 }
 
-/**
+/*
  * IOC is being disabled
  */
 static void
@@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
        }
 }
 
-/**
+/*
  * IOC disable completion entry.
  */
 static void
@@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 }
 
-/**
+/*
  * Hardware initialization failed.
  */
 static void
@@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
                break;
 
        case IOC_E_FAILED:
-               /**
+               /*
                 * Initialization failure during iocpf init retry.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
        struct bfa_ioc_hbfail_notify_s  *notify;
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
-       /**
+       /*
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
@@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
                "Heart Beat of IOC has failed\n");
 }
 
-/**
+/*
  * IOC failure.
  */
 static void
@@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
        switch (event) {
 
        case IOC_E_FAILED:
-               /**
+               /*
                 * Initialization failure during iocpf recovery.
                 * !!! Fall through !!!
                 */
@@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 
 
 
-/**
+/*
  * IOCPF State Machine
  */
 
 
-/**
+/*
  * Reset entry actions -- initialize state machine
  */
 static void
@@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
        iocpf->auto_recover = bfa_auto_recover;
 }
 
-/**
+/*
  * Beginning state. IOC is in reset state.
  */
 static void
@@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        }
 }
 
-/**
+/*
  * Semaphore should be acquired for version check.
  */
 static void
@@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
-/**
+/*
  * Awaiting h/w semaphore to continue with version check.
  */
 static void
@@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        }
 }
 
-/**
+/*
  * Notify enable completion callback.
  */
 static void
@@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
        bfa_iocpf_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * Awaiting firmware version match.
  */
 static void
@@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        }
 }
 
-/**
+/*
  * Request for semaphore.
  */
 static void
@@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
        bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
-/**
+/*
  * Awaiting semaphore for h/w initialization.
  */
 static void
@@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
        bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
 }
 
-/**
+/*
  * Hardware is being initialized. Interrupts are enabled.
  * Holding hardware semaphore lock.
  */
@@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
        bfa_ioc_send_enable(iocpf->ioc);
 }
 
-/**
+/*
  * Host IOC function is being enabled, awaiting response from firmware.
  * Semaphore is acquired.
  */
@@ -866,8 +866,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        case IOCPF_E_TIMEOUT:
                iocpf->retry_count++;
                if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-                       bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
-                                     BFI_IOC_UNINIT);
+                       writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        break;
                }
@@ -944,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
        bfa_ioc_send_disable(iocpf->ioc);
 }
 
-/**
+/*
  * IOC is being disabled
  */
 static void
@@ -968,7 +967,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
                 */
 
        case IOCPF_E_TIMEOUT:
-               bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+               writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;
 
@@ -980,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
        }
 }
 
-/**
+/*
  * IOC disable completion entry.
  */
 static void
@@ -1018,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
        bfa_iocpf_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * Hardware initialization failed.
  */
 static void
@@ -1053,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
 {
-       /**
+       /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);
-       bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+       writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
 
-       /**
+       /*
         * Notify other functions on HB failure.
         */
        bfa_ioc_notify_hbfail(iocpf->ioc);
 
-       /**
+       /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_hbfail(iocpf->ioc);
@@ -1073,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
                bfa_iocpf_recovery_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * IOC is in failed state.
  */
 static void
@@ -1101,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 
 
-/**
+/*
  *  hal_ioc_pvt BFA IOC private functions
  */
 
@@ -1113,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
 
        ioc->cbfn->disable_cbfn(ioc->bfa);
 
-       /**
+       /*
         * Notify common modules registered for notification.
         */
        list_for_each(qe, &ioc->hb_notify_q) {
@@ -1123,18 +1122,18 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
 }
 
 bfa_boolean_t
-bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_get(void __iomem *sem_reg)
 {
        u32 r32;
        int cnt = 0;
 #define BFA_SEM_SPINCNT        3000
 
-       r32 = bfa_reg_read(sem_reg);
+       r32 = readl(sem_reg);
 
        while (r32 && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
-               bfa_os_udelay(2);
-               r32 = bfa_reg_read(sem_reg);
+               udelay(2);
+               r32 = readl(sem_reg);
        }
 
        if (r32 == 0)
@@ -1145,9 +1144,9 @@ bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
 }
 
 void
-bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_release(void __iomem *sem_reg)
 {
-       bfa_reg_write(sem_reg, 1);
+       writel(1, sem_reg);
 }
 
 static void
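The semaphore helpers above now spin with readl()/udelay() in place of the old wrappers. A compact sketch of that acquire loop, under the same assumption stated in the driver comment (the register reads 0 on the acquiring read and non-zero while held elsewhere); the names and spin count are illustrative:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

#define EXAMPLE_SEM_SPINCNT	3000	/* mirrors BFA_SEM_SPINCNT above */

/* Returns true once the semaphore register reads back 0, i.e. acquired. */
static bool example_sem_get(void __iomem *sem_reg)
{
	int cnt = 0;
	u32 r32 = readl(sem_reg);

	while (r32 && cnt < EXAMPLE_SEM_SPINCNT) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}
	return r32 == 0;
}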
@@ -1155,11 +1154,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
 {
        u32     r32;
 
-       /**
+       /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
-       r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+       r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == 0) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
@@ -1171,7 +1170,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
 {
-       bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
+       writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
 static void
@@ -1180,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
        bfa_sem_timer_stop(ioc);
 }
 
-/**
+/*
  * Initialize LPU local memory (aka secondary memory / SRAM)
  */
 static void
@@ -1190,7 +1189,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
        int             i;
 #define PSS_LMEM_INIT_TIME  10000
 
-       pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+       pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;
 
@@ -1198,18 +1197,18 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
-       bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+       writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 
-       /**
+       /*
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
-               pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+               pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
 
-       /**
+       /*
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
@@ -1217,7 +1216,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
        bfa_trc(ioc, pss_ctl);
 
        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
-       bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+       writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
 static void
@@ -1225,13 +1224,13 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
 {
        u32     pss_ctl;
 
-       /**
+       /*
         * Take processor out of reset.
         */
-       pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+       pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;
 
-       bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+       writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
 static void
@@ -1239,16 +1238,16 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
 {
        u32     pss_ctl;
 
-       /**
+       /*
         * Put processors in reset.
         */
-       pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+       pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
 
-       bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+       writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
-/**
+/*
  * Get driver and firmware versions.
  */
 void
@@ -1261,7 +1260,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        pgoff = bfa_ioc_smem_pgoff(ioc, loff);
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
@@ -1271,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
        }
 }
 
-/**
+/*
  * Returns TRUE if same.
  */
 bfa_boolean_t
@@ -1296,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
        return BFA_TRUE;
 }
 
-/**
+/*
  * Return true if current running version is valid. Firmware signature and
  * execution context (driver/bios) must match.
  */
@@ -1305,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 {
        struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
 
-       /**
+       /*
         * If bios/efi boot (flash based) -- return true
         */
        if (bfa_ioc_is_bios_optrom(ioc))
@@ -1321,7 +1320,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
                return BFA_FALSE;
        }
 
-       if (bfa_os_swap32(fwhdr.param) != boot_env) {
+       if (swab32(fwhdr.param) != boot_env) {
                bfa_trc(ioc, fwhdr.param);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
@@ -1330,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
 }
 
-/**
+/*
  * Conditionally flush any pending message from firmware at start.
  */
 static void
@@ -1338,9 +1337,9 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
 {
        u32     r32;
 
-       r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+       r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
-               bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
+               writel(1, ioc->ioc_regs.lpu_mbox_cmd);
 }
 
 
@@ -1352,7 +1351,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
        u32 boot_type;
        u32 boot_env;
 
-       ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+       ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
        boot_type = BFI_BOOT_TYPE_NORMAL;
        boot_env = BFI_BOOT_LOADER_OS;
 
-       /**
+       /*
         * Flash based firmware boot BIOS env.
         */
        if (bfa_ioc_is_bios_optrom(ioc)) {
@@ -1370,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                boot_env = BFI_BOOT_LOADER_BIOS;
        }
 
-       /**
+       /*
         * Flash based firmware boot UEFI env.
         */
        if (bfa_ioc_is_uefi(ioc)) {
@@ -1378,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                boot_env = BFI_BOOT_LOADER_UEFI;
        }
 
-       /**
+       /*
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1389,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                return;
        }
 
-       /**
+       /*
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
@@ -1398,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                return;
        }
 
-       /**
+       /*
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         *
@@ -1409,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
        if (ioc_fwstate == BFI_IOC_DISABLED ||
            (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
 
-               /**
+               /*
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
@@ -1419,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
                return;
        }
 
-       /**
+       /*
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
@@ -1449,17 +1448,17 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
-               bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
-                             bfa_os_wtole(msgp[i]));
+               writel(cpu_to_le32(msgp[i]),
+                       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
 
        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
-               bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);
+               writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
 
        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
-       bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
-       (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+       writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+       (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
 }
 
 static void
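bfa_ioc_mbox_send() above posts the message to the host-function mailbox one 32-bit word at a time as little-endian, zero-fills the rest of the mailbox, then writes 1 to the command register and reads it back to flush the posted write. A sketch of that sequence with hypothetical register handles and a stand-in for BFI_IOC_MSGLEN_MAX:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_MBOX_LEN	64	/* stand-in for BFI_IOC_MSGLEN_MAX */

static void example_mbox_send(void __iomem *mbox, void __iomem *mbox_cmd,
			      const u32 *words, int len)
{
	int i;

	for (i = 0; i < len / (int)sizeof(u32); i++)
		writel(cpu_to_le32(words[i]), mbox + i * sizeof(u32));

	for (; i < EXAMPLE_MBOX_LEN / (int)sizeof(u32); i++)
		writel(0, mbox + i * sizeof(u32));

	writel(1, mbox_cmd);	/* kick the LPU, as the driver does */
	(void) readl(mbox_cmd);	/* flush the posted write */
}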
@@ -1472,7 +1471,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
                    bfa_ioc_portid(ioc));
        enable_req.ioc_class = ioc->ioc_mc;
        bfa_os_gettimeofday(&tv);
-       enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
+       enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
 }
 
@@ -1503,7 +1502,7 @@ bfa_ioc_hb_check(void *cbarg)
        struct bfa_ioc_s  *ioc = cbarg;
        u32     hb_count;
 
-       hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+       hb_count = readl(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
                bfa_ioc_recover(ioc);
@@ -1519,7 +1518,7 @@ bfa_ioc_hb_check(void *cbarg)
 static void
 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
 {
-       ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+       ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        bfa_hb_timer_start(ioc);
 }
 
@@ -1530,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
 }
 
 
-/**
+/*
  *     Initiate a full firmware download.
  */
 static void
@@ -1543,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
        u32 chunkno = 0;
        u32 i;
 
-       /**
+       /*
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);
@@ -1554,7 +1553,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        pgoff = bfa_ioc_smem_pgoff(ioc, loff);
 
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
        for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
 
@@ -1564,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
                }
 
-               /**
+               /*
                 * write smem
                 */
                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@@ -1572,27 +1571,25 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 
                loff += sizeof(u32);
 
-               /**
+               /*
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
-                       bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-                                     pgnum);
+                       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
                }
        }
 
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-                     bfa_ioc_smem_pgnum(ioc, 0));
+       writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
 
        /*
         * Set boot type and boot param at the end.
        */
        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
-                       bfa_os_swap32(boot_type));
+                       swab32(boot_type));
        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
-                       bfa_os_swap32(boot_env));
+                       swab32(boot_env));
 }
 
 static void
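The boot-parameter writes above use swab32(), an unconditional byte swap, rather than cpu_to_be32(), which only swaps on little-endian hosts; the patch keeps whatever unconditional behaviour bfa_os_swap32() presumably had. A small illustration of the difference:

#include <linux/swab.h>
#include <asm/byteorder.h>
#include <linux/types.h>

static inline void swab_vs_cpu_to_be(void)
{
	u32 v = 0x12345678;

	(void) swab32(v);	/* 0x78563412 on every host */
	(void) cpu_to_be32(v);	/* 0x78563412 only on little-endian hosts */
}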
@@ -1601,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
        bfa_ioc_hwinit(ioc, force);
 }
 
-/**
+/*
  * Update BFA configuration from firmware configuration.
  */
 static void
@@ -1609,14 +1606,14 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
 {
        struct bfi_ioc_attr_s   *attr = ioc->attr;
 
-       attr->adapter_prop  = bfa_os_ntohl(attr->adapter_prop);
-       attr->card_type     = bfa_os_ntohl(attr->card_type);
-       attr->maxfrsize     = bfa_os_ntohs(attr->maxfrsize);
+       attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
+       attr->card_type     = be32_to_cpu(attr->card_type);
+       attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
 
        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
 
-/**
+/*
  * Attach time initialization of mbox logic.
  */
 static void
@@ -1632,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
        }
 }
 
-/**
+/*
  * Mbox poll timer -- restarts any pending mailbox requests.
  */
 static void
@@ -1642,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
        struct bfa_mbox_cmd_s           *cmd;
        u32                     stat;
 
-       /**
+       /*
         * If no command pending, do nothing
         */
        if (list_empty(&mod->cmd_q))
                return;
 
-       /**
+       /*
         * If previous command is not yet fetched by firmware, do nothing
         */
-       stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+       stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat)
                return;
 
-       /**
+       /*
         * Enqueue command to firmware.
         */
        bfa_q_deq(&mod->cmd_q, &cmd);
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 }
 
-/**
+/*
  * Cleanup any pending requests.
  */
 static void
@@ -1675,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
                bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
-/**
+/*
  * Read data from SMEM to host through PCI memmap
  *
  * @param[in]  ioc     memory for IOC
@@ -1704,26 +1701,25 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
                return BFA_STATUS_FAILED;
        }
 
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
        len = sz/sizeof(u32);
        bfa_trc(ioc, len);
        for (i = 0; i < len; i++) {
                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
-               buf[i] = bfa_os_ntohl(r32);
+               buf[i] = be32_to_cpu(r32);
                loff += sizeof(u32);
 
-               /**
+               /*
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
-                       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+                       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
                }
        }
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-                     bfa_ioc_smem_pgnum(ioc, 0));
+       writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
        /*
         *  release semaphore.
         */
@@ -1733,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Clear SMEM data from host through PCI memmap
  *
  * @param[in]  ioc     memory for IOC
@@ -1760,7 +1756,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
                return BFA_STATUS_FAILED;
        }
 
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
        len = sz/sizeof(u32); /* len in words */
        bfa_trc(ioc, len);
@@ -1768,17 +1764,16 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
                loff += sizeof(u32);
 
-               /**
+               /*
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
-                       bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+                       writel(pgnum, ioc->ioc_regs.host_page_num_fn);
                }
        }
-       bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-                     bfa_ioc_smem_pgnum(ioc, 0));
+       writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
 
        /*
         *  release semaphore.
@@ -1788,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * hal iocpf to ioc interface
  */
 static void
@@ -1813,7 +1808,7 @@ static void
 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
 {
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
-       /**
+       /*
         * Provide enable completion callback.
         */
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -1824,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
 
 
 
-/**
+/*
  *  hal_ioc_public
  */
 
@@ -1848,43 +1843,43 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
 void
 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
-       bfa_os_addr_t   rb;
+       void __iomem *rb;
 
        bfa_ioc_stats(ioc, ioc_boots);
 
        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
                return;
 
-       /**
+       /*
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
        if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-               bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
-               bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
+               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+               writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
        } else {
-               bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
-               bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
+               writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+               writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
        }
 
        bfa_ioc_msgflush(ioc);
        bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
-       /**
+       /*
         * Enable interrupts just before starting LPU
         */
        ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
 }
 
-/**
+/*
  * Enable/disable IOC failure auto recovery.
  */
 void
@@ -1904,7 +1899,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
 bfa_boolean_t
 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
 {
-       u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+       u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
 
        return ((r32 != BFI_IOC_UNINIT) &&
                (r32 != BFI_IOC_INITING) &&
@@ -1918,21 +1913,21 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
        u32     r32;
        int             i;
 
-       /**
+       /*
         * read the MBOX msg
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
-               r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
+               r32 = readl(ioc->ioc_regs.lpu_mbox +
                                   i * sizeof(u32));
-               msgp[i] = bfa_os_htonl(r32);
+               msgp[i] = cpu_to_be32(r32);
        }
 
-       /**
+       /*
         * turn off mailbox interrupt by clearing mailbox status
         */
-       bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
-       bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+       writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+       readl(ioc->ioc_regs.lpu_mbox_cmd);
 }
 
 void
@@ -1971,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
        }
 }
 
-/**
+/*
  * IOC attach time initialization and setup.
  *
  * @param[in]  ioc     memory for IOC
@@ -1996,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
        bfa_fsm_send_event(ioc, IOC_E_RESET);
 }
 
-/**
+/*
  * Driver detach time IOC cleanup.
  */
 void
@@ -2005,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
 }
 
-/**
+/*
  * Setup IOC PCI properties.
  *
  * @param[in]  pcidev  PCI device information for this IOC
@@ -2019,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
        ioc->ctdev      = bfa_asic_id_ct(ioc->pcidev.device_id);
        ioc->cna        = ioc->ctdev && !ioc->fcmode;
 
-       /**
+       /*
         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
         */
        if (ioc->ctdev)
@@ -2031,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
        bfa_ioc_reg_init(ioc);
 }
 
-/**
+/*
  * Initialize IOC dma memory
  *
  * @param[in]  dm_kva  kernel virtual address of IOC dma memory
@@ -2040,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 void
 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
 {
-       /**
+       /*
         * dma memory for firmware attribute
         */
        ioc->attr_dma.kva = dm_kva;
@@ -2048,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
        ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
 }
 
-/**
+/*
  * Return size of dma memory required.
  */
 u32
@@ -2073,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
-/**
+/*
  * Returns memory required for saving firmware trace in case of crash.
  * Driver must call this interface to allocate memory required for
  * automatic saving of firmware trace. Driver should call
@@ -2086,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
        return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
-/**
+/*
  * Initialize memory for saving firmware trace. Driver must initialize
  * trace memory before calling bfa_ioc_enable().
  */
@@ -2109,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
        return PSS_SMEM_PGOFF(fmaddr);
 }
 
-/**
+/*
  * Register mailbox message handler functions
  *
  * @param[in]  ioc             IOC instance
@@ -2125,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
 }
 
-/**
+/*
  * Register mailbox message handler function, to be called by common modules
  */
 void
@@ -2138,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
        mod->mbhdlr[mc].cbarg   = cbarg;
 }
 
-/**
+/*
  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  * It is the responsibility of the caller to serialize.
  *
@@ -2151,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
        u32                     stat;
 
-       /**
+       /*
         * If a previous command is pending, queue new command
         */
        if (!list_empty(&mod->cmd_q)) {
@@ -2159,22 +2154,22 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
                return;
        }
 
-       /**
+       /*
         * If mailbox is busy, queue command for poll timer
         */
-       stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+       stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return;
        }
 
-       /**
+       /*
         * mailbox is free -- queue command to firmware
         */
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 }
 
-/**
+/*
  * Handle mailbox interrupts
  */
 void
@@ -2186,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
 
        bfa_ioc_msgget(ioc, &m);
 
-       /**
+       /*
         * Treat IOC message class as special.
         */
        mc = m.mh.msg_class;
@@ -2214,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
        ioc->port_id = bfa_ioc_pcifn(ioc);
 }
 
-/**
+/*
  * return true if IOC is disabled
  */
 bfa_boolean_t
@@ -2224,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
-/**
+/*
  * return true if IOC firmware is different.
  */
 bfa_boolean_t
@@ -2243,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
         ((__sm) == BFI_IOC_FAIL) ||            \
         ((__sm) == BFI_IOC_CFG_DISABLED))
 
-/**
+/*
  * Check if adapter is disabled -- both IOCs should be in a disabled
  * state.
  */
@@ -2251,17 +2246,17 @@ bfa_boolean_t
 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 {
        u32     ioc_state;
-       bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
+       void __iomem *rb = ioc->pcidev.pci_bar_kva;
 
        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
                return BFA_FALSE;
 
-       ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
+       ioc_state = readl(rb + BFA_IOC0_STATE_REG);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;
 
        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-               ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
+               ioc_state = readl(rb + BFA_IOC1_STATE_REG);
                if (!bfa_ioc_state_disabled(ioc_state))
                        return BFA_FALSE;
        }
@@ -2269,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
        return BFA_TRUE;
 }
 
-/**
+/*
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
  */
@@ -2293,7 +2288,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
-       bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+       memcpy(&ad_attr->vpd, &ioc_attr->vpd,
                      sizeof(struct bfa_mfg_vpd_s));
 
        ad_attr->nports = bfa_ioc_get_nports(ioc);
@@ -2343,8 +2338,8 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
 {
-       bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
-       bfa_os_memcpy((void *)serial_num,
+       memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+       memcpy((void *)serial_num,
                        (void *)ioc->attr->brcd_serialnum,
                        BFA_ADAPTER_SERIAL_NUM_LEN);
 }
@@ -2352,8 +2347,8 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
 void
 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
 {
-       bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
-       bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+       memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+       memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
 }
 
 void
@@ -2361,7 +2356,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
 {
        bfa_assert(chip_rev);
 
-       bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+       memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
 
        chip_rev[0] = 'R';
        chip_rev[1] = 'e';
@@ -2374,16 +2369,16 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
 void
 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
 {
-       bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
-       bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
+       memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+       memcpy(optrom_ver, ioc->attr->optrom_version,
                      BFA_VERSION_LEN);
 }
 
 void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
 {
-       bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
-       bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+       memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+       memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
 void
@@ -2392,14 +2387,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
        struct bfi_ioc_attr_s   *ioc_attr;
 
        bfa_assert(model);
-       bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+       memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
 
        ioc_attr = ioc->attr;
 
-       /**
+       /*
         * model name
         */
-       bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+       snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
                BFA_MFG_NAME, ioc_attr->card_type);
 }
 
@@ -2446,7 +2441,7 @@ bfa_ioc_get_state(struct bfa_ioc_s *ioc)
 void
 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 {
-       bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+       memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
 
        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;
@@ -2460,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
-/**
+/*
  *  hal_wwn_public
  */
 wwn_t
@@ -2526,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
        return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
 }
 
-/**
+/*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2541,12 +2536,12 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
        if (tlen > ioc->dbg_fwsave_len)
                tlen = ioc->dbg_fwsave_len;
 
-       bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
+       memcpy(trcdata, ioc->dbg_fwsave, tlen);
        *trclen = tlen;
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Clear saved firmware trace
  */
 void
@@ -2555,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
        ioc->dbg_fwsave_once = BFA_TRUE;
 }
 
-/**
+/*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2595,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
 
        bfa_ioc_send_fwsync(ioc);
 
-       /**
+       /*
         * After sending a fw sync mbox command wait for it to
         * take effect.  We will not wait for a response because
         *    1. fw_sync mbox cmd doesn't have a response.
@@ -2610,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
                fwsync_iter--;
 }
 
-/**
+/*
  * Dump firmware smem
  */
 bfa_status_t
@@ -2630,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
        loff = *offset;
        dlen = *buflen;
 
-       /**
+       /*
         * First smem read, sync smem before proceeding
         * No need to sync before reading every chunk.
         */
@@ -2657,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
        return status;
 }
 
-/**
+/*
  * Firmware statistics
  */
 bfa_status_t
@@ -2702,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
        return status;
 }
 
-/**
+/*
  * Save firmware trace if configured.
  */
 static void
@@ -2716,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
        }
 }
 
-/**
+/*
  * Firmware failure detected. Start recovery actions.
  */
 static void
@@ -2738,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
                return;
 }
 
-/**
+/*
  *  hal_iocpf_pvt BFA IOC PF private functions
  */
 
@@ -2795,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
        bfa_ioc_hw_sem_get(ioc);
 }
 
-/**
+/*
  *  bfa timer function
  */
 void
@@ -2840,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod)
        }
 }
 
-/**
+/*
  * Should be called with lock protection
  */
 void
@@ -2858,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
        list_add_tail(&timer->qe, &mod->timer_q);
 }
 
-/**
+/*
  * Should be called with lock protection
  */
 void
index 288c5801aace553021d68472824f342ce9aac0e0..9c407a87a1a1e01e228531c5dd48a4a0f1c85f3b 100644 (file)
 #include "bfa_cs.h"
 #include "bfi.h"
 
-/**
+/*
  * BFA timer declarations
  */
 typedef void (*bfa_timer_cbfn_t)(void *);
 
-/**
+/*
  * BFA timer data structure
  */
 struct bfa_timer_s {
        struct list_head        qe;
        bfa_timer_cbfn_t timercb;
        void            *arg;
-       int             timeout;        /**< in millisecs. */
+       int             timeout;        /* in millisecs */
 };
 
-/**
+/*
  * Timer module structure
  */
 struct bfa_timer_mod_s {
        struct list_head timer_q;
 };
 
-#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
+#define BFA_TIMER_FREQ 200 /* specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
 void bfa_timer_init(struct bfa_timer_mod_s *mod);
@@ -53,7 +53,7 @@ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
                        unsigned int timeout);
 void bfa_timer_stop(struct bfa_timer_s *timer);
 
-/**
+/*
  * Generic Scatter Gather Element used by driver
  */
 struct bfa_sge_s {
@@ -62,9 +62,9 @@ struct bfa_sge_s {
 };
 
 #define bfa_sge_word_swap(__sge) do {                                       \
-       ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]);      \
-       ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]);      \
-       ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]);      \
+       ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]);      \
+       ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]);      \
+       ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]);      \
 } while (0)
 
 #define bfa_swap_words(_x)  (  \
@@ -80,17 +80,17 @@ struct bfa_sge_s {
 #define bfa_sgaddr_le(_x)      (_x)
 #endif
 
-/**
+/*
  * PCI device information required by IOC
  */
 struct bfa_pcidev_s {
        int             pci_slot;
        u8              pci_func;
-       u16     device_id;
-       bfa_os_addr_t   pci_bar_kva;
+       u16             device_id;
+       void __iomem    *pci_bar_kva;
 };
 
-/**
+/*
  * Structure used to remember the DMA-able memory block's KVA and Physical
  * Address
  */
@@ -102,7 +102,7 @@ struct bfa_dma_s {
 #define BFA_DMA_ALIGN_SZ       256
 #define BFA_ROUNDUP(_l, _s)    (((_l) + ((_s) - 1)) & ~((_s) - 1))
 
-/**
+/*
  * smem size for Crossbow and Catapult
  */
 #define BFI_SMEM_CB_SIZE       0x200000U       /* ! 2MB for crossbow   */
@@ -125,40 +125,38 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 static inline void
 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-       dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
-       dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
+       dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
+       dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
 }
 
 struct bfa_ioc_regs_s {
-       bfa_os_addr_t   hfn_mbox_cmd;
-       bfa_os_addr_t   hfn_mbox;
-       bfa_os_addr_t   lpu_mbox_cmd;
-       bfa_os_addr_t   lpu_mbox;
-       bfa_os_addr_t   pss_ctl_reg;
-       bfa_os_addr_t   pss_err_status_reg;
-       bfa_os_addr_t   app_pll_fast_ctl_reg;
-       bfa_os_addr_t   app_pll_slow_ctl_reg;
-       bfa_os_addr_t   ioc_sem_reg;
-       bfa_os_addr_t   ioc_usage_sem_reg;
-       bfa_os_addr_t   ioc_init_sem_reg;
-       bfa_os_addr_t   ioc_usage_reg;
-       bfa_os_addr_t   host_page_num_fn;
-       bfa_os_addr_t   heartbeat;
-       bfa_os_addr_t   ioc_fwstate;
-       bfa_os_addr_t   ll_halt;
-       bfa_os_addr_t   err_set;
-       bfa_os_addr_t   shirq_isr_next;
-       bfa_os_addr_t   shirq_msk_next;
-       bfa_os_addr_t   smem_page_start;
+       void __iomem *hfn_mbox_cmd;
+       void __iomem *hfn_mbox;
+       void __iomem *lpu_mbox_cmd;
+       void __iomem *lpu_mbox;
+       void __iomem *pss_ctl_reg;
+       void __iomem *pss_err_status_reg;
+       void __iomem *app_pll_fast_ctl_reg;
+       void __iomem *app_pll_slow_ctl_reg;
+       void __iomem *ioc_sem_reg;
+       void __iomem *ioc_usage_sem_reg;
+       void __iomem *ioc_init_sem_reg;
+       void __iomem *ioc_usage_reg;
+       void __iomem *host_page_num_fn;
+       void __iomem *heartbeat;
+       void __iomem *ioc_fwstate;
+       void __iomem *ll_halt;
+       void __iomem *err_set;
+       void __iomem *shirq_isr_next;
+       void __iomem *shirq_msk_next;
+       void __iomem *smem_page_start;
        u32     smem_pg0;
 };
 
-#define bfa_reg_read(_raddr)   bfa_os_reg_read(_raddr)
-#define bfa_reg_write(_raddr, _val)    bfa_os_reg_write(_raddr, _val)
-#define bfa_mem_read(_raddr, _off)     bfa_os_mem_read(_raddr, _off)
+#define bfa_mem_read(_raddr, _off)     swab32(readl(((_raddr) + (_off))))
 #define bfa_mem_write(_raddr, _off, _val)      \
-                                       bfa_os_mem_write(_raddr, _off, _val)
-/**
+                       writel(swab32((_val)), ((_raddr) + (_off)))
+/*
  * IOC Mailbox structures
  */
 struct bfa_mbox_cmd_s {
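
The __bfa_dma_be_addr_set() change near the top of this hunk splits a 64-bit DMA address into two big-endian 32-bit words for the firmware, with bfa_os_u32(pa) supplying the upper half as (pa >> 32). A minimal sketch of the same pattern; the struct and function names here are illustrative stand-ins for the driver's union bfi_addr_u:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative stand-in for union bfi_addr_u */
struct be_addr_words {
	u32	addr_lo;
	u32	addr_hi;
};

static void demo_dma_be_addr_set(struct be_addr_words *w, u64 pa)
{
	/* lower 32 bits of the physical address, converted to big endian */
	w->addr_lo = (u32) cpu_to_be32((u32) pa);
	/* bfa_os_u32(pa) is (pa >> 32): the upper 32 bits, also big endian */
	w->addr_hi = (u32) cpu_to_be32((u32) (pa >> 32));
}

The bfa_mem_read()/bfa_mem_write() macros defined just above follow the same idea, pairing readl()/writel() with swab32() for word-swapped shared-memory access.
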
@@ -166,7 +164,7 @@ struct bfa_mbox_cmd_s {
        u32     msg[BFI_IOC_MSGSZ];
 };
 
-/**
+/*
  * IOC mailbox module
  */
 typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
@@ -179,7 +177,7 @@ struct bfa_ioc_mbox_mod_s {
        } mbhdlr[BFI_MC_MAX];
 };
 
-/**
+/*
  * IOC callback function interfaces
  */
 typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
@@ -193,7 +191,7 @@ struct bfa_ioc_cbfn_s {
        bfa_ioc_reset_cbfn_t    reset_cbfn;
 };
 
-/**
+/*
  * Heartbeat failure notification queue element.
  */
 struct bfa_ioc_hbfail_notify_s {
@@ -202,7 +200,7 @@ struct bfa_ioc_hbfail_notify_s {
        void                    *cbarg;
 };
 
-/**
+/*
  * Initialize a heartbeat failure notification structure
  */
 #define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do {    \
@@ -249,7 +247,7 @@ struct bfa_ioc_s {
 };
 
 struct bfa_ioc_hwif_s {
-       bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
+       bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
        bfa_boolean_t   (*ioc_firmware_lock)    (struct bfa_ioc_s *ioc);
        void            (*ioc_firmware_unlock)  (struct bfa_ioc_s *ioc);
        void            (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -267,7 +265,7 @@ struct bfa_ioc_hwif_s {
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
                (((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)       \
-               bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+               memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
 #define bfa_ioc_maxfrsize(__ioc)       ((__ioc)->attr->maxfrsize)
 #define bfa_ioc_rx_bbcredit(__ioc)     ((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)       \
@@ -287,7 +285,7 @@ struct bfa_ioc_hwif_s {
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)     (off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
 
-/**
+/*
  * IOC mailbox interface
  */
 void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
@@ -299,7 +297,7 @@ void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
 void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 
-/**
+/*
  * IOC interfaces
  */
 
@@ -308,9 +306,9 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                           (__ioc)->fcmode))
 
 bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
-bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
 
 #define        bfa_ioc_isr_mode_set(__ioc, __msix)                     \
                        ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
@@ -370,8 +368,8 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
 void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
        struct bfa_ioc_hbfail_notify_s *notify);
-bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
-void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
+bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_sem_release(void __iomem *sem_reg);
 void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
                        struct bfi_ioc_image_hdr_s *fwhdr);
@@ -441,7 +439,7 @@ bfa_cb_image_get_size(int type)
        }
 }
 
-/**
+/*
  * CNA TRCMOD declaration
  */
 /*
index d7ac864d8539de35a1fc56c28c33091be71534a9..909945043850fbbeaf5b3a0699fd46e11186bd9e 100644 (file)
@@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
 
 struct bfa_ioc_hwif_s hwif_cb;
 
-/**
+/*
  * Called from bfa_ioc_attach() to map asic specific calls.
  */
 void
@@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
        ioc->ioc_hwif = &hwif_cb;
 }
 
-/**
+/*
  * Return true if firmware of current driver matches the running firmware.
  */
 static bfa_boolean_t
@@ -66,17 +66,17 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
 {
 }
 
-/**
+/*
  * Notify other functions on HB failure.
  */
 static void
 bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
 {
-       bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
-       bfa_reg_read(ioc->ioc_regs.err_set);
+       writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+       readl(ioc->ioc_regs.err_set);
 }
 
-/**
+/*
  * Host to LPU mailbox message addresses
  */
 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@@ -96,7 +96,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
 static void
 bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 {
-       bfa_os_addr_t   rb;
+       void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);
 
        rb = bfa_ioc_bar0(ioc);
@@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
        }
 
-       /**
+       /*
         * Host <-> LPU mailbox command/status registers
         */
        ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 
-       /**
+       /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
-/**
+/*
  * Initialize IOC to port mapping.
  */
 
 static void
 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
 {
-       /**
+       /*
         * For crossbow, port id is same as pci function.
         */
        ioc->port_id = bfa_ioc_pcifn(ioc);
@@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
        bfa_trc(ioc, ioc->port_id);
 }
 
-/**
+/*
  * Set interrupt mode for a function: INTX or MSIX
  */
 static void
@@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 {
 }
 
-/**
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
@@ -180,14 +180,14 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
-       bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+       readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_ioc_hw_sem_release(ioc);
 }
 
 
 
 bfa_status_t
-bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
 {
        u32     pll_sclk, pll_fclk;
 
@@ -199,38 +199,32 @@ bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
                __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
                __APP_PLL_400_JITLMT0_1(3U) |
                __APP_PLL_400_CNTLMT0_1(3U);
-       bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
-       bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-       bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-       bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-                         __APP_PLL_212_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-                         __APP_PLL_212_BYPASS |
-                         __APP_PLL_212_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-                         __APP_PLL_400_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-                         __APP_PLL_400_BYPASS |
-                         __APP_PLL_400_LOGIC_SOFT_RESET);
-       bfa_os_udelay(2);
-       bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-                         __APP_PLL_212_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-                         __APP_PLL_400_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-                         pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-                         pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
-       bfa_os_udelay(2000);
-       bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
-       bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
+       writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+       writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+       writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_212_CTL_REG);
+       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+       writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_400_CTL_REG);
+       udelay(2);
+       writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+       writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+       writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_212_CTL_REG);
+       writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_400_CTL_REG);
+       udelay(2000);
+       writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+       writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
+       writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
 
        return BFA_STATUS_OK;
 }
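
The Crossbow PLL-init hunk above is typical of the whole conversion: bfa_reg_read()/bfa_reg_write() become readl()/writel(), bfa_os_udelay() becomes udelay(), and the write accessor's argument order flips, since writel() takes the value first and the address second while the old wrapper took the address first. A minimal sketch of the mapping, against a hypothetical control-register offset:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>

#define DEMO_CTL_REG	0x100	/* hypothetical offset, for illustration only */

static void demo_ctl_update(void __iomem *rb)
{
	u32 r32;

	/* was: r32 = bfa_reg_read(rb + DEMO_CTL_REG); */
	r32 = readl(rb + DEMO_CTL_REG);
	r32 &= ~0x1;			/* clear an illustrative bit */
	/* was: bfa_reg_write(rb + DEMO_CTL_REG, r32); value now comes first */
	writel(r32, rb + DEMO_CTL_REG);
	/* was: bfa_os_udelay(2); */
	udelay(2);
}
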
index f21b82c5f64c64cf271b378099d38484dbf2611c..115730c0aa77f321aa25ee6f0d7136f43c8c2b90 100644 (file)
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
 
 struct bfa_ioc_hwif_s hwif_ct;
 
-/**
+/*
  * Called from bfa_ioc_attach() to map asic specific calls.
  */
 void
@@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
        ioc->ioc_hwif = &hwif_ct;
 }
 
-/**
+/*
  * Return true if firmware of current driver matches the running firmware.
  */
 static bfa_boolean_t
@@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;
 
-       /**
+       /*
         * Firmware match check is relevant only for CNA.
         */
        if (!ioc->cna)
                return BFA_TRUE;
 
-       /**
+       /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@@ -76,27 +76,27 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
                return BFA_TRUE;
 
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-       usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+       usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 
-       /**
+       /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
-               bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+               writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_TRUE;
        }
 
-       ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+       ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
        bfa_trc(ioc, ioc_fwstate);
 
-       /**
+       /*
         * Use count cannot be non-zero and chip in uninitialized state.
         */
        bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
 
-       /**
+       /*
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
@@ -106,11 +106,11 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
                return BFA_FALSE;
        }
 
-       /**
+       /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
-       bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+       writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
@@ -121,50 +121,50 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 {
        u32 usecnt;
 
-       /**
+       /*
         * Firmware lock is relevant only for CNA.
         */
        if (!ioc->cna)
                return;
 
-       /**
+       /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;
 
-       /**
+       /*
         * decrement usage count
         */
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-       usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+       usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        bfa_assert(usecnt > 0);
 
        usecnt--;
-       bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+       writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_trc(ioc, usecnt);
 
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
-/**
+/*
  * Notify other functions on HB failure.
  */
 static void
 bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
 {
        if (ioc->cna) {
-               bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+               writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
                /* Wait for halt to take effect */
-               bfa_reg_read(ioc->ioc_regs.ll_halt);
+               readl(ioc->ioc_regs.ll_halt);
        } else {
-               bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
-               bfa_reg_read(ioc->ioc_regs.err_set);
+               writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+               readl(ioc->ioc_regs.err_set);
        }
 }
 
-/**
+/*
  * Host to LPU mailbox message addresses
  */
 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers - port 0
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
        { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers - port 1
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@@ -197,7 +197,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
 static void
 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 {
-       bfa_os_addr_t   rb;
+       void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);
 
        rb = bfa_ioc_bar0(ioc);
@@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
 
-       /**
+       /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
-/**
+/*
  * Initialize IOC to port mapping.
  */
 
@@ -256,13 +256,13 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 static void
 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
 {
-       bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
+       void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;
 
-       /**
+       /*
         * For catapult, base port id on personality register and IOC type
         */
-       r32 = bfa_reg_read(rb + FNC_PERS_REG);
+       r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
 
@@ -270,22 +270,22 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
        bfa_trc(ioc, ioc->port_id);
 }
 
-/**
+/*
  * Set interrupt mode for a function: INTX or MSIX
  */
 static void
 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 {
-       bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
+       void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;
 
-       r32 = bfa_reg_read(rb + FNC_PERS_REG);
+       r32 = readl(rb + FNC_PERS_REG);
        bfa_trc(ioc, r32);
 
        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;
 
-       /**
+       /*
         * If already in desired mode, do not change anything
         */
        if (!msix && mode)
@@ -300,10 +300,10 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        bfa_trc(ioc, r32);
 
-       bfa_reg_write(rb + FNC_PERS_REG, r32);
+       writel(r32, rb + FNC_PERS_REG);
 }
 
-/**
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
@@ -312,7 +312,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 
        if (ioc->cna) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-               bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+               writel(0, ioc->ioc_regs.ioc_usage_reg);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        }
 
@@ -321,7 +321,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
-       bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+       readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_ioc_hw_sem_release(ioc);
 }
 
@@ -331,17 +331,17 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
  * Check the firmware state to know if pll_init has been completed already
  */
 bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
+bfa_ioc_ct_pll_init_complete(void __iomem *rb)
 {
-       if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
-         (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
+       if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
+         (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
                return BFA_TRUE;
 
        return BFA_FALSE;
 }
 
 bfa_status_t
-bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
 {
        u32     pll_sclk, pll_fclk, r32;
 
@@ -354,56 +354,51 @@ bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
                __APP_PLL_425_JITLMT0_1(3U) |
                __APP_PLL_425_CNTLMT0_1(1U);
        if (fcmode) {
-               bfa_reg_write((rb + OP_MODE), 0);
-               bfa_reg_write((rb + ETH_MAC_SER_REG),
-                               __APP_EMS_CMLCKSEL |
-                               __APP_EMS_REFCKBUFEN2 |
-                               __APP_EMS_CHANNEL_SEL);
+               writel(0, (rb + OP_MODE));
+               writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
+                        __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
        } else {
-               bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
-               bfa_reg_write((rb + ETH_MAC_SER_REG),
-                               __APP_EMS_REFCKBUFEN1);
+               writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+               writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
        }
-       bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
-       bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-       bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-       bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-               __APP_PLL_312_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-               __APP_PLL_425_LOGIC_SOFT_RESET);
-       bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-               __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
-       bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-               __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
-       bfa_reg_read(rb + HOSTFN0_INT_MSK);
-       bfa_os_udelay(2000);
-       bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-       bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-       bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-               __APP_PLL_312_ENABLE);
-       bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-               __APP_PLL_425_ENABLE);
+       writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+       writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_312_CTL_REG);
+       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
+                       rb + APP_PLL_425_CTL_REG);
+       writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+                       rb + APP_PLL_312_CTL_REG);
+       writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+                       rb + APP_PLL_425_CTL_REG);
+       readl(rb + HOSTFN0_INT_MSK);
+       udelay(2000);
+       writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+       writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+       writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
+       writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
        if (!fcmode) {
-               bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
-               bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
+               writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+               writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
-       r32 = bfa_reg_read((rb + PSS_CTL_REG));
+       r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
-       bfa_reg_write((rb + PSS_CTL_REG), r32);
-       bfa_os_udelay(1000);
+       writel(r32, (rb + PSS_CTL_REG));
+       udelay(1000);
        if (!fcmode) {
-               bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
-               bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
+               writel(0, (rb + PMM_1T_RESET_REG_P0));
+               writel(0, (rb + PMM_1T_RESET_REG_P1));
        }
 
-       bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
-       bfa_os_udelay(1000);
-       r32 = bfa_reg_read((rb + MBIST_STAT_REG));
-       bfa_reg_write((rb + MBIST_CTL_REG), 0);
+       writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+       udelay(1000);
+       r32 = readl((rb + MBIST_STAT_REG));
+       writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
 }
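
Several converted sequences in this file write a register and immediately read it back, as in the heartbeat-failure halt above ("Wait for halt to take effect"): the read-back flushes the posted MMIO write so it reaches the device before the function returns. A sketch of the idiom, with a hypothetical register and bit, assuming nothing beyond readl()/writel():

#include <linux/io.h>

#define DEMO_HALT_REG	0x1f8		/* hypothetical offset */
#define DEMO_HALT_BIT	0x00000001	/* hypothetical halt bit */

static void demo_halt(void __iomem *rb)
{
	writel(DEMO_HALT_BIT, rb + DEMO_HALT_REG);
	/* read back to flush the posted write, same as the ll_halt and
	 * err_set accesses in bfa_ioc_ct_notify_hbfail() above */
	readl(rb + DEMO_HALT_REG);
}
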
index 2cd52733867750e76ae7d52cd65c1dae30e812e1..15407ab39e77759e3cafa96214636b19c2fc22bc 100644 (file)
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_modules.h BFA modules
  */
 
@@ -52,7 +52,7 @@ enum {
 };
 
 
-/**
+/*
  * Macro to define a new BFA module
  */
 #define BFA_MODULE(__mod)                                              \
@@ -80,7 +80,7 @@ enum {
 
 #define BFA_CACHELINE_SZ       (256)
 
-/**
+/*
  * Structure used to interact between different BFA sub modules
  *
  * Each sub module needs to implement only the entry points relevant to it (and
index 788a250ffb8a01b05a6701cab2ecd1e3308016f7..65df62ef437fa349029f05ba76aa774f818ba8cf 100644 (file)
  * General Public License for more details.
  */
 
-/**
- * Contains declarations all OS Specific files needed for BFA layer
- */
-
 #ifndef __BFA_OS_INC_H__
 #define __BFA_OS_INC_H__
 
 #define __BIGENDIAN
 #endif
 
-static inline u64 bfa_os_get_clock(void)
-{
-       return jiffies;
-}
-
 static inline u64 bfa_os_get_log_time(void)
 {
        u64 system_time = 0;
@@ -63,13 +54,6 @@ static inline u64 bfa_os_get_log_time(void)
 #define bfa_io_lat_clock_res_div HZ
 #define bfa_io_lat_clock_res_mul 1000
 
-#define BFA_ASSERT(p) do {                                             \
-       if (!(p)) {      \
-               printk(KERN_ERR "assert(%s) failed at %s:%d\n",         \
-               #p, __FILE__, __LINE__);      \
-       }                                                               \
-} while (0)
-
 #define BFA_LOG(level, bfad, mask, fmt, arg...)                                \
 do {                                                                   \
        if (((mask) == 4) || (level[1] <= '4'))                         \
@@ -81,22 +65,6 @@ do {                                                                 \
        ((_x) & 0x00ff00) |                     \
        (((_x) & 0xff0000) >> 16))
 
-#define bfa_swap_8b(_x)                                        \
-       ((((_x) & 0xff00000000000000ull) >> 56)         \
-        | (((_x) & 0x00ff000000000000ull) >> 40)       \
-        | (((_x) & 0x0000ff0000000000ull) >> 24)       \
-        | (((_x) & 0x000000ff00000000ull) >> 8)        \
-        | (((_x) & 0x00000000ff000000ull) << 8)        \
-        | (((_x) & 0x0000000000ff0000ull) << 24)       \
-        | (((_x) & 0x000000000000ff00ull) << 40)       \
-        | (((_x) & 0x00000000000000ffull) << 56))
-
-#define bfa_os_swap32(_x)                      \
-       ((((_x) & 0xff) << 24)          |       \
-       (((_x) & 0x0000ff00) << 8)      |       \
-       (((_x) & 0x00ff0000) >> 8)      |       \
-       (((_x) & 0xff000000) >> 24))
-
 #define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
        (((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
        (((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
@@ -108,59 +76,27 @@ do {                                                                       \
        (((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
 
 #ifndef __BIGENDIAN
-#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
-                                (((_x) & 0x00ff) << 8)))
-#define bfa_os_htonl(_x)       bfa_os_swap32(_x)
-#define bfa_os_htonll(_x)      bfa_swap_8b(_x)
-#define bfa_os_hton3b(_x)      bfa_swap_3b(_x)
-#define bfa_os_wtole(_x)   (_x)
+#define bfa_os_hton3b(_x)  bfa_swap_3b(_x)
 #define bfa_os_sgaddr(_x)  (_x)
-
 #else
-
-#define bfa_os_htons(_x)   (_x)
-#define bfa_os_htonl(_x)   (_x)
 #define bfa_os_hton3b(_x)  (_x)
-#define bfa_os_htonll(_x)  (_x)
-#define bfa_os_wtole(_x)   bfa_os_swap32(_x)
 #define bfa_os_sgaddr(_x)  bfa_os_swap_sgaddr(_x)
-
 #endif
 
-#define bfa_os_ntohs(_x)   bfa_os_htons(_x)
-#define bfa_os_ntohl(_x)   bfa_os_htonl(_x)
-#define bfa_os_ntohll(_x)  bfa_os_htonll(_x)
 #define bfa_os_ntoh3b(_x)  bfa_os_hton3b(_x)
-
 #define bfa_os_u32(__pa64) ((__pa64) >> 32)
 
-#define bfa_os_memset  memset
-#define bfa_os_memcpy  memcpy
-#define bfa_os_udelay  udelay
-#define bfa_os_vsprintf vsprintf
-#define bfa_os_snprintf snprintf
-
-#define bfa_os_assign(__t, __s) __t = __s
-#define bfa_os_addr_t void __iomem *
-
-#define bfa_os_reg_read(_raddr) readl(_raddr)
-#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
-#define bfa_os_mem_read(_raddr, _off)                                  \
-       bfa_os_swap32(readl(((_raddr) + (_off))))
-#define bfa_os_mem_write(_raddr, _off, _val)                           \
-       writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
-
-#define BFA_TRC_TS(_trcm)                                              \
-                       ({                                              \
-                               struct timeval tv;                      \
-                                                                       \
-                               do_gettimeofday(&tv);      \
-                               (tv.tv_sec*1000000+tv.tv_usec);      \
-                        })
+#define BFA_TRC_TS(_trcm)                              \
+       ({                                              \
+               struct timeval tv;                      \
+                                                       \
+               do_gettimeofday(&tv);                   \
+               (tv.tv_sec*1000000+tv.tv_usec);         \
+        })
 
 #define boolean_t int
 
-/**
+/*
  * For current time stamp, OS API will fill-in
  */
 struct bfa_timeval_s {
index b6d170a13bea4f88c8ebbc78d4c63a523b4781b8..fff96226a383c6c06363d9e1036301f88d5bf8ac 100644 (file)
@@ -37,16 +37,16 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
                t0 = dip[i];
                t1 = dip[i + 1];
 #ifdef __BIGENDIAN
-               dip[i] = bfa_os_ntohl(t0);
-               dip[i + 1] = bfa_os_ntohl(t1);
+               dip[i] = be32_to_cpu(t0);
+               dip[i + 1] = be32_to_cpu(t1);
 #else
-               dip[i] = bfa_os_ntohl(t1);
-               dip[i + 1] = bfa_os_ntohl(t0);
+               dip[i] = be32_to_cpu(t1);
+               dip[i + 1] = be32_to_cpu(t0);
 #endif
        }
 }
 
-/**
+/*
  * bfa_port_enable_isr()
  *
  *
@@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
        port->endis_cbfn(port->endis_cbarg, status);
 }
 
-/**
+/*
  * bfa_port_disable_isr()
  *
  *
@@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
        port->endis_cbfn(port->endis_cbarg, status);
 }
 
-/**
+/*
  * bfa_port_get_stats_isr()
  *
  *
@@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
        }
 }
 
-/**
+/*
  * bfa_port_clear_stats_isr()
  *
  *
@@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
        port->stats_status = status;
        port->stats_busy   = BFA_FALSE;
 
-       /**
+       /*
        * re-initialize time stamp for stats reset
        */
        bfa_os_gettimeofday(&tv);
@@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
        }
 }
 
-/**
+/*
  * bfa_port_isr()
  *
  *
@@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
        }
 }
 
-/**
+/*
  * bfa_port_meminfo()
  *
  *
@@ -203,7 +203,7 @@ bfa_port_meminfo(void)
        return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
 }
 
-/**
+/*
  * bfa_port_mem_claim()
  *
  *
@@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
        port->stats_dma.pa  = dma_pa;
 }
 
-/**
+/*
  * bfa_port_enable()
  *
  *   Send the Port enable request to the f/w
@@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_disable()
  *
  *   Send the Port disable request to the f/w
@@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_get_stats()
  *
  *   Send the request to the f/w to fetch Port statistics.
@@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_clear_stats()
  *
  *
@@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_hbfail()
  *
  *
@@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg)
        }
 }
 
-/**
+/*
  * bfa_port_attach()
  *
  *
@@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
        bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
        bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
 
-       /**
+       /*
         * initialize time stamp for stats reset
         */
        bfa_os_gettimeofday(&tv);
@@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
        bfa_trc(port, 0);
 }
 
-/**
+/*
  * bfa_port_detach()
  *
  *
index aa1dc749b28195eca325ee97921f91bdeaa95c42..c768143f4805a7cb5f0094682a39c30b73ea433d 100644 (file)
@@ -29,7 +29,7 @@ BFA_MODULE(fcport);
 BFA_MODULE(rport);
 BFA_MODULE(uf);
 
-/**
+/*
  * LPS related definitions
  */
 #define BFA_LPS_MIN_LPORTS      (1)
@@ -41,7 +41,7 @@ BFA_MODULE(uf);
 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
 
-/**
+/*
  *  lps_pvt BFA LPS private functions
  */
 
@@ -55,7 +55,7 @@ enum bfa_lps_event {
        BFA_LPS_SM_RX_CVL       = 7,    /* Rx clear virtual link        */
 };
 
-/**
+/*
  * FC PORT related definitions
  */
 /*
@@ -67,7 +67,7 @@ enum bfa_lps_event {
        (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
 
 
-/**
+/*
  * BFA port state machine events
  */
 enum bfa_fcport_sm_event {
@@ -82,7 +82,7 @@ enum bfa_fcport_sm_event {
        BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
 };
 
-/**
+/*
  * BFA port link notification state machine events
  */
 
@@ -92,7 +92,7 @@ enum bfa_fcport_ln_sm_event {
        BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
 };
 
-/**
+/*
  * RPORT related definitions
  */
 #define bfa_rport_offline_cb(__rp) do {                                        \
@@ -126,7 +126,7 @@ enum bfa_rport_event {
        BFA_RPORT_SM_QRESUME    = 9,    /*  space in requeue queue      */
 };
 
-/**
+/*
  * forward declarations FCXP related functions
  */
 static void    __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
@@ -138,7 +138,7 @@ static void bfa_fcxp_qresume(void *cbarg);
 static void    bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
                                struct bfi_fcxp_send_req_s *send_req);
 
-/**
+/*
  * forward declarations for LPS functions
  */
 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
@@ -163,7 +163,7 @@ static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 
-/**
+/*
  * forward declaration for LPS state machine
  */
 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -175,7 +175,7 @@ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
                                        event);
 
-/**
+/*
  * forward declaration for FC Port functions
  */
 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
@@ -193,7 +193,7 @@ static void bfa_fcport_stats_get_timeout(void *cbarg);
 static void bfa_fcport_stats_clr_timeout(void *cbarg);
 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
 
-/**
+/*
  * forward declaration for FC PORT state machine
  */
 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
@@ -252,7 +252,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = {
 };
 
 
-/**
+/*
  * forward declaration for RPORT related functions
  */
 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
@@ -265,7 +265,7 @@ static void         __bfa_cb_rport_online(void *cbarg,
 static void            __bfa_cb_rport_offline(void *cbarg,
                                                bfa_boolean_t complete);
 
-/**
+/*
  * forward declaration for RPORT state machine
  */
 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
@@ -295,7 +295,7 @@ static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
                                        enum bfa_rport_event event);
 
-/**
+/*
  * PLOG related definitions
  */
 static int
@@ -330,7 +330,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
 
        pl_recp = &(plog->plog_recs[tail]);
 
-       bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+       memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
 
        pl_recp->tv = bfa_os_get_log_time();
        BFA_PL_LOG_REC_INCR(plog->tail);
@@ -342,9 +342,9 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
 void
 bfa_plog_init(struct bfa_plog_s *plog)
 {
-       bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+       memset((char *)plog, 0, sizeof(struct bfa_plog_s));
 
-       bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+       memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
        plog->head = plog->tail = 0;
        plog->plog_enabled = 1;
 }
@@ -357,7 +357,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
        struct bfa_plog_rec_s  lp;
 
        if (plog->plog_enabled) {
-               bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+               memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
                lp.mid = mid;
                lp.eid = event;
                lp.log_type = BFA_PL_LOG_TYPE_STRING;
@@ -381,15 +381,14 @@ bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
                num_ints = BFA_PL_INT_LOG_SZ;
 
        if (plog->plog_enabled) {
-               bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+               memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
                lp.mid = mid;
                lp.eid = event;
                lp.log_type = BFA_PL_LOG_TYPE_INT;
                lp.misc = misc;
 
                for (i = 0; i < num_ints; i++)
-                       bfa_os_assign(lp.log_entry.int_log[i],
-                                       intarr[i]);
+                       lp.log_entry.int_log[i] = intarr[i];
 
                lp.log_num_ints = (u8) num_ints;
 
@@ -407,7 +406,7 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
        u32     ints[BFA_PL_INT_LOG_SZ];
 
        if (plog->plog_enabled) {
-               bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+               memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 
                ints[0] = tmp_int[0];
                ints[1] = tmp_int[1];
@@ -427,7 +426,7 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
        u32     ints[BFA_PL_INT_LOG_SZ];
 
        if (plog->plog_enabled) {
-               bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+               memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 
                ints[0] = tmp_int[0];
                ints[1] = tmp_int[1];
@@ -462,7 +461,7 @@ bfa_plog_get_setting(struct bfa_plog_s *plog)
        return (bfa_boolean_t)plog->plog_enabled;
 }
 
-/**
+/*
  *  fcxp_pvt BFA FCXP private functions
  */
 
@@ -485,7 +484,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
        mod->req_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
-       bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
+       memset(mod->req_pld_list_kva, 0, buf_pool_sz);
 
        /*
         * Initialize the fcxp rsp payload list
@@ -495,7 +494,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
        mod->rsp_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
-       bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
+       memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
 
        bfa_meminfo_dma_virt(mi) = dm_kva;
        bfa_meminfo_dma_phys(mi) = dm_pa;
@@ -508,7 +507,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
        struct bfa_fcxp_s *fcxp;
 
        fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
-       bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+       memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
        INIT_LIST_HEAD(&mod->fcxp_free_q);
        INIT_LIST_HEAD(&mod->fcxp_active_q);
@@ -559,11 +558,11 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 {
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-       bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
+       memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
        mod->bfa = bfa;
        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 
-       /**
+       /*
         * Initialize FCXP request and response payload sizes.
         */
        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
@@ -741,20 +740,20 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
 {
        struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s       *fcxp;
-       u16             fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
+       u16             fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
 
        bfa_trc(bfa, fcxp_tag);
 
-       fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
+       fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
 
-       /**
+       /*
         * @todo f/w should not set residue to non-0 when everything
         *       is received.
         */
        if (fcxp_rsp->req_status == BFA_STATUS_OK)
                fcxp_rsp->residue_len = 0;
        else
-               fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
+               fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
 
        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
 
@@ -856,7 +855,7 @@ hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
        }
 }
 
-/**
+/*
  * Handler to resume sending fcxp when space is available in the CPE queue.
  */
 static void
@@ -871,7 +870,7 @@ bfa_fcxp_qresume(void *cbarg)
        bfa_fcxp_queue(fcxp, send_req);
 }
 
-/**
+/*
  * Queue fcxp send request to firmware.
  */
 static void
@@ -885,26 +884,26 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
                    bfa_lpuid(bfa));
 
-       send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
+       send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
        if (rport) {
                send_req->rport_fw_hndl = rport->fw_handle;
-               send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
+               send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
                if (send_req->max_frmsz == 0)
-                       send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+                       send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
        } else {
                send_req->rport_fw_hndl = 0;
-               send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+               send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
        }
 
-       send_req->vf_id = bfa_os_htons(reqi->vf_id);
+       send_req->vf_id = cpu_to_be16(reqi->vf_id);
        send_req->lp_tag = reqi->lp_tag;
        send_req->class = reqi->class;
        send_req->rsp_timeout = rspi->rsp_timeout;
        send_req->cts = reqi->cts;
        send_req->fchs = reqi->fchs;
 
-       send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
-       send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
+       send_req->req_len = cpu_to_be32(reqi->req_tot_len);
+       send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
 
        /*
         * setup req sgles
@@ -955,11 +954,11 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
 }
 
-/**
+/*
  *  hal_fcxp_api BFA FCXP API
  */
 
-/**
+/*
  * Allocate an FCXP instance to send a response or to send a request
  * that has a response. Request/response buffers are allocated by caller.
  *
@@ -1005,7 +1004,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
        return fcxp;
 }
 
-/**
+/*
  * Get the internal request buffer pointer
  *
  * @param[in]  fcxp    BFA fcxp pointer
@@ -1032,7 +1031,7 @@ bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
        return mod->req_pld_sz;
 }
 
-/**
+/*
  * Get the internal response buffer pointer
  *
  * @param[in]  fcxp    BFA fcxp pointer
@@ -1052,7 +1051,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
        return rspbuf;
 }
 
-/**
+/*
  *             Free the BFA FCXP
  *
  * @param[in]  fcxp                    BFA fcxp pointer
@@ -1069,7 +1068,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
        bfa_fcxp_put(fcxp);
 }
 
-/**
+/*
  * Send a FCXP request
  *
  * @param[in]  fcxp    BFA fcxp pointer
@@ -1103,7 +1102,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
 
        bfa_trc(bfa, fcxp->fcxp_tag);
 
-       /**
+       /*
         * setup request/response info
         */
        reqi->bfa_rport = rport;
@@ -1118,7 +1117,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
        fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
        fcxp->send_cbarg = cbarg;
 
-       /**
+       /*
         * If no room in CPE queue, wait for space in request queue
         */
        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
@@ -1132,7 +1131,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
        bfa_fcxp_queue(fcxp, send_req);
 }
 
-/**
+/*
  * Abort a BFA FCXP
  *
  * @param[in]  fcxp    BFA fcxp pointer
@@ -1186,7 +1185,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
 void
 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
 {
-       /**
+       /*
         * If waiting for room in request queue, cancel reqq wait
         * and free fcxp.
         */
@@ -1202,7 +1201,7 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
 
 
 
-/**
+/*
  *  hal_fcxp_public BFA FCXP public functions
  */
 
@@ -1229,11 +1228,11 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
 }
 
 
-/**
+/*
  *  BFA LPS state machine functions
  */
 
-/**
+/*
  * Init state -- no login
  */
 static void
@@ -1285,7 +1284,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
        }
 }
 
-/**
+/*
  * login is in progress -- awaiting response from firmware
  */
 static void
@@ -1327,7 +1326,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
        }
 }
 
-/**
+/*
  * login pending - awaiting space in request queue
  */
 static void
@@ -1359,7 +1358,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
        }
 }
 
-/**
+/*
  * login complete
  */
 static void
@@ -1400,7 +1399,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
        }
 }
 
-/**
+/*
  * logout in progress - awaiting firmware response
  */
 static void
@@ -1424,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
        }
 }
 
-/**
+/*
  * logout pending -- awaiting space in request queue
  */
 static void
@@ -1451,11 +1450,11 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
 
 
 
-/**
+/*
  *  lps_pvt BFA LPS private functions
  */
 
-/**
+/*
  * return memory requirement
  */
 static void
@@ -1468,7 +1467,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
                *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
 }
 
-/**
+/*
  * bfa module attach at initialization time
  */
 static void
@@ -1479,7 +1478,7 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        struct bfa_lps_s        *lps;
        int                     i;
 
-       bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
+       memset(mod, 0, sizeof(struct bfa_lps_mod_s));
        mod->num_lps = BFA_LPS_MAX_LPORTS;
        if (cfg->drvcfg.min_cfg)
                mod->num_lps = BFA_LPS_MIN_LPORTS;
@@ -1516,7 +1515,7 @@ bfa_lps_stop(struct bfa_s *bfa)
 {
 }
 
-/**
+/*
  * IOC in disabled state -- consider all lps offline
  */
 static void
@@ -1532,7 +1531,7 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
        }
 }
 
-/**
+/*
  * Firmware login response
  */
 static void
@@ -1550,7 +1549,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
                lps->fport      = rsp->f_port;
                lps->npiv_en    = rsp->npiv_en;
                lps->lp_pid     = rsp->lp_pid;
-               lps->pr_bbcred  = bfa_os_ntohs(rsp->bb_credit);
+               lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
                lps->pr_pwwn    = rsp->port_name;
                lps->pr_nwwn    = rsp->node_name;
                lps->auth_req   = rsp->auth_req;
@@ -1579,7 +1578,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
-/**
+/*
  * Firmware logout response
  */
 static void
@@ -1594,7 +1593,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
        bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
-/**
+/*
  * Firmware received a Clear virtual link request (for FCoE)
  */
 static void
@@ -1608,7 +1607,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
        bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
 }
 
-/**
+/*
  * Space is available in request queue, resume queueing request to firmware.
  */
 static void
@@ -1619,7 +1618,7 @@ bfa_lps_reqq_resume(void *lps_arg)
        bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
 }
 
-/**
+/*
  * lps is freed -- triggered by vport delete
  */
 static void
@@ -1632,7 +1631,7 @@ bfa_lps_free(struct bfa_lps_s *lps)
        list_add_tail(&lps->qe, &mod->lps_free_q);
 }
 
-/**
+/*
  * send login request to firmware
  */
 static void
@@ -1648,7 +1647,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
 
        m->lp_tag       = lps->lp_tag;
        m->alpa         = lps->alpa;
-       m->pdu_size     = bfa_os_htons(lps->pdusz);
+       m->pdu_size     = cpu_to_be16(lps->pdusz);
        m->pwwn         = lps->pwwn;
        m->nwwn         = lps->nwwn;
        m->fdisc        = lps->fdisc;
@@ -1657,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps)
        bfa_reqq_produce(lps->bfa, lps->reqq);
 }
 
-/**
+/*
  * send logout request to firmware
  */
 static void
@@ -1676,7 +1675,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
        bfa_reqq_produce(lps->bfa, lps->reqq);
 }
 
-/**
+/*
  * Indirect login completion handler for non-fcs
  */
 static void
@@ -1693,7 +1692,7 @@ bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
 }
 
-/**
+/*
  * Login completion handler -- direct call for fcs, queue for others
  */
 static void
@@ -1711,7 +1710,7 @@ bfa_lps_login_comp(struct bfa_lps_s *lps)
                bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
 }
 
-/**
+/*
  * Indirect logout completion handler for non-fcs
  */
 static void
@@ -1726,7 +1725,7 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Logout completion handler -- direct call for fcs, queue for others
  */
 static void
@@ -1741,7 +1740,7 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps)
                bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Clear virtual link completion handler for non-fcs
  */
 static void
@@ -1757,7 +1756,7 @@ bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
                bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Received Clear virtual link event --direct call for fcs,
  * queue for others
  */
@@ -1777,7 +1776,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps)
 
 
 
-/**
+/*
  *  lps_public BFA LPS public functions
  */
 
@@ -1790,7 +1789,7 @@ bfa_lps_get_max_vport(struct bfa_s *bfa)
                return BFA_LPS_MAX_VPORTS_SUPP_CB;
 }
 
-/**
+/*
  * Allocate a lport service tag.
  */
 struct bfa_lps_s  *
@@ -1810,7 +1809,7 @@ bfa_lps_alloc(struct bfa_s *bfa)
        return lps;
 }
 
-/**
+/*
  * Free lport service tag. This can be called anytime after an alloc.
  * No need to wait for any pending login/logout completions.
  */
@@ -1820,7 +1819,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
 }
 
-/**
+/*
  * Initiate a lport login.
  */
 void
@@ -1837,7 +1836,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
-/**
+/*
  * Initiate a lport fdisc login.
  */
 void
@@ -1854,7 +1853,7 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
-/**
+/*
  * Initiate a lport logout (flogi).
  */
 void
@@ -1863,7 +1862,7 @@ bfa_lps_flogo(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
-/**
+/*
  * Initiate a lport FDISC logout.
  */
 void
@@ -1872,7 +1871,7 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
-/**
+/*
  * Discard a pending login request -- should be called only for
  * link down handling.
  */
@@ -1882,7 +1881,7 @@ bfa_lps_discard(struct bfa_lps_s *lps)
        bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
 }
 
-/**
+/*
  * Return lport services tag
  */
 u8
@@ -1891,7 +1890,7 @@ bfa_lps_get_tag(struct bfa_lps_s *lps)
        return lps->lp_tag;
 }
 
-/**
+/*
  * Return lport services tag given the pid
  */
 u8
@@ -1910,7 +1909,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
        return 0;
 }
 
-/**
+/*
  * return if fabric login indicates support for NPIV
  */
 bfa_boolean_t
@@ -1919,7 +1918,7 @@ bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
        return lps->npiv_en;
 }
 
-/**
+/*
  * Return TRUE if attached to F-Port, else return FALSE
  */
 bfa_boolean_t
@@ -1928,7 +1927,7 @@ bfa_lps_is_fport(struct bfa_lps_s *lps)
        return lps->fport;
 }
 
-/**
+/*
  * Return TRUE if attached to a Brocade Fabric
  */
 bfa_boolean_t
@@ -1936,7 +1935,7 @@ bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
 {
        return lps->brcd_switch;
 }
-/**
+/*
  * return TRUE if authentication is required
  */
 bfa_boolean_t
@@ -1951,7 +1950,7 @@ bfa_lps_get_extstatus(struct bfa_lps_s *lps)
        return lps->ext_status;
 }
 
-/**
+/*
  * return port id assigned to the lport
  */
 u32
@@ -1960,7 +1959,7 @@ bfa_lps_get_pid(struct bfa_lps_s *lps)
        return lps->lp_pid;
 }
 
-/**
+/*
  * return port id assigned to the base lport
  */
 u32
@@ -1971,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
        return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
 }
 
-/**
+/*
  * Return bb_credit assigned in FLOGI response
  */
 u16
@@ -1980,7 +1979,7 @@ bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
        return lps->pr_bbcred;
 }
 
-/**
+/*
  * Return peer port name
  */
 wwn_t
@@ -1989,7 +1988,7 @@ bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
        return lps->pr_pwwn;
 }
 
-/**
+/*
  * Return peer node name
  */
 wwn_t
@@ -1998,7 +1997,7 @@ bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
        return lps->pr_nwwn;
 }
 
-/**
+/*
  * return reason code if login request is rejected
  */
 u8
@@ -2007,7 +2006,7 @@ bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
        return lps->lsrjt_rsn;
 }
 
-/**
+/*
  * return explanation code if login request is rejected
  */
 u8
@@ -2016,7 +2015,7 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
        return lps->lsrjt_expl;
 }
 
-/**
+/*
  * Return fpma/spma MAC for lport
  */
 mac_t
@@ -2025,7 +2024,7 @@ bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
        return lps->lp_mac;
 }
 
-/**
+/*
  * LPS firmware message class handler.
  */
 void
@@ -2055,7 +2054,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        }
 }
 
-/**
+/*
  * FC PORT state machine functions
  */
 static void
@@ -2066,7 +2065,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
 
        switch (event) {
        case BFA_FCPORT_SM_START:
-               /**
+               /*
                 * Start event after IOC is configured and BFA is started.
                 */
                if (bfa_fcport_send_enable(fcport)) {
@@ -2080,7 +2079,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_ENABLE:
-               /**
+               /*
                 * Port is persistently configured to be in enabled state. Do
                 * not change state. Port enabling is done when START event is
                 * received.
@@ -2088,7 +2087,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_DISABLE:
-               /**
+               /*
                 * If a port is persistently configured to be disabled, the
                 * first event will be a port disable request.
                 */
@@ -2124,13 +2123,13 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_ENABLE:
-               /**
+               /*
                 * Already enable is in progress.
                 */
                break;
 
        case BFA_FCPORT_SM_DISABLE:
-               /**
+               /*
                 * Just send disable request to firmware when room becomes
                 * available in request queue.
                 */
@@ -2145,7 +2144,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
 
        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
-               /**
+               /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
@@ -2184,7 +2183,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_ENABLE:
-               /**
+               /*
                 * Already being enabled.
                 */
                break;
@@ -2257,13 +2256,13 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_LINKDOWN:
-               /**
+               /*
                 * Possible to get link down event.
                 */
                break;
 
        case BFA_FCPORT_SM_ENABLE:
-               /**
+               /*
                 * Already enabled.
                 */
                break;
@@ -2306,7 +2305,7 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
 
        switch (event) {
        case BFA_FCPORT_SM_ENABLE:
-               /**
+               /*
                 * Already enabled.
                 */
                break;
@@ -2399,14 +2398,14 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_DISABLE:
-               /**
+               /*
                 * Already being disabled.
                 */
                break;
 
        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
-               /**
+               /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
@@ -2453,7 +2452,7 @@ bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
 
        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
-               /**
+               /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
@@ -2483,7 +2482,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_DISABLE:
-               /**
+               /*
                 * Already being disabled.
                 */
                break;
@@ -2508,7 +2507,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
 
        case BFA_FCPORT_SM_LINKUP:
        case BFA_FCPORT_SM_LINKDOWN:
-               /**
+               /*
                 * Possible to get link events when doing back-to-back
                 * enable/disables.
                 */
@@ -2533,7 +2532,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
 
        switch (event) {
        case BFA_FCPORT_SM_START:
-               /**
+               /*
                 * Ignore start event for a port that is disabled.
                 */
                break;
@@ -2557,7 +2556,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
                break;
 
        case BFA_FCPORT_SM_DISABLE:
-               /**
+               /*
                 * Already disabled.
                 */
                break;
@@ -2587,14 +2586,14 @@ bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
                break;
 
        default:
-               /**
+               /*
                 * Ignore all other events.
                 */
                ;
        }
 }
 
-/**
+/*
  * Port is enabled. IOC is down/failed.
  */
 static void
@@ -2613,14 +2612,14 @@ bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
                break;
 
        default:
-               /**
+               /*
                 * Ignore all events.
                 */
                ;
        }
 }
 
-/**
+/*
  * Port is disabled. IOC is down/failed.
  */
 static void
@@ -2639,14 +2638,14 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
                break;
 
        default:
-               /**
+               /*
                 * Ignore all events.
                 */
                ;
        }
 }
 
-/**
+/*
  * Link state is down
  */
 static void
@@ -2666,7 +2665,7 @@ bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is waiting for down notification
  */
 static void
@@ -2689,7 +2688,7 @@ bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is waiting for down notification and there is a pending up
  */
 static void
@@ -2713,7 +2712,7 @@ bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is up
  */
 static void
@@ -2733,7 +2732,7 @@ bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is waiting for up notification
  */
 static void
@@ -2756,7 +2755,7 @@ bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is waiting for up notification and there is a pending down
  */
 static void
@@ -2780,7 +2779,7 @@ bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
        }
 }
 
-/**
+/*
  * Link state is waiting for up notification and there are pending down and up
  */
 static void
@@ -2806,7 +2805,7 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
 
 
 
-/**
+/*
  *  hal_port_private
  */
 
@@ -2821,7 +2820,7 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
                bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
 }
 
-/**
+/*
  * Send SCN notification to upper layers.
  * trunk - false if caller is fcport to ignore fcport event in trunked mode
  */
@@ -2897,7 +2896,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
        bfa_meminfo_dma_phys(meminfo) = dm_pa;
 }
 
-/**
+/*
  * Memory initialization.
  */
 static void
@@ -2909,7 +2908,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        struct bfa_fcport_ln_s *ln = &fcport->ln;
        struct bfa_timeval_s tv;
 
-       bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+       memset(fcport, 0, sizeof(struct bfa_fcport_s));
        fcport->bfa = bfa;
        ln->fcport = fcport;
 
@@ -2918,13 +2917,13 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
        bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
 
-       /**
+       /*
         * initialize time stamp for stats reset
         */
        bfa_os_gettimeofday(&tv);
        fcport->stats_reset_time = tv.tv_sec;
 
-       /**
+       /*
         * initialize and set default configuration
         */
        port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
@@ -2942,7 +2941,7 @@ bfa_fcport_detach(struct bfa_s *bfa)
 {
 }
 
-/**
+/*
  * Called when IOC is ready.
  */
 static void
@@ -2951,7 +2950,7 @@ bfa_fcport_start(struct bfa_s *bfa)
        bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
 }
 
-/**
+/*
  * Called before IOC is stopped.
  */
 static void
@@ -2961,7 +2960,7 @@ bfa_fcport_stop(struct bfa_s *bfa)
        bfa_trunk_iocdisable(bfa);
 }
 
-/**
+/*
  * Called when IOC failure is detected.
  */
 static void
@@ -2986,18 +2985,17 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
                fcport->myalpa = 0;
 
        /* QoS Details */
-       bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
-       bfa_os_assign(fcport->qos_vc_attr,
-               pevent->link_state.vc_fcf.qos_vc_attr);
+       fcport->qos_attr = pevent->link_state.qos_attr;
+       fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
 
-       /**
+       /*
         * update trunk state if applicable
         */
        if (!fcport->cfg.trunked)
                trunk->attr.state = BFA_TRUNK_DISABLED;
 
        /* update FCoE specific */
-       fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);
+       fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
 
        bfa_trc(fcport->bfa, fcport->speed);
        bfa_trc(fcport->bfa, fcport->topology);
@@ -3010,7 +3008,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
        fcport->topology = BFA_PORT_TOPOLOGY_NONE;
 }
 
-/**
+/*
  * Send port enable message to firmware.
  */
 static bfa_boolean_t
@@ -3018,13 +3016,13 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
 {
        struct bfi_fcport_enable_req_s *m;
 
-       /**
+       /*
         * Increment message tag before queue check, so that responses to old
         * requests are discarded.
         */
        fcport->msgtag++;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3040,19 +3038,19 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
        m->pwwn = fcport->pwwn;
        m->port_cfg = fcport->cfg;
        m->msgtag = fcport->msgtag;
-       m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+       m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
        bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
        bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
        return BFA_TRUE;
 }
 
-/**
+/*
  * Send port disable message to firmware.
  */
 static bfa_boolean_t
@@ -3060,13 +3058,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
 {
        struct bfi_fcport_req_s *m;
 
-       /**
+       /*
         * Increment message tag before queue check, so that responses to old
         * requests are discarded.
         */
        fcport->msgtag++;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3080,7 +3078,7 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
                        bfa_lpuid(fcport->bfa));
        m->msgtag = fcport->msgtag;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3105,7 +3103,7 @@ bfa_fcport_send_txcredit(void *port_cbarg)
        struct bfa_fcport_s *fcport = port_cbarg;
        struct bfi_fcport_set_svc_params_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3116,9 +3114,9 @@ bfa_fcport_send_txcredit(void *port_cbarg)
 
        bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
                        bfa_lpuid(fcport->bfa));
-       m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);
+       m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3134,7 +3132,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
 
        /* Now swap the 32 bit fields */
        for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
-               dip[i] = bfa_os_ntohl(sip[i]);
+               dip[i] = be32_to_cpu(sip[i]);
 }
 
 static void
@@ -3148,11 +3146,11 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
        for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
             i = i + 2) {
 #ifdef __BIGENDIAN
-               dip[i] = bfa_os_ntohl(sip[i]);
-               dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+               dip[i] = be32_to_cpu(sip[i]);
+               dip[i + 1] = be32_to_cpu(sip[i + 1]);
 #else
-               dip[i] = bfa_os_ntohl(sip[i + 1]);
-               dip[i + 1] = bfa_os_ntohl(sip[i]);
+               dip[i] = be32_to_cpu(sip[i + 1]);
+               dip[i + 1] = be32_to_cpu(sip[i]);
 #endif
        }
 }
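
The statistics-swap hunks above replace the driver's bfa_os_ntohl() wrapper with the kernel's be32_to_cpu(). As a rough sketch of that substitution (not part of the patch; the structure and field names below are invented for illustration), converting a big-endian firmware field with the standard byte-order helpers looks like this:

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical wire-format response carrying big-endian fields */
struct example_fw_rsp {
	__be16	tag;
	__be32	len;
};

static void example_fw_rsp_swap(const struct example_fw_rsp *rsp,
				u16 *tag, u32 *len)
{
	/* be16_to_cpu()/be32_to_cpu() stand in for bfa_os_ntohs()/bfa_os_ntohl() */
	*tag = be16_to_cpu(rsp->tag);
	*len = be32_to_cpu(rsp->len);
}
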
@@ -3223,7 +3221,7 @@ bfa_fcport_send_stats_get(void *cbarg)
        }
        fcport->stats_qfull = BFA_FALSE;
 
-       bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+       memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
                        bfa_lpuid(fcport->bfa));
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3237,7 +3235,7 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
        if (complete) {
                struct bfa_timeval_s tv;
 
-               /**
+               /*
                 * re-initialize time stamp for stats reset
                 */
                bfa_os_gettimeofday(&tv);
@@ -3285,13 +3283,13 @@ bfa_fcport_send_stats_clear(void *cbarg)
        }
        fcport->stats_qfull = BFA_FALSE;
 
-       bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+       memset(msg, 0, sizeof(struct bfi_fcport_req_s));
        bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
                        bfa_lpuid(fcport->bfa));
        bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 }
 
-/**
+/*
  * Handle trunk SCN event from firmware.
  */
 static void
@@ -3312,7 +3310,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
        bfa_trc(fcport->bfa, scn->trunk_state);
        bfa_trc(fcport->bfa, scn->trunk_speed);
 
-       /**
+       /*
         * Save off new state for trunk attribute query
         */
        state_prev = trunk->attr.state;
@@ -3327,7 +3325,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
                lattr->trunk_wwn  = tlink->trunk_wwn;
                lattr->fctl       = tlink->fctl;
                lattr->speed      = tlink->speed;
-               lattr->deskew     = bfa_os_ntohl(tlink->deskew);
+               lattr->deskew     = be32_to_cpu(tlink->deskew);
 
                if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
                        fcport->speed    = tlink->speed;
@@ -3360,7 +3358,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
                        BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
        }
 
-       /**
+       /*
         * Notify upper layers if trunk state changed.
         */
        if ((state_prev != trunk->attr.state) ||
@@ -3376,7 +3374,7 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
        int i = 0;
 
-       /**
+       /*
         * In trunked mode, notify upper layers that link is down
         */
        if (fcport->cfg.trunked) {
@@ -3400,11 +3398,11 @@ bfa_trunk_iocdisable(struct bfa_s *bfa)
 
 
 
-/**
+/*
  *  hal_port_public
  */
 
-/**
+/*
  * Called to initialize port attributes
  */
 void
@@ -3412,7 +3410,7 @@ bfa_fcport_init(struct bfa_s *bfa)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-       /**
+       /*
         * Initialize port attributes from IOC hardware data.
         */
        bfa_fcport_set_wwns(fcport);
@@ -3426,7 +3424,7 @@ bfa_fcport_init(struct bfa_s *bfa)
        bfa_assert(fcport->speed_sup);
 }
 
-/**
+/*
  * Firmware message handler.
  */
 void
@@ -3507,11 +3505,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 
 
 
-/**
+/*
  *  hal_port_api
  */
 
-/**
+/*
  * Registered callback for port events.
  */
 void
@@ -3552,7 +3550,7 @@ bfa_fcport_disable(struct bfa_s *bfa)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Configure port speed.
  */
 bfa_status_t
@@ -3574,7 +3572,7 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get current speed.
  */
 enum bfa_port_speed
@@ -3585,7 +3583,7 @@ bfa_fcport_get_speed(struct bfa_s *bfa)
        return fcport->speed;
 }
 
-/**
+/*
  * Configure port topology.
  */
 bfa_status_t
@@ -3610,7 +3608,7 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get current topology.
  */
 enum bfa_port_topology
@@ -3710,7 +3708,7 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
        bfa_fcport_send_txcredit(fcport);
 }
 
-/**
+/*
  * Get port attributes.
  */
 
@@ -3729,7 +3727,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
 {
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-       bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));
+       memset(attr, 0, sizeof(struct bfa_port_attr_s));
 
        attr->nwwn = fcport->nwwn;
        attr->pwwn = fcport->pwwn;
@@ -3737,7 +3735,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
        attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
        attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
 
-       bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
+       memcpy(&attr->pport_cfg, &fcport->cfg,
                sizeof(struct bfa_port_cfg_s));
        /* speed attributes */
        attr->pport_cfg.speed = fcport->cfg.speed;
@@ -3770,7 +3768,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
 
 #define BFA_FCPORT_STATS_TOV   1000
 
-/**
+/*
  * Fetch port statistics (FCQoS or FCoE).
  */
 bfa_status_t
@@ -3796,7 +3794,7 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Reset port statistics (FCQoS or FCoE).
  */
 bfa_status_t
@@ -3820,7 +3818,7 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Fetch FCQoS port statistics
  */
 bfa_status_t
@@ -3833,7 +3831,7 @@ bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
        return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
 }
 
-/**
+/*
  * Reset FCoE port statistics
  */
 bfa_status_t
@@ -3845,7 +3843,7 @@ bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
        return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
 }
 
-/**
+/*
  * Fetch FCQoS port statistics
  */
 bfa_status_t
@@ -3858,7 +3856,7 @@ bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
        return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
 }
 
-/**
+/*
  * Reset FCoE port statistics
  */
 bfa_status_t
@@ -3876,7 +3874,7 @@ bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
        struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
        qos_attr->state = fcport->qos_attr.state;
-       qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
+       qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
 }
 
 void
@@ -3887,10 +3885,10 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
        struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
        u32 i = 0;
 
-       qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
-       qos_vc_attr->shared_credit  = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+       qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+       qos_vc_attr->shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
        qos_vc_attr->elp_opmode_flags  =
-                       bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+                       be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
 
        /* Individual VC info */
        while (i < qos_vc_attr->total_vc_count) {
@@ -3904,7 +3902,7 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
        }
 }
 
-/**
+/*
  * Fetch port attributes.
  */
 bfa_boolean_t
@@ -3939,7 +3937,7 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
 
        if (ioc_type == BFA_IOC_TYPE_FC) {
                fcport->cfg.qos_enabled = on_off;
-               /**
+               /*
                 * Notify fcpim of the change in QoS state
                 */
                bfa_fcpim_update_ioredirect(bfa);
@@ -3959,7 +3957,7 @@ bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
                fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
 }
 
-/**
+/*
  * Configure default minimum ratelim speed
  */
 bfa_status_t
@@ -3980,7 +3978,7 @@ bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get default minimum ratelim speed
  */
 enum bfa_port_speed
@@ -4095,10 +4093,10 @@ bfa_trunk_disable(struct bfa_s *bfa)
 }
 
 
-/**
+/*
  * Rport State machine functions
  */
-/**
+/*
  * Beginning state, only online event expected.
  */
 static void
@@ -4151,7 +4149,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Waiting for rport create response from firmware.
  */
 static void
@@ -4188,7 +4186,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Request queue is full, awaiting queue resume to send create request.
  */
 static void
@@ -4229,7 +4227,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Online state - normal parking state.
  */
 static void
@@ -4275,9 +4273,9 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
                bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
 
                qos_scn->old_qos_attr.qos_flow_id  =
-                       bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
+                       be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
                qos_scn->new_qos_attr.qos_flow_id  =
-                       bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
+                       be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
 
                if (qos_scn->old_qos_attr.qos_flow_id !=
                        qos_scn->new_qos_attr.qos_flow_id)
@@ -4297,7 +4295,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Firmware rport is being deleted - awaiting f/w response.
  */
 static void
@@ -4360,7 +4358,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Offline state.
  */
 static void
@@ -4395,7 +4393,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Rport is deleted, waiting for firmware response to delete.
  */
 static void
@@ -4447,7 +4445,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
        }
 }
 
-/**
+/*
  * Waiting for rport create response from firmware. A delete is pending.
  */
 static void
@@ -4478,7 +4476,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
        }
 }
 
-/**
+/*
  * Waiting for rport create response from firmware. Rport offline is pending.
  */
 static void
@@ -4513,7 +4511,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
        }
 }
 
-/**
+/*
  * IOC h/w failed.
  */
 static void
@@ -4553,7 +4551,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
 
 
 
-/**
+/*
  *  bfa_rport_private BFA rport private functions
  */
 
@@ -4612,12 +4610,12 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   !(mod->num_rports & (mod->num_rports - 1)));
 
        for (i = 0; i < mod->num_rports; i++, rp++) {
-               bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
+               memset(rp, 0, sizeof(struct bfa_rport_s));
                rp->bfa = bfa;
                rp->rport_tag = i;
                bfa_sm_set_state(rp, bfa_rport_sm_uninit);
 
-               /**
+               /*
                 *  - is unused
                 */
                if (i)
@@ -4626,7 +4624,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
        }
 
-       /**
+       /*
         * consume memory
         */
        bfa_meminfo_kva(meminfo) = (u8 *) rp;
@@ -4687,7 +4685,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
 {
        struct bfi_rport_create_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4699,7 +4697,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
                        bfa_lpuid(rp->bfa));
        m->bfa_handle = rp->rport_tag;
-       m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
+       m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
        m->pid = rp->rport_info.pid;
        m->lp_tag = rp->rport_info.lp_tag;
        m->local_pid = rp->rport_info.local_pid;
@@ -4708,7 +4706,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
        m->vf_id = rp->rport_info.vf_id;
        m->cisc = rp->rport_info.cisc;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4720,7 +4718,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
 {
        struct bfi_rport_delete_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4733,7 +4731,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
                        bfa_lpuid(rp->bfa));
        m->fw_handle = rp->fw_handle;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4745,7 +4743,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
 {
        struct bfa_rport_speed_req_s *m;
 
-       /**
+       /*
         * check for room in queue to send request now
         */
        m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4759,7 +4757,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
        m->fw_handle = rp->fw_handle;
        m->speed = (u8)rp->rport_info.speed;
 
-       /**
+       /*
         * queue I/O message to firmware
         */
        bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4768,11 +4766,11 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
 
 
 
-/**
+/*
  *  bfa_rport_public
  */
 
-/**
+/*
  * Rport interrupt processing.
  */
 void
@@ -4814,7 +4812,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
 
 
-/**
+/*
  *  bfa_rport_api
  */
 
@@ -4849,7 +4847,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
 {
        bfa_assert(rport_info->max_frmsz != 0);
 
-       /**
+       /*
         * Some JBODs are seen to be not setting PDU size correctly in PLOGI
         * responses. Default to minimum size.
         */
@@ -4858,7 +4856,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
                rport_info->max_frmsz = FC_MIN_PDUSZ;
        }
 
-       bfa_os_assign(rport->rport_info, *rport_info);
+       rport->rport_info = *rport_info;
        bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
 }
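
The hunk above drops the bfa_os_assign() wrapper in favour of a plain structure assignment, as the diff itself shows. A minimal sketch of the idea (illustrative names only, not taken from the driver):

#include <linux/types.h>

/* copying a fixed-size structure needs no helper; the compiler emits the copy */
struct example_rport_info {
	u32	pid;
	u16	max_frmsz;
};

static void example_rport_info_copy(struct example_rport_info *dst,
				    const struct example_rport_info *src)
{
	*dst = *src;	/* equivalent to the removed bfa_os_assign(*dst, *src) */
}
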
 
@@ -4890,22 +4888,22 @@ bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
                                        struct bfa_rport_qos_attr_s *qos_attr)
 {
        qos_attr->qos_priority  = rport->qos_attr.qos_priority;
-       qos_attr->qos_flow_id  = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
+       qos_attr->qos_flow_id  = be32_to_cpu(rport->qos_attr.qos_flow_id);
 
 }
 
 void
 bfa_rport_clear_stats(struct bfa_rport_s *rport)
 {
-       bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
+       memset(&rport->stats, 0, sizeof(rport->stats));
 }
 
 
-/**
+/*
  * SGPG related functions
  */
 
-/**
+/*
  * Compute and return memory needed by FCP(im) module.
  */
 static void
@@ -4957,8 +4955,8 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
        bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
 
        for (i = 0; i < mod->num_sgpgs; i++) {
-               bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
-               bfa_os_memset(sgpg, 0, sizeof(*sgpg));
+               memset(hsgpg, 0, sizeof(*hsgpg));
+               memset(sgpg, 0, sizeof(*sgpg));
 
                hsgpg->sgpg = sgpg;
                sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
@@ -4997,7 +4995,7 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa)
 
 
 
-/**
+/*
  *  hal_sgpg_public BFA SGPG public functions
  */
 
@@ -5039,7 +5037,7 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
        if (list_empty(&mod->sgpg_wait_q))
                return;
 
-       /**
+       /*
         * satisfy as many waiting requests as possible
         */
        do {
@@ -5067,11 +5065,11 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
 
        wqe->nsgpg_total = wqe->nsgpg = nsgpg;
 
-       /**
+       /*
         * allocate any left to this one first
         */
        if (mod->free_sgpgs) {
-               /**
+               /*
                 * no one else is waiting for SGPG
                 */
                bfa_assert(list_empty(&mod->sgpg_wait_q));
@@ -5105,7 +5103,7 @@ bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
        wqe->cbarg = cbarg;
 }
 
-/**
+/*
  *  UF related functions
  */
 /*
@@ -5136,7 +5134,7 @@ claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
        bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
        bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
 
-       bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
+       memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
 }
 
 static void
@@ -5153,11 +5151,11 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
 
        for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
             i++, uf_bp_msg++) {
-               bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+               memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
 
                uf_bp_msg->buf_tag = i;
                buf_len = sizeof(struct bfa_uf_buf_s);
-               uf_bp_msg->buf_len = bfa_os_htons(buf_len);
+               uf_bp_msg->buf_len = cpu_to_be16(buf_len);
                bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
                            bfa_lpuid(ufm->bfa));
 
@@ -5173,7 +5171,7 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                bfa_sge_to_be(&sge[1]);
        }
 
-       /**
+       /*
         * advance pointer beyond consumed memory
         */
        bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
@@ -5194,7 +5192,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
         * Initialize UFs and queue it in UF free queue
         */
        for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
-               bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
+               memset(uf, 0, sizeof(struct bfa_uf_s));
                uf->bfa = ufm->bfa;
                uf->uf_tag = i;
                uf->pb_len = sizeof(struct bfa_uf_buf_s);
@@ -5203,7 +5201,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
                list_add_tail(&uf->qe, &ufm->uf_free_q);
        }
 
-       /**
+       /*
         * advance memory pointer
         */
        bfa_meminfo_kva(mi) = (u8 *) uf;
@@ -5241,7 +5239,7 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 {
        struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
 
-       bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
+       memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
        ufm->bfa = bfa;
        ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
        INIT_LIST_HEAD(&ufm->uf_free_q);
@@ -5279,7 +5277,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
        if (!uf_post_msg)
                return BFA_STATUS_FAILED;
 
-       bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+       memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
                      sizeof(struct bfi_uf_buf_post_s));
        bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
 
@@ -5310,8 +5308,8 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
        u8 *buf = &uf_buf->d[0];
        struct fchs_s *fchs;
 
-       m->frm_len = bfa_os_ntohs(m->frm_len);
-       m->xfr_len = bfa_os_ntohs(m->xfr_len);
+       m->frm_len = be16_to_cpu(m->frm_len);
+       m->xfr_len = be16_to_cpu(m->xfr_len);
 
        fchs = (struct fchs_s *)uf_buf;
 
@@ -5365,11 +5363,11 @@ bfa_uf_start(struct bfa_s *bfa)
 
 
 
-/**
+/*
  *  hal_uf_api
  */
 
-/**
+/*
  * Register handler for all unsolicited receive frames.
  *
  * @param[in]  bfa             BFA instance
@@ -5385,7 +5383,7 @@ bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
        ufm->cbarg = cbarg;
 }
 
-/**
+/*
  *     Free an unsolicited frame back to BFA.
  *
  * @param[in]          uf              unsolicited frame to be freed
@@ -5401,7 +5399,7 @@ bfa_uf_free(struct bfa_uf_s *uf)
 
 
 
-/**
+/*
  *  uf_pub BFA uf module public functions
  */
 void
index 9921dad0d0392e4dc58df172871e0fd73c9fa39c..e2349d5cdb931124132e2c6a2b3cc52200dcf40b 100644 (file)
 #include "bfi_ms.h"
 
 
-/**
+/*
  * Scatter-gather DMA related defines
  */
 #define BFA_SGPG_MIN   (16)
 
-/**
+/*
  * Alignment macro for SG page allocation
  */
 #define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
@@ -48,7 +48,7 @@ struct bfa_sgpg_s {
        union bfi_addr_u sgpg_pa;       /*  pa of SG page               */
 };
 
-/**
+/*
  * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
  * SG pages required.
  */
@@ -75,7 +75,7 @@ void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
 
 
-/**
+/*
  * FCXP related defines
  */
 #define BFA_FCXP_MIN           (1)
@@ -115,12 +115,12 @@ typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
 
 
 
-/**
+/*
  * Information needed for a FCXP request
  */
 struct bfa_fcxp_req_info_s {
        struct bfa_rport_s *bfa_rport;
-                                       /** Pointer to the bfa rport that was
+                                       /* Pointer to the bfa rport that was
                                         * returned from bfa_rport_create().
                                         * This could be left NULL for WKA or
                                         * for FCXP interactions before the
@@ -137,11 +137,10 @@ struct bfa_fcxp_req_info_s {
 
 struct bfa_fcxp_rsp_info_s {
        struct fchs_s   rsp_fchs;
-                               /** !< Response frame's FC header will
+                               /* Response frame's FC header will
                                 * be sent back in this field */
        u8              rsp_timeout;
-                               /** !< timeout in seconds, 0-no response
-                                */
+                               /* timeout in seconds, 0-no response */
        u8              rsvd2[3];
        u32     rsp_maxlen;     /*  max response length expected */
 };
@@ -218,7 +217,7 @@ struct bfa_fcxp_wqe_s {
 void   bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
 
-/**
+/*
  * RPORT related defines
  */
 #define BFA_RPORT_MIN  4
@@ -232,7 +231,7 @@ struct bfa_rport_mod_s {
 
 #define BFA_RPORT_MOD(__bfa)   (&(__bfa)->modules.rport_mod)
 
-/**
+/*
  * Convert rport tag to RPORT
  */
 #define BFA_RPORT_FROM_TAG(__bfa, _tag)                                \
@@ -244,7 +243,7 @@ struct bfa_rport_mod_s {
  */
 void   bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
-/**
+/*
  *     BFA rport information.
  */
 struct bfa_rport_info_s {
@@ -259,7 +258,7 @@ struct bfa_rport_info_s {
        enum bfa_port_speed speed;      /*  Rport's current speed           */
 };
 
-/**
+/*
  * BFA rport data structure
  */
 struct bfa_rport_s {
@@ -282,7 +281,7 @@ struct bfa_rport_s {
 #define BFA_RPORT_FC_COS(_rport)       ((_rport)->rport_info.fc_class)
 
 
-/**
+/*
  * UF - unsolicited receive related defines
  */
 
@@ -305,7 +304,7 @@ struct bfa_uf_s {
        struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
 };
 
-/**
+/*
  *      Callback prototype for unsolicited frame receive handler.
  *
  * @param[in]           cbarg           callback arg for receive handler
@@ -338,7 +337,7 @@ void        bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
 #define BFA_UF_BUFSZ   (2 * 1024 + 256)
 
-/**
+/*
  * @todo private
  */
 struct bfa_uf_buf_s {
@@ -346,7 +345,7 @@ struct bfa_uf_buf_s {
 };
 
 
-/**
+/*
  * LPS - bfa lport login/logout service interface
  */
 struct bfa_lps_s {
@@ -397,14 +396,14 @@ struct bfa_lps_mod_s {
 void   bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
 
-/**
+/*
  * FCPORT related defines
  */
 
 #define BFA_FCPORT(_bfa)       (&((_bfa)->modules.port))
 typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
 
-/**
+/*
  * Link notification data structure
  */
 struct bfa_fcport_ln_s {
@@ -418,7 +417,7 @@ struct bfa_fcport_trunk_s {
        struct bfa_trunk_attr_s attr;
 };
 
-/**
+/*
  * BFA FC port data structure
  */
 struct bfa_fcport_s {
@@ -613,7 +612,7 @@ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
                          void *cbarg);
 void bfa_uf_free(struct bfa_uf_s *uf);
 
-/**
+/*
  * bfa lport service api
  */
 
index 4d8784e06e142a8d0dd4d178cd947dbca6c17ff9..1f938974b84876978600b40d97c265ddd7c2c7c0 100644 (file)
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfad.c Linux driver PCI interface module.
  */
 #include <linux/module.h>
@@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
 static void
 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
 
-/**
+/*
  * Beginning state for the driver instance, awaiting the pci_probe event
  */
 static void
@@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
        }
 }
 
-/**
+/*
  * Driver Instance is created, awaiting event INIT to initialize the bfad
  */
 static void
@@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
        }
 }
 
-/**
+/*
  *  BFA callbacks
  */
 void
@@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status)
        complete(&fcomp->comp);
 }
 
-/**
+/*
  * bfa_init callback
  */
 void
@@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status)
        complete(&bfad->comp);
 }
 
-/**
+/*
  *  BFA_FCS callbacks
  */
 struct bfad_port_s *
@@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
        }
 }
 
-/**
+/*
  * FCS RPORT alloc callback, after successful PLOGI by FCS
  */
 bfa_status_t
@@ -478,7 +478,7 @@ ext:
        return rc;
 }
 
-/**
+/*
  * FCS PBC VPORT Create
  */
 void
@@ -663,7 +663,7 @@ ext:
        return rc;
 }
 
-/**
+/*
  * Create a vport under a vf.
  */
 bfa_status_t
@@ -716,30 +716,6 @@ ext:
        return rc;
 }
 
-/**
- * Create a vf and its base vport implicitely.
- */
-bfa_status_t
-bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
-              struct bfa_lport_cfg_s *port_cfg)
-{
-       struct bfad_vf_s      *vf;
-       int             rc = BFA_STATUS_OK;
-
-       vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
-       if (!vf) {
-               rc = BFA_STATUS_FAILED;
-               goto ext;
-       }
-
-       rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
-                              vf);
-       if (rc != BFA_STATUS_OK)
-               kfree(vf);
-ext:
-       return rc;
-}
-
 void
 bfad_bfa_tmo(unsigned long data)
 {
@@ -885,20 +861,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
        pci_set_drvdata(pdev, NULL);
 }
 
-void
-bfad_fcs_port_cfg(struct bfad_s *bfad)
-{
-       struct bfa_lport_cfg_s  port_cfg;
-       struct bfa_port_attr_s attr;
-       char            symname[BFA_SYMNAME_MAXLEN];
-
-       sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
-       memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
-       bfa_fcport_get_attr(&bfad->bfa, &attr);
-       port_cfg.nwwn = attr.nwwn;
-       port_cfg.pwwn = attr.pwwn;
-}
-
 bfa_status_t
 bfad_drv_init(struct bfad_s *bfad)
 {
@@ -1089,9 +1051,6 @@ bfad_start_ops(struct bfad_s *bfad) {
        bfa_fcs_init(&bfad->bfa_fcs);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-       /* PPORT FCS config */
-       bfad_fcs_port_cfg(bfad);
-
        retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
        if (retval != BFA_STATUS_OK) {
                if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
@@ -1181,7 +1140,7 @@ bfad_worker(void *ptr)
        return 0;
 }
 
-/**
+/*
  *  BFA driver interrupt functions
  */
 irqreturn_t
@@ -1240,7 +1199,7 @@ bfad_msix(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-/**
+/*
  * Initialize the MSIX entry table.
  */
 static void
@@ -1293,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
        return 0;
 }
 
-/**
+/*
  * Setup MSIX based interrupt.
  */
 int
@@ -1374,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad)
        }
 }
 
-/**
+/*
  * PCI probe entry.
  */
 int
@@ -1460,7 +1419,7 @@ out:
        return error;
 }
 
-/**
+/*
  * PCI remove entry.
  */
 void
@@ -1541,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = {
        .remove = __devexit_p(bfad_pci_remove),
 };
 
-/**
+/*
  * Driver module init.
  */
 static int __init
@@ -1581,7 +1540,7 @@ ext:
        return error;
 }
 
-/**
+/*
  * Driver module exit.
  */
 static void __exit
index d8843720eac1b42b39606c6af93f1cde39653b5c..ed9fff440b5c3ef00014502120f7f475072b764e 100644 (file)
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_attr.c Linux driver configuration interface module.
  */
 
 #include "bfad_drv.h"
 #include "bfad_im.h"
 
-/**
+/*
  * FC transport template entry, get SCSI target port ID.
  */
 void
@@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI target nwwn.
  */
 void
@@ -70,11 +70,11 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
        if (itnim)
                node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
 
-       fc_starget_node_name(starget) = bfa_os_htonll(node_name);
+       fc_starget_node_name(starget) = cpu_to_be64(node_name);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI target pwwn.
  */
 void
@@ -96,11 +96,11 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
        if (itnim)
                port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
 
-       fc_starget_port_name(starget) = bfa_os_htonll(port_name);
+       fc_starget_port_name(starget) = cpu_to_be64(port_name);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port ID.
  */
 void
@@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
                        bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port type.
  */
 static void
@@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
        }
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port state.
  */
 static void
@@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
        }
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host active fc4s.
  */
 static void
@@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
        fc_host_active_fc4s(shost)[7] = 1;
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host link speed.
  */
 static void
@@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
        }
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port type.
  */
 static void
@@ -249,11 +249,11 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
 
        fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
 
-       fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
+       fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
 
 }
 
-/**
+/*
  * FC transport template entry, get BFAD statistics.
  */
 static struct fc_host_statistics *
@@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost)
        return hstats;
 }
 
-/**
+/*
  * FC transport template entry, reset BFAD statistics.
  */
 static void
@@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
        return;
 }
 
-/**
+/*
  * FC transport template entry, get rport loss timeout.
  */
 static void
@@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport)
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, set rport loss timeout.
  */
 static void
@@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = {
        .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
 };
 
-/**
+/*
  *  Scsi_Host_attrs SCSI host attributes
  */
 static ssize_t
@@ -733,7 +733,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
        u64        nwwn;
 
        nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
-       return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
+       return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
 }
 
 static ssize_t
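Throughout this file the driver-private bfa_os_htonll() wrapper gives way to the generic cpu_to_be64() helper. A minimal sketch of the pattern, with a hypothetical helper name, assuming the WWN is kept in CPU byte order by the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper: convert a WWN kept in CPU order by the driver
 * into the big-endian form the FC transport attributes expect.
 * cpu_to_be64() is a no-op on big-endian hosts and a byte swap on
 * little-endian ones, i.e. what bfa_os_htonll() used to provide.
 */
static inline __be64 example_wwn_to_wire(u64 wwn)
{
	return cpu_to_be64(wwn);
}
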
index 69ed1c4a903eac810fc40a70d86b20e7f440f6c1..1fedeeb4ac1f87dd69e4945c6c3b65fdd4cdab37 100644 (file)
@@ -318,7 +318,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
        regbuf =  (u32 *)bfad->regdata;
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        for (i = 0; i < len; i++) {
-               *regbuf = bfa_reg_read(reg_addr);
+               *regbuf = readl(reg_addr);
                regbuf++;
                reg_addr += sizeof(u32);
        }
@@ -361,7 +361,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 
        reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
        spin_lock_irqsave(&bfad->bfad_lock, flags);
-       bfa_reg_write(reg_addr, val);
+       writel(val, reg_addr);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
        return nbytes;
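The debugfs register paths above switch from the driver's bfa_reg_read()/bfa_reg_write() wrappers to the standard readl()/writel() MMIO accessors. A minimal sketch of the same access pattern; the function name and offset are illustrative only and assume the BAR was mapped earlier with ioremap():

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: read back the old value of a 32-bit register in a
 * BAR0 window, then write a new one.  readl()/writel() are the standard
 * little-endian 32-bit MMIO accessors the old wrappers boiled down to.
 */
static u32 example_reg_update(void __iomem *bar0, unsigned long off, u32 val)
{
	u32 old = readl(bar0 + off);

	writel(val, bar0 + off);
	return old;
}
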
index 98420bbb4f3f1d01f6c9035472d42f69a258ab88..97f9b6c0937e75cf3a16762092ac46ac6f6cb890 100644 (file)
  * General Public License for more details.
  */
 
-/**
+/*
  * Contains base driver definitions.
  */
 
-/**
+/*
  *  bfa_drv.h Linux driver data structures.
  */
 
@@ -309,7 +309,6 @@ void                bfad_bfa_tmo(unsigned long data);
 void           bfad_init_timer(struct bfad_s *bfad);
 int            bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
 void           bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
-void           bfad_fcs_port_cfg(struct bfad_s *bfad);
 void           bfad_drv_uninit(struct bfad_s *bfad);
 int            bfad_worker(void *ptr);
 void           bfad_debugfs_init(struct bfad_port_s *port);
index d950ee44016eeefcf3e8870483f9322f667551ce..8daa716739d1bd6d2a6a4180c008161e7f148f27 100644 (file)
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfad_im.c Linux driver IM module.
  */
 
@@ -164,10 +164,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
                wake_up(wq);
 }
 
-/**
+/*
  *  Scsi_Host_template SCSI host template
  */
-/**
+/*
  * Scsi_Host template entry, returns BFAD PCI info.
  */
 static const char *
@@ -196,7 +196,7 @@ bfad_im_info(struct Scsi_Host *shost)
        return bfa_buf;
 }
 
-/**
+/*
  * Scsi_Host template entry, aborts the specified SCSI command.
  *
  * Returns: SUCCESS or FAILED.
@@ -280,7 +280,7 @@ out:
        return rc;
 }
 
-/**
+/*
  * Scsi_Host template entry, resets a LUN and aborts all its commands.
  *
  * Returns: SUCCESS or FAILED.
@@ -319,7 +319,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
                goto out;
        }
 
-       /**
+       /*
         * Set host_scribble to NULL to avoid aborting a task command
         * if it happens.
         */
@@ -346,7 +346,7 @@ out:
        return rc;
 }
 
-/**
+/*
  * Scsi_Host template entry, resets the bus and aborts all commands.
  */
 static int
@@ -396,7 +396,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
        return SUCCESS;
 }
 
-/**
+/*
  * Scsi_Host template entry slave_destroy.
  */
 static void
@@ -406,11 +406,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
        return;
 }
 
-/**
+/*
  *  BFA FCS itnim callbacks
  */
 
-/**
+/*
  * BFA FCS itnim alloc callback, after successful PRLI
  * Context: Interrupt
  */
@@ -433,7 +433,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
        bfad->bfad_flags |= BFAD_RPORT_ONLINE;
 }
 
-/**
+/*
  * BFA FCS itnim free callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -471,7 +471,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
                queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * BFA FCS itnim online callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -492,7 +492,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
                queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * BFA FCS itnim offline callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -519,7 +519,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
                queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * Allocate a Scsi_Host for a port.
  */
 int
@@ -751,7 +751,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
        return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Scsi_Host template entry.
  *
  * Description:
@@ -896,7 +896,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
        return NULL;
 }
 
-/**
+/*
  * Scsi_Host template entry slave_alloc
  */
 static int
@@ -915,12 +915,16 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
 static u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
-       struct bfa_ioc_attr_s ioc_attr;
+       struct bfa_ioc_attr_s *ioc_attr;
        u32 supported_speed = 0;
 
-       bfa_get_attr(bfa, &ioc_attr);
-       if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
-               if (ioc_attr.adapter_attr.is_mezz) {
+       ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
+       if (!ioc_attr)
+               return 0;
+
+       bfa_get_attr(bfa, ioc_attr);
+       if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+               if (ioc_attr->adapter_attr.is_mezz) {
                        supported_speed |= FC_PORTSPEED_8GBIT |
                                FC_PORTSPEED_4GBIT |
                                FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
@@ -929,12 +933,13 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
                                FC_PORTSPEED_4GBIT |
                                FC_PORTSPEED_2GBIT;
                }
-       } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+       } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
                supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
                                FC_PORTSPEED_1GBIT;
-       } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+       } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
                supported_speed |= FC_PORTSPEED_10GBIT;
        }
+       kfree(ioc_attr);
        return supported_speed;
 }
 
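bfad_im_supported_speeds() above stops keeping a struct bfa_ioc_attr_s on the kernel stack and allocates it instead, since the attribute block is large. A minimal sketch of the same pattern (error handling reduced to returning "nothing supported", as in the hunk):

#include <linux/slab.h>

/* Sketch: query a large attribute block without placing it on the
 * kernel stack.  Allocation failure is treated as "no speeds
 * supported", mirroring the hunk above.
 */
static u32 example_supported_speeds(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 speeds = 0;

	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_get_attr(bfa, ioc_attr);
	/* ... derive the speed mask from ioc_attr->adapter_attr ... */
	kfree(ioc_attr);
	return speeds;
}
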
@@ -944,14 +949,13 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
        struct Scsi_Host *host = im_port->shost;
        struct bfad_s         *bfad = im_port->bfad;
        struct bfad_port_s    *port = im_port->port;
-       struct bfa_port_attr_s pattr;
-       struct bfa_lport_attr_s port_attr;
        char symname[BFA_SYMNAME_MAXLEN];
+       struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
 
        fc_host_node_name(host) =
-               bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
+               cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
        fc_host_port_name(host) =
-               bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
+               cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
        fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
 
        fc_host_supported_classes(host) = FC_COS_CLASS3;
@@ -964,15 +968,12 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
        /* For fibre channel services type 0x20 */
        fc_host_supported_fc4s(host)[7] = 1;
 
-       bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
-       strncpy(symname, port_attr.port_cfg.sym_name.symname,
+       strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
                BFA_SYMNAME_MAXLEN);
        sprintf(fc_host_symbolic_name(host), "%s", symname);
 
        fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
-
-       bfa_fcport_get_attr(&bfad->bfa, &pattr);
-       fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
+       fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
 }
 
 static void
@@ -983,9 +984,9 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
        struct bfad_itnim_data_s *itnim_data;
 
        rport_ids.node_name =
-               bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
+               cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
        rport_ids.port_name =
-               bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+               cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
        rport_ids.port_id =
                bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
@@ -1015,7 +1016,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
        return;
 }
 
-/**
+/*
  * Work queue handler using FC transport service
  * Context: kernel
  */
@@ -1115,7 +1116,7 @@ bfad_im_itnim_work_handler(struct work_struct *work)
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * Scsi_Host template entry, queue a SCSI command to the BFAD.
  */
 static int
index 85f2224a5733cf2847377c103c818d1fb545632e..58796d1284b70ae2b49b238d5b2e66737ca5d1a3 100644 (file)
@@ -23,7 +23,7 @@
 
 #pragma pack(1)
 
-/**
+/*
  * BFI FW image type
  */
 #define        BFI_FLASH_CHUNK_SZ                      256     /*  Flash chunk size */
@@ -35,7 +35,7 @@ enum {
        BFI_IMAGE_MAX,
 };
 
-/**
+/*
  * Msg header common to all msgs
  */
 struct bfi_mhdr_s {
@@ -68,7 +68,7 @@ struct bfi_mhdr_s {
 #define BFI_I2H_OPCODE_BASE    128
 #define BFA_I2HM(_x)           ((_x) + BFI_I2H_OPCODE_BASE)
 
-/**
+/*
  ****************************************************************************
  *
  * Scatter Gather Element and Page definition
@@ -79,7 +79,7 @@ struct bfi_mhdr_s {
 #define BFI_SGE_INLINE 1
 #define BFI_SGE_INLINE_MAX     (BFI_SGE_INLINE + 1)
 
-/**
+/*
  * SG Flags
  */
 enum {
@@ -90,7 +90,7 @@ enum {
        BFI_SGE_PGDLEN          = 2,    /*  cumulative data length for page */
 };
 
-/**
+/*
  * DMA addresses
  */
 union bfi_addr_u {
@@ -100,7 +100,7 @@ union bfi_addr_u {
        } a32;
 };
 
-/**
+/*
  * Scatter Gather Element
  */
 struct bfi_sge_s {
@@ -116,7 +116,7 @@ struct bfi_sge_s {
        union bfi_addr_u sga;
 };
 
-/**
+/*
  * Scatter Gather Page
  */
 #define BFI_SGPG_DATA_SGES             7
@@ -139,7 +139,7 @@ struct bfi_msg_s {
        u32     pl[BFI_LMSG_PL_WSZ];
 };
 
-/**
+/*
  * Mailbox message structure
  */
 #define BFI_MBMSG_SZ           7
@@ -148,7 +148,7 @@ struct bfi_mbmsg_s {
        u32             pl[BFI_MBMSG_SZ];
 };
 
-/**
+/*
  * Message Classes
  */
 enum bfi_mclass {
@@ -186,7 +186,7 @@ enum bfi_mclass {
 #define BFI_BOOT_LOADER_BIOS           1
 #define BFI_BOOT_LOADER_UEFI           2
 
-/**
+/*
  *----------------------------------------------------------------------
  *                             IOC
  *----------------------------------------------------------------------
@@ -208,7 +208,7 @@ enum bfi_ioc_i2h_msgs {
        BFI_IOC_I2H_HBEAT               = BFA_I2HM(5),
 };
 
-/**
+/*
  * BFI_IOC_H2I_GETATTR_REQ message
  */
 struct bfi_ioc_getattr_req_s {
@@ -242,7 +242,7 @@ struct bfi_ioc_attr_s {
        u32     card_type;      /*  card type                   */
 };
 
-/**
+/*
  * BFI_IOC_I2H_GETATTR_REPLY message
  */
 struct bfi_ioc_getattr_reply_s {
@@ -251,19 +251,19 @@ struct bfi_ioc_getattr_reply_s {
        u8                      rsvd[3];
 };
 
-/**
+/*
  * Firmware memory page offsets
  */
 #define BFI_IOC_SMEM_PG0_CB    (0x40)
 #define BFI_IOC_SMEM_PG0_CT    (0x180)
 
-/**
+/*
  * Firmware statistic offset
  */
 #define BFI_IOC_FWSTATS_OFF    (0x6B40)
 #define BFI_IOC_FWSTATS_SZ     (4096)
 
-/**
+/*
  * Firmware trace offset
  */
 #define BFI_IOC_TRC_OFF                (0x4b00)
@@ -280,7 +280,7 @@ struct bfi_ioc_image_hdr_s {
        u32     md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
-/**
+/*
  *  BFI_IOC_I2H_READY_EVENT message
  */
 struct bfi_ioc_rdy_event_s {
@@ -294,7 +294,7 @@ struct bfi_ioc_hbeat_s {
        u32        hb_count;    /*  current heart beat count    */
 };
 
-/**
+/*
  * IOC hardware/firmware state
  */
 enum bfi_ioc_state {
@@ -340,7 +340,7 @@ enum {
        ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
                        BFI_ADAPTER_UNSUPP))
 
-/**
+/*
  * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
  */
 struct bfi_ioc_ctrl_req_s {
@@ -352,7 +352,7 @@ struct bfi_ioc_ctrl_req_s {
 #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
 #define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
 
-/**
+/*
  * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
  */
 struct bfi_ioc_ctrl_reply_s {
@@ -364,7 +364,7 @@ struct bfi_ioc_ctrl_reply_s {
 #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
 
 #define BFI_IOC_MSGSZ   8
-/**
+/*
  * H2I Messages
  */
 union bfi_ioc_h2i_msg_u {
@@ -375,7 +375,7 @@ union bfi_ioc_h2i_msg_u {
        u32                     mboxmsg[BFI_IOC_MSGSZ];
 };
 
-/**
+/*
  * I2H Messages
  */
 union bfi_ioc_i2h_msg_u {
@@ -385,7 +385,7 @@ union bfi_ioc_i2h_msg_u {
 };
 
 
-/**
+/*
  *----------------------------------------------------------------------
  *                             PBC
  *----------------------------------------------------------------------
@@ -394,7 +394,7 @@ union bfi_ioc_i2h_msg_u {
 #define BFI_PBC_MAX_BLUNS      8
 #define BFI_PBC_MAX_VPORTS     16
 
-/**
+/*
  * PBC boot lun configuration
  */
 struct bfi_pbc_blun_s {
@@ -402,7 +402,7 @@ struct bfi_pbc_blun_s {
        lun_t           tgt_lun;
 };
 
-/**
+/*
  * PBC virtual port configuration
  */
 struct bfi_pbc_vport_s {
@@ -410,7 +410,7 @@ struct bfi_pbc_vport_s {
        wwn_t           vp_nwwn;
 };
 
-/**
+/*
  * BFI pre-boot configuration information
  */
 struct bfi_pbc_s {
@@ -427,7 +427,7 @@ struct bfi_pbc_s {
        struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
 };
 
-/**
+/*
  *----------------------------------------------------------------------
  *                             MSGQ
  *----------------------------------------------------------------------
@@ -531,7 +531,7 @@ enum bfi_port_i2h {
        BFI_PORT_I2H_CLEAR_STATS_RSP    = BFA_I2HM(4),
 };
 
-/**
+/*
  * Generic REQ type
  */
 struct bfi_port_generic_req_s {
@@ -540,7 +540,7 @@ struct bfi_port_generic_req_s {
        u32     rsvd;
 };
 
-/**
+/*
  * Generic RSP type
  */
 struct bfi_port_generic_rsp_s {
@@ -550,7 +550,7 @@ struct bfi_port_generic_rsp_s {
        u32     msgtag;         /*  msgtag for reply                */
 };
 
-/**
+/*
  * BFI_PORT_H2I_GET_STATS_REQ
  */
 struct bfi_port_get_stats_req_s {
index 69ac85f9e938c9f8aa4d5b42eb186f05f21a2f9c..fa9f6fb9d45b0decc0920d88439b89c7bc37dbe5 100644 (file)
@@ -41,7 +41,7 @@ struct bfi_iocfc_cfg_s {
        u16     rsvd_1;
        u32     endian_sig;     /*  endian signature of host     */
 
-       /**
+       /*
         * Request and response circular queue base addresses, size and
         * shadow index pointers.
         */
@@ -58,7 +58,7 @@ struct bfi_iocfc_cfg_s {
        struct bfa_iocfc_intr_attr_s intr_attr; /*  IOC interrupt attributes */
 };
 
-/**
+/*
  * Boot target wwn information for this port. This contains either the stored
  * or discovered boot target port wwns for the port.
  */
@@ -75,7 +75,7 @@ struct bfi_iocfc_cfgrsp_s {
        struct bfi_pbc_s                pbc_cfg;
 };
 
-/**
+/*
  * BFI_IOCFC_H2I_CFG_REQ message
  */
 struct bfi_iocfc_cfg_req_s {
@@ -84,7 +84,7 @@ struct bfi_iocfc_cfg_req_s {
 };
 
 
-/**
+/*
  * BFI_IOCFC_I2H_CFG_REPLY message
  */
 struct bfi_iocfc_cfg_reply_s {
@@ -95,7 +95,7 @@ struct bfi_iocfc_cfg_reply_s {
 };
 
 
-/**
+/*
  * BFI_IOCFC_H2I_SET_INTR_REQ message
  */
 struct bfi_iocfc_set_intr_req_s {
@@ -107,7 +107,7 @@ struct bfi_iocfc_set_intr_req_s {
 };
 
 
-/**
+/*
  * BFI_IOCFC_H2I_UPDATEQ_REQ message
  */
 struct bfi_iocfc_updateq_req_s {
@@ -119,7 +119,7 @@ struct bfi_iocfc_updateq_req_s {
 };
 
 
-/**
+/*
  * BFI_IOCFC_I2H_UPDATEQ_RSP message
  */
 struct bfi_iocfc_updateq_rsp_s {
@@ -129,7 +129,7 @@ struct bfi_iocfc_updateq_rsp_s {
 };
 
 
-/**
+/*
  * H2I Messages
  */
 union bfi_iocfc_h2i_msg_u {
@@ -140,7 +140,7 @@ union bfi_iocfc_h2i_msg_u {
 };
 
 
-/**
+/*
  * I2H Messages
  */
 union bfi_iocfc_i2h_msg_u {
@@ -173,7 +173,7 @@ enum bfi_fcport_i2h {
 };
 
 
-/**
+/*
  * Generic REQ type
  */
 struct bfi_fcport_req_s {
@@ -181,7 +181,7 @@ struct bfi_fcport_req_s {
        u32        msgtag;      /*  msgtag for reply                */
 };
 
-/**
+/*
  * Generic RSP type
  */
 struct bfi_fcport_rsp_s {
@@ -191,7 +191,7 @@ struct bfi_fcport_rsp_s {
        u32        msgtag;      /*  msgtag for reply                */
 };
 
-/**
+/*
  * BFI_FCPORT_H2I_ENABLE_REQ
  */
 struct bfi_fcport_enable_req_s {
@@ -205,7 +205,7 @@ struct bfi_fcport_enable_req_s {
        u32        rsvd2;
 };
 
-/**
+/*
  * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
  */
 struct bfi_fcport_set_svc_params_req_s {
@@ -214,7 +214,7 @@ struct bfi_fcport_set_svc_params_req_s {
        u16        rsvd;
 };
 
-/**
+/*
  * BFI_FCPORT_I2H_EVENT
  */
 struct bfi_fcport_event_s {
@@ -222,7 +222,7 @@ struct bfi_fcport_event_s {
        struct bfa_port_link_s  link_state;
 };
 
-/**
+/*
  * BFI_FCPORT_I2H_TRUNK_SCN
  */
 struct bfi_fcport_trunk_link_s {
@@ -243,7 +243,7 @@ struct bfi_fcport_trunk_scn_s {
        struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
 };
 
-/**
+/*
  * fcport H2I message
  */
 union bfi_fcport_h2i_msg_u {
@@ -255,7 +255,7 @@ union bfi_fcport_h2i_msg_u {
        struct bfi_fcport_req_s                 *pstatsclear;
 };
 
-/**
+/*
  * fcport I2H message
  */
 union bfi_fcport_i2h_msg_u {
@@ -279,7 +279,7 @@ enum bfi_fcxp_i2h {
 
 #define BFA_FCXP_MAX_SGES      2
 
-/**
+/*
  * FCXP send request structure
  */
 struct bfi_fcxp_send_req_s {
@@ -299,7 +299,7 @@ struct bfi_fcxp_send_req_s {
        struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];  /*  response buf   */
 };
 
-/**
+/*
  * FCXP send response structure
  */
 struct bfi_fcxp_send_rsp_s {
@@ -565,14 +565,14 @@ enum bfi_ioim_i2h {
        BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /*  ABORT rsp    */
 };
 
-/**
+/*
  * IO command DIF info
  */
 struct bfi_ioim_dif_s {
        u32     dif_info[4];
 };
 
-/**
+/*
  * FCP IO messages overview
  *
  * @note
@@ -587,7 +587,7 @@ struct bfi_ioim_req_s {
        u16     rport_hdl;      /*  itnim/rport firmware handle */
        struct fcp_cmnd_s       cmnd;   /*  IO request info     */
 
-       /**
+       /*
         * SG elements array within the IO request must be double word
        * aligned. This alignment is required to optimize SGM setup for the IO.
         */
@@ -598,7 +598,7 @@ struct bfi_ioim_req_s {
        struct bfi_ioim_dif_s  dif;
 };
 
-/**
+/*
  *     This table shows various IO status codes from firmware and their
  *     meaning. Host driver can use these status codes to further process
  *     IO completions.
@@ -684,7 +684,7 @@ enum bfi_ioim_status {
 };
 
 #define BFI_IOIM_SNSLEN        (256)
-/**
+/*
  * I/O response message
  */
 struct bfi_ioim_rsp_s {
@@ -746,7 +746,7 @@ enum bfi_tskim_status {
        BFI_TSKIM_STS_NOT_SUPP = 4,
        BFI_TSKIM_STS_FAILED    = 5,
 
-       /**
+       /*
         * Defined by BFA
         */
        BFI_TSKIM_STS_TIMEOUT  = 10,    /*  TM request timedout */
index 99f2b8c5dd6310420b61df64af22649bcf41b296..8c04fada710b441713f95a1e5bc43fef49728e6e 100644 (file)
@@ -692,6 +692,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
                atid, tid, status, csk, csk->state, csk->flags);
 
+       if (status == CPL_ERR_RTX_NEG_ADVICE)
+               goto rel_skb;
+
        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
index b9bcfa4c7d261dd5e1e13725459487d2dfe638f6..5be3ae15cb71b858a7402b06a6c10923facd1edf 100644 (file)
@@ -773,6 +773,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
        {"ENGENIO", "INF-01-00"},
        {"STK", "FLEXLINE 380"},
        {"SUN", "CSM100_R_FC"},
+       {"SUN", "STK6580_6780"},
+       {"SUN", "SUN_6180"},
        {NULL, NULL},
 };
 
index 844d618b84bdd51151c21bdd91dd9d0f9b100455..d23a538a9dfcca1e5bae286fb8305796dd6a8233 100644 (file)
@@ -117,7 +117,7 @@ static void fcoe_recv_frame(struct sk_buff *skb);
 
 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
 
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR);
+module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
 MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
 module_param_call(create_vn2vn, fcoe_create, NULL,
@@ -1243,7 +1243,6 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
        struct fcoe_interface *fcoe;
        struct fc_frame_header *fh;
        struct fcoe_percpu_s *fps;
-       struct fcoe_port *port;
        struct ethhdr *eh;
        unsigned int cpu;
 
@@ -1262,16 +1261,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
                        skb_tail_pointer(skb), skb_end_pointer(skb),
                        skb->csum, skb->dev ? skb->dev->name : "<NULL>");
 
-       /* check for mac addresses */
        eh = eth_hdr(skb);
-       port = lport_priv(lport);
-       if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
-           compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
-           compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
-               FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
-                               eh->h_dest);
-               goto err;
-       }
 
        if (is_fip_mode(&fcoe->ctlr) &&
            compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
@@ -1291,6 +1281,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
        skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
        fh = (struct fc_frame_header *) skb_transport_header(skb);
 
+       if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
+               FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
+                               eh->h_dest);
+               goto err;
+       }
+
        fr = fcoe_dev_from_skb(skb);
        fr->fr_dev = lport;
        fr->ptype = ptype;
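The receive-path hunks above drop the destination-MAC comparison and instead require that the low three bytes of the destination MAC match the FC header's d_id, which holds for fabric-assigned FPMA addresses. A minimal sketch of that comparison; get24() here is only a stand-in for the ntoh24() helper used in the hunk:

#include <linux/types.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_fs.h>

/* get24() stands in for ntoh24(): load a 24-bit big-endian value. */
static inline u32 get24(const u8 *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

/* A frame addressed to a fabric-assigned FPMA MAC carries the FC_ID in
 * the low three bytes of the destination MAC, so it must equal the FC
 * header's d_id for the frame to belong to this lport.
 */
static bool example_dst_matches_did(const struct ethhdr *eh,
				    const struct fc_frame_header *fh)
{
	return get24(&eh->h_dest[3]) == get24(fh->fh_d_id);
}
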
index aa503d83092a09eb0b69ea1ca76fd227807bd39a..bc17c71232023c7d3b830a541df976de7146a72c 100644 (file)
@@ -2296,7 +2296,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vn2vn_subcode sub;
-       union {
+       struct {
                struct fc_rport_priv rdata;
                struct fcoe_rport frport;
        } buf;
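The VN2VN receive buffer above changes from a union to a struct because the parsing code appears to expect the fcoe_rport to sit immediately behind the fc_rport_priv in memory; overlaying the two in a union would corrupt whichever member was written second. A minimal sketch of the layout a struct provides (the type name is invented):

#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/* Hypothetical layout: placing the two structures back to back matches
 * the assumption that the fcoe_rport lives directly after the
 * fc_rport_priv it belongs to; a union would overlay them instead.
 */
struct example_vn_rcv_buf {
	struct fc_rport_priv rdata;
	struct fcoe_rport    frport;	/* expected at rdata + 1 */
};
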
index 5a3f93101017b601fe98b2bbf12d9b55fd27152b..841101846b88fe98e55a6c601b013ef9119cdf4a 100644 (file)
@@ -4177,6 +4177,14 @@ static int ioc_general(void __user *arg, char *cmnd)
     ha = gdth_find_ha(gen.ionode);
     if (!ha)
         return -EFAULT;
+
+    if (gen.data_len > INT_MAX)
+        return -EINVAL;
+    if (gen.sense_len > INT_MAX)
+        return -EINVAL;
+    if (gen.data_len + gen.sense_len > INT_MAX)
+        return -EINVAL;
+
     if (gen.data_len + gen.sense_len != 0) {
         if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
                                      FALSE, &paddr)))
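The new checks above bound the user-supplied data and sense lengths before they are summed into an allocation size. A minimal sketch of the same validation, assuming unsigned length parameters wide enough to hold the user-supplied values:

#include <linux/kernel.h>
#include <linux/errno.h>

/* Sketch: bound each user-supplied length and their sum before they
 * feed an int-sized allocation; with both capped at INT_MAX the sum
 * cannot wrap an unsigned long, so the final test is meaningful.
 */
static int example_check_ioctl_lengths(unsigned long data_len,
				       unsigned long sense_len)
{
	if (data_len > INT_MAX || sense_len > INT_MAX)
		return -EINVAL;
	if (data_len + sense_len > INT_MAX)
		return -EINVAL;
	return 0;
}
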
index df9a12c8b373144618ff051cdb38498648df2df5..fa60d7df44bed7154adf9aa784cecc937db64d23 100644 (file)
@@ -9025,6 +9025,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
index aa8bb2f2c6ee286c2fa73c734d37cb8f26573138..b28a00f1082c3e3bbab4e412725030626890652c 100644 (file)
@@ -82,6 +82,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_57CC    0x035C
 
index 32f67c4b03fc014b8b47935a761a9feb6bdfa59a..911b2736cafae204379b8db7f18767b788d79bc3 100644 (file)
@@ -684,10 +684,9 @@ void fc_disc_stop(struct fc_lport *lport)
 {
        struct fc_disc *disc = &lport->disc;
 
-       if (disc) {
+       if (disc->pending)
                cancel_delayed_work_sync(&disc->disc_work);
-               fc_disc_stop_rports(disc);
-       }
+       fc_disc_stop_rports(disc);
 }
 
 /**
index c797f6b48f05bccff273e06964c0dc06508de174..e340373b509b6528ea0602d410eb902c3b233321 100644 (file)
@@ -58,8 +58,7 @@ struct kmem_cache *scsi_pkt_cachep;
 #define FC_SRB_WRITE           (1 << 0)
 
 /*
- * The SCp.ptr should be tested and set under the host lock. NULL indicates
- * that the command has been retruned to the scsi layer.
+ * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
  */
 #define CMD_SP(Cmnd)               ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
 #define CMD_ENTRY_STATUS(Cmnd)     ((Cmnd)->SCp.have_data_in)
@@ -1880,8 +1879,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 
        lport = fsp->lp;
        si = fc_get_scsi_internal(lport);
-       if (!fsp->cmd)
-               return;
 
        /*
         * if can_queue ramp down is done then try can_queue ramp up
@@ -1891,11 +1888,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
                fc_fcp_can_queue_ramp_up(lport);
 
        sc_cmd = fsp->cmd;
-       fsp->cmd = NULL;
-
-       if (!sc_cmd->SCp.ptr)
-               return;
-
        CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
        switch (fsp->status_code) {
        case FC_COMPLETE:
@@ -1971,15 +1963,13 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
                break;
        }
 
-       if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) {
-               sc_cmd->result = (DID_REQUEUE << 16);
-               FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
-       }
+       if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
+               sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
 
        spin_lock_irqsave(&si->scsi_queue_lock, flags);
        list_del(&fsp->list);
-       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
        sc_cmd->SCp.ptr = NULL;
+       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
        sc_cmd->scsi_done(sc_cmd);
 
        /* release ref from initial allocation in queue command */
@@ -1997,6 +1987,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
 {
        struct fc_fcp_pkt *fsp;
        struct fc_lport *lport;
+       struct fc_fcp_internal *si;
        int rc = FAILED;
        unsigned long flags;
 
@@ -2006,7 +1997,8 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
        else if (!lport->link_up)
                return rc;
 
-       spin_lock_irqsave(lport->host->host_lock, flags);
+       si = fc_get_scsi_internal(lport);
+       spin_lock_irqsave(&si->scsi_queue_lock, flags);
        fsp = CMD_SP(sc_cmd);
        if (!fsp) {
                /* command completed while scsi eh was setting up */
@@ -2015,7 +2007,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
        }
        /* grab a ref so the fsp and sc_cmd cannot be relased from under us */
        fc_fcp_pkt_hold(fsp);
-       spin_unlock_irqrestore(lport->host->host_lock, flags);
+       spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
        if (fc_fcp_lock_pkt(fsp)) {
                /* completed while we were waiting for timer to be deleted */
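With SCp.ptr now guarded by the fcp-internal scsi_queue_lock instead of the Scsi_Host lock, the completion path must clear it under that same lock and only then complete the command, so the abort path's lookup above cannot race with it. A minimal sketch of the completion-side ordering, reusing the field names from the hunks above:

/* Sketch of the completion-side ordering assumed above: detach the
 * packet and clear SCp.ptr while scsi_queue_lock is held, and call
 * ->scsi_done() only after dropping the lock, so fc_eh_abort() either
 * sees a valid fsp or a NULL pointer, never a stale one.
 */
static void example_io_done(struct fc_fcp_internal *si,
			    struct fc_fcp_pkt *fsp,
			    struct scsi_cmnd *sc_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

	sc_cmd->scsi_done(sc_cmd);
}
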
index d9b6e11b0e884b122cfe4d7b6fc2888e5e6c507d..9be63edbf8fb9095624222f7dc6398957e0abd28 100644 (file)
@@ -1447,13 +1447,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
        }
 
        did = fc_frame_did(fp);
-
-       if (!did) {
-               FC_LPORT_DBG(lport, "Bad FLOGI response\n");
-               goto out;
-       }
-
-       if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+       if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
                flp = fc_frame_payload_get(fp, sizeof(*flp));
                if (flp) {
                        mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1492,8 +1486,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                                fc_lport_enter_dns(lport);
                        }
                }
-       } else
+       } else {
+               FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
                fc_lport_error(lport, fp);
+       }
 
 out:
        fc_frame_free(fp);
index b9f2286fe0cbc9ef849cb8e2a713d9d0b11776f7..a84ef13ed74adf2a0e0d1dd81c644b2f3c9514a7 100644 (file)
@@ -196,9 +196,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata)
 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 {
        if (timeout)
-               rport->dev_loss_tmo = timeout + 5;
+               rport->dev_loss_tmo = timeout;
        else
-               rport->dev_loss_tmo = 30;
+               rport->dev_loss_tmo = 1;
 }
 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
 
index a50aa03b8ac1826c13d8acb22b6bb613c91e024e..196de40b906c487c0287b56ac3c27c5f4b66a5ee 100644 (file)
@@ -202,9 +202,12 @@ struct lpfc_stats {
        uint32_t elsRcvPRLO;
        uint32_t elsRcvPRLI;
        uint32_t elsRcvLIRR;
+       uint32_t elsRcvRLS;
        uint32_t elsRcvRPS;
        uint32_t elsRcvRPL;
        uint32_t elsRcvRRQ;
+       uint32_t elsRcvRTV;
+       uint32_t elsRcvECHO;
        uint32_t elsXmitFLOGI;
        uint32_t elsXmitFDISC;
        uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
 #define ELS_XRI_ABORT_EVENT    0x40
 #define ASYNC_EVENT            0x80
 #define LINK_DISABLED          0x100 /* Link disabled by user */
-#define FCF_DISC_INPROGRESS    0x200 /* FCF discovery in progress */
-#define HBA_FIP_SUPPORT                0x400 /* FIP support in HBA */
-#define HBA_AER_ENABLED                0x800 /* AER enabled with HBA */
+#define FCF_TS_INPROG           0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG           0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT                0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED                0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
        struct lpfc_dmabuf slim2p;
 
@@ -573,6 +578,7 @@ struct lpfc_hba {
        /* These fields used to be binfo */
        uint32_t fc_pref_DID;   /* preferred D_ID */
        uint8_t  fc_pref_ALPA;  /* preferred AL_PA */
+       uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
        uint32_t fc_edtov;      /* E_D_TOV timer value */
        uint32_t fc_arbtov;     /* ARB_TOV timer value */
        uint32_t fc_ratov;      /* R_A_TOV timer value */
index f681eea57730a794fccaf792243b8a4791abcee3..c1cbec01345d0849a027748cdad9425e72094356 100644 (file)
@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
                        break;
                case MBX_SECURITY_MGMT:
                case MBX_AUTH_PORT:
-                       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+                       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+                               printk(KERN_WARNING "mbox_read:Command 0x%x "
+                                      "is not permitted\n", pmb->mbxCommand);
+                               sysfs_mbox_idle(phba);
+                               spin_unlock_irq(&phba->hbalock);
                                return -EPERM;
+                       }
                        break;
                case MBX_READ_SPARM64:
                case MBX_READ_LA:
index f5d60b55f53a37d5d8e6b2a12b9fb8107771c131..7260c3af555a0723924553fb362cb88aee742cd5 100644 (file)
@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
        job = menlo->set_job;
        job->dd_data = NULL; /* so timeout handler does not reply */
 
-       spin_lock_irqsave(&phba->hbalock, flags);
+       spin_lock(&phba->hbalock);
        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       spin_unlock(&phba->hbalock);
 
        bmp = menlo->bmp;
        rspiocbq = menlo->rspiocbq;
index 03f4ddc185723f94520e112df291cc266b8c45d3..a5f5a093a8a46752e5c802da8a593de4510e7e6e 100644 (file)
@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
                        struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
                             struct lpfc_dmabuf *);
index e6ca12f6c6cb5063857f30f7b3e01b6e5587f14d..884f4d321799cffda5bb5ea56295d4166fb0b32c 100644 (file)
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                 (elscmd == ELS_CMD_LOGO)))
                switch (elscmd) {
                case ELS_CMD_FLOGI:
-               elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                case ELS_CMD_FDISC:
-               elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                case ELS_CMD_LOGO:
-               elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+               elsiocb->iocb_flag |=
+                       ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
                                        & LPFC_FIP_ELS_ID_MASK);
                break;
                }
@@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (sp->cmn.edtovResolution)    /* E_D_TOV ticks are in nanoseconds */
                phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 
+       phba->fc_edtovResol = sp->cmn.edtovResolution;
        phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
        if (phba->fc_topology == TOPOLOGY_LOOP) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_PUBLIC_LOOP;
                spin_unlock_irq(shost->host_lock);
-       } else {
-               /*
-                * If we are a N-port connected to a Fabric, fixup sparam's so
-                * logins to devices on remote loops work.
-                */
-               vport->fc_sparam.cmn.altBbCredit = 1;
        }
 
        vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);
+
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(vport);
+
                if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
        if (irsp->ulpStatus) {
                /*
-                * In case of FIP mode, perform round robin FCF failover
+                * In case of FIP mode, perform roundrobin FCF failover
                 * due to new FCF discovery
                 */
                if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                    (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
                    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
-                                       "2611 FLOGI failed on registered "
-                                       "FCF record fcf_index(%d), status: "
-                                       "x%x/x%x, tmo:x%x, trying to perform "
-                                       "round robin failover\n",
+                                       "2611 FLOGI failed on FCF (x%x), "
+                                       "status:x%x/x%x, tmo:x%x, perform "
+                                       "roundrobin FCF failover\n",
                                        phba->fcf.current_rec.fcf_indx,
                                        irsp->ulpStatus, irsp->un.ulpWord[4],
                                        irsp->ulpTimeout);
                        fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
-                       if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
-                               /*
-                                * Exhausted the eligible FCF record list,
-                                * fail through to retry FLOGI on current
-                                * FCF record.
-                                */
-                               lpfc_printf_log(phba, KERN_WARNING,
-                                               LOG_FIP | LOG_ELS,
-                                               "2760 Completed one round "
-                                               "of FLOGI FCF round robin "
-                                               "failover list, retry FLOGI "
-                                               "on currently registered "
-                                               "FCF index:%d\n",
-                                               phba->fcf.current_rec.fcf_indx);
-                       } else {
-                               lpfc_printf_log(phba, KERN_INFO,
-                                               LOG_FIP | LOG_ELS,
-                                               "2794 FLOGI FCF round robin "
-                                               "failover to FCF index x%x\n",
-                                               fcf_index);
-                               rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
-                                                                  fcf_index);
-                               if (rc)
-                                       lpfc_printf_log(phba, KERN_WARNING,
-                                                       LOG_FIP | LOG_ELS,
-                                                       "2761 FLOGI round "
-                                                       "robin FCF failover "
-                                                       "read FCF failed "
-                                                       "rc:x%x, fcf_index:"
-                                                       "%d\n", rc,
-                                               phba->fcf.current_rec.fcf_indx);
-                               else
-                                       goto out;
-                       }
+                       rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+                       if (rc)
+                               goto out;
                }
 
                /* FLOGI failure */
@@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        lpfc_nlp_put(ndlp);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+                       phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
@@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        if (phba->hba_flag & HBA_FIP_SUPPORT)
                                lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
                                                LOG_ELS,
-                                               "2769 FLOGI successful on FCF "
-                                               "record: current_fcf_index:"
-                                               "x%x, terminate FCF round "
-                                               "robin failover process\n",
+                                               "2769 FLOGI to FCF (x%x) "
+                                               "completed successfully\n",
                                                phba->fcf.current_rec.fcf_indx);
                        spin_lock_irq(&phba->hbalock);
                        phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+                       phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
                        spin_unlock_irq(&phba->hbalock);
                        goto out;
                }
@@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
                        return 0;
        }
 
-       if (lpfc_issue_els_flogi(vport, ndlp, 0))
+       if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
                /* This decrement of reference count to node shall kick off
                 * the release of the node.
                 */
                lpfc_nlp_put(ndlp);
-
+               return 0;
+       }
        return 1;
 }
 
@@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
        sp = (struct serv_parm *) pcmd;
 
+       /*
+        * If we are an N-port connected to a Fabric, fix up sparam's so logins
+        * to devices on remote loops work.
+        */
+       if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+               sp->cmn.altBbCredit = 1;
+
        if (sp->cmn.fcphLow < FC_PH_4_3)
                sp->cmn.fcphLow = FC_PH_4_3;
 
@@ -3925,6 +3903,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        return 0;
 }
 
+/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+                     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_sli *psli;
+       uint8_t *pcmd;
+       uint16_t cmdsize;
+       int rc;
+
+       psli = &phba->sli;
+       cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+       elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+       if (!elsiocb)
+               return 1;
+
+       elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;    /* Xri */
+       /* Xmit ECHO ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                        "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext);
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t);
+       memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+       lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+               "Issue ACC ECHO:  did:x%x flg:x%x",
+               ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+       phba->fc_stat.elsXmitACC++;
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       lpfc_nlp_put(ndlp);
+       elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
+                                   * it could be freed */
+
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+       if (rc == IOCB_ERROR) {
+               lpfc_els_free_iocb(phba, elsiocb);
+               return 1;
+       }
+       return 0;
+}
+
 /**
  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
  * @vport: pointer to a host virtual N_Port data structure.
@@ -4683,6 +4719,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        return 0;
 }
 
+/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                 struct lpfc_nodelist *ndlp)
+{
+       uint8_t *pcmd;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+       /* skip over first word of echo command to find echo data */
+       pcmd += sizeof(uint32_t);
+
+       lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+       return 0;
+}
+
 /**
  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
  * @vport: pointer to a host virtual N_Port data structure.
@@ -4734,6 +4794,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 }
 
+/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RPS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
+ * response to the RPS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       MAILBOX_t *mb;
+       IOCB_t *icmd;
+       struct RLS_RSP *rls_rsp;
+       uint8_t *pcmd;
+       struct lpfc_iocbq *elsiocb;
+       struct lpfc_nodelist *ndlp;
+       uint16_t xri;
+       uint32_t cmdsize;
+
+       mb = &pmb->u.mb;
+
+       ndlp = (struct lpfc_nodelist *) pmb->context2;
+       xri = (uint16_t) ((unsigned long)(pmb->context1));
+       pmb->context1 = NULL;
+       pmb->context2 = NULL;
+
+       if (mb->mbxStatus) {
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return;
+       }
+
+       cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+       mempool_free(pmb, phba->mbox_mem_pool);
+       elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+                                    lpfc_max_els_tries, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+
+       /* Decrement the ndlp reference count from previous mbox command */
+       lpfc_nlp_put(ndlp);
+
+       if (!elsiocb)
+               return;
+
+       icmd = &elsiocb->iocb;
+       icmd->ulpContext = xri;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t); /* Skip past command */
+       rls_rsp = (struct RLS_RSP *)pcmd;
+
+       rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+       rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+       rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+       rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+       rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+       rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+       /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+                        "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+                        "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext,
+                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+                        ndlp->nlp_rpi);
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       phba->fc_stat.elsXmitACC++;
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+               lpfc_els_free_iocb(phba, elsiocb);
+}
+
 /**
  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
  * @phba: pointer to lpfc hba data structure.
@@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 }
 
 /**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba = vport->phba;
+       LPFC_MBOXQ_t *mbox;
+       struct lpfc_dmabuf *pcmd;
+       struct ls_rjt stat;
+
+       if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+           (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+               /* reject the unsolicited RLS request and done with it */
+               goto reject_out;
+
+       pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+       if (mbox) {
+               lpfc_read_lnk_stat(phba, mbox);
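+               /* Save the RLS exchange (xri) and ndlp for the completion */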
+               mbox->context1 =
+                   (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+               mbox->context2 = lpfc_nlp_get(ndlp);
+               mbox->vport = vport;
+               mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+               if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+                       != MBX_NOT_FINISHED)
+                       /* Mbox completion will send ELS Response */
+                       return 0;
+               /* Decrement reference count used for the failed mbox
+                * command.
+                */
+               lpfc_nlp_put(ndlp);
+               mempool_free(mbox, phba->mbox_mem_pool);
+       }
+reject_out:
+       /* issue rejection response */
+       stat.un.b.lsRjtRsvd0 = 0;
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+       stat.un.b.vendorUnique = 0;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to the Read
+ * Timeout Value (RTV) unsolicited IOCB event.
+ *
+ * Note that the lpfc_prep_els_iocb() routine increments the reference count
+ * of the ndlp by 1 to hold it, and the reference to the ndlp is stored in
+ * the context1 field of the IOCB for the completion callback function of
+ * the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+                struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_hba *phba = vport->phba;
+       struct ls_rjt stat;
+       struct RTV_RSP *rtv_rsp;
+       uint8_t *pcmd;
+       struct lpfc_iocbq *elsiocb;
+       uint32_t cmdsize;
+
+
+       if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+           (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+               /* reject the unsolicited RTV request and done with it */
+               goto reject_out;
+
+       cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+       elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+                                    lpfc_max_els_tries, ndlp,
+                                    ndlp->nlp_DID, ELS_CMD_ACC);
+
+       if (!elsiocb)
+               return 1;
+
+       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+       *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+       pcmd += sizeof(uint32_t); /* Skip past command */
+
+       /* use the command's xri in the response */
+       elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+       rtv_rsp = (struct RTV_RSP *)pcmd;
+
+       /* populate RTV payload */
+       rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+       rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+       bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+       bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
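+       /* qtov bit fields were set in CPU order; swap the word to BE */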
+       rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+       /* Xmit ELS RTV ACC response tag <ulpIoTag> */
+       lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+                        "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+                        "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+                        "Data: x%x x%x x%x\n",
+                        elsiocb->iotag, elsiocb->iocb.ulpContext,
+                        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+                        ndlp->nlp_rpi,
+                        rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+       elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+       phba->fc_stat.elsXmitACC++;
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+               lpfc_els_free_iocb(phba, elsiocb);
+       return 0;
+
+reject_out:
+       /* issue rejection response */
+       stat.un.b.lsRjtRsvd0 = 0;
+       stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+       stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+       stat.un.b.vendorUnique = 0;
+       lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+       return 0;
+}
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
  * @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
        lp = (uint32_t *) pcmd->virt;
        rpl = (RPL *) (lp + 1);
-
        maxsize = be32_to_cpu(rpl->maxsize);
 
        /* We support only one port */
@@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_RLS:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV RLS:         did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+
+               phba->fc_stat.elsRcvRLS++;
+               lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        case ELS_CMD_RPS:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV RPS:         did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_RTV:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV RTV:        did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+               phba->fc_stat.elsRcvRTV++;
+               lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        case ELS_CMD_RRQ:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV RRQ:         did:x%x/ste:x%x flg:x%x",
@@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                if (newnode)
                        lpfc_nlp_put(ndlp);
                break;
+       case ELS_CMD_ECHO:
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+                       "RCV ECHO:        did:x%x/ste:x%x flg:x%x",
+                       did, vport->port_state, ndlp->nlp_flag);
+
+               phba->fc_stat.elsRcvECHO++;
+               lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+               if (newnode)
+                       lpfc_nlp_put(ndlp);
+               break;
        default:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
                        "RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
                default:
                        /* Try to recover from this error */
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               lpfc_sli4_unreg_all_rpis(vport);
                        lpfc_mbx_unreg_vpi(vport);
                        spin_lock_irq(shost->host_lock);
                        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        lpfc_unreg_rpi(vport, np);
                }
                lpfc_cleanup_pending_mbox(vport);
+
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(vport);
+
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                 * to update the MAC address.
                 */
                lpfc_register_new_vport(phba, vport, ndlp);
-               return ;
+               goto out;
        }
 
        if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
index a345dde16c86fcf74b121b38d2e505102b3ee698..a5d1695dac3dcd73b1e5cae6651c46c602d57d20 100644 (file)
@@ -20,6 +20,7 @@
  *******************************************************************/
 
 #include <linux/blkdev.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        return;
 }
 
-/*
- * This function is called from the worker thread when dev_loss_tmo
- * expire.
- */
-static void
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine shall return 1 when at least
+ * one remote node, including this @ndlp, still uses the FCF; otherwise, it
+ * shall return 0 when no remote node still uses the FCF at the time the
+ * devloss timeout happened to this @ndlp.
+ **/
+static int
 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 {
        struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
        int  put_node;
        int  put_rport;
        int warn_on = 0;
+       int fcf_inuse = 0;
 
        rport = ndlp->rport;
 
        if (!rport)
-               return;
+               return fcf_inuse;
 
        rdata = rport->dd_data;
        name = (uint8_t *) &ndlp->nlp_portname;
        vport = ndlp->vport;
        phba  = vport->phba;
 
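+       /* On SLI4, check whether any remote node still uses the in-use FCF */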
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               fcf_inuse = lpfc_fcf_inuse(phba);
+
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
-               return;
+               return fcf_inuse;
        }
 
        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID);
-               return;
+               return fcf_inuse;
        }
 
        if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
-               return;
+               return fcf_inuse;
        }
 
        if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
            (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 
+       return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: identifier of the remote node whose devloss timeout expired.
+ *
+ * This function is called from the worker thread on an SLI4 host after the
+ * devloss timeout handler has been invoked and the reference count for the
+ * ndlp whose devloss timeout was handled has been released. For the devloss
+ * timeout of the last remote node that had been using the FCF, it is
+ * guaranteed that, when this routine is invoked, no remote node is still
+ * using the FCF. On devloss timeout of the last remote node using the FCF,
+ * if the FIP engine is in neither the FCF table scan process nor the
+ * roundrobin failover process, the in-use FCF shall be unregistered. If the
+ * FIP engine is in the FCF discovery process, the devloss timeout state
+ * shall be set for either the FCF table scan process or the roundrobin
+ * failover process to unregister the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+                                   uint32_t nlp_did)
+{
+       /* If devloss timeout happened to a remote node when FCF had no
+        * longer been in-use, do nothing.
+        */
+       if (!fcf_inuse)
+               return;
+
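+       /* Act only when FIP is supported and the FCF has no more users */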
+       if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+               spin_lock_irq(&phba->hbalock);
+               if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+                       if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+                               spin_unlock_irq(&phba->hbalock);
+                               return;
+                       }
+                       phba->hba_flag |= HBA_DEVLOSS_TMO;
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2847 Last remote node (x%x) using "
+                                       "FCF devloss tmo\n", nlp_did);
+               }
+               if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2868 Devloss tmo to FCF rediscovery "
+                                       "in progress\n");
+                       return;
+               }
+               if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2869 Devloss tmo to idle FIP engine, "
+                                       "unreg in-use FCF and rescan.\n");
+                       /* Unregister in-use FCF and rescan */
+                       lpfc_unregister_fcf_rescan(phba);
+                       return;
+               }
+               spin_unlock_irq(&phba->hbalock);
+               if (phba->hba_flag & FCF_TS_INPROG)
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2870 FCF table scan in progress\n");
+               if (phba->hba_flag & FCF_RR_INPROG)
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2871 FLOGI roundrobin FCF failover "
+                                       "in progress\n");
+       }
        lpfc_unregister_unused_fcf(phba);
 }
 
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;
+       int fcf_inuse;
+       uint32_t nlp_did;
 
        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-                       lpfc_dev_loss_tmo_handler(ndlp);
+                       fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        /* decrement the node reference count held for
                         * this queued work
                         */
+                       nlp_did = ndlp->nlp_DID;
                        lpfc_nlp_put(ndlp);
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               lpfc_sli4_post_dev_loss_tmo_handler(phba,
+                                                                   fcf_inuse,
+                                                                   nlp_did);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
                                             : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(vport);
                lpfc_mbx_unreg_vpi(vport);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                         "2017 REG_FCFI mbxStatus error x%x "
                         "HBA state x%x\n",
                         mboxq->u.mb.mbxStatus, vport->port_state);
-               mempool_free(mboxq, phba->mbox_mem_pool);
-               return;
+               goto fail_out;
        }
 
        /* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= FCF_REGISTERED;
        spin_unlock_irq(&phba->hbalock);
+
        /* If there is a pending FCoE event, restart FCF table scan. */
-       if (lpfc_check_pending_fcoe_event(phba, 1)) {
-               mempool_free(mboxq, phba->mbox_mem_pool);
-               return;
-       }
+       if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+               goto fail_out;
+
+       /* Mark successful completion of FCF table scan */
        spin_lock_irq(&phba->hbalock);
        phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-       phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-       spin_unlock_irq(&phba->hbalock);
-       if (vport->port_state != LPFC_FLOGI)
+       phba->hba_flag &= ~FCF_TS_INPROG;
+       if (vport->port_state != LPFC_FLOGI) {
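+               /* Mark FLOGI roundrobin in progress before the initial FLOGI */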
+               phba->hba_flag |= FCF_RR_INPROG;
+               spin_unlock_irq(&phba->hbalock);
                lpfc_initial_flogi(vport);
+               goto out;
+       }
+       spin_unlock_irq(&phba->hbalock);
+       goto out;
 
+fail_out:
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~FCF_RR_INPROG;
+       spin_unlock_irq(&phba->hbalock);
+out:
        mempool_free(mboxq, phba->mbox_mem_pool);
-       return;
 }
 
 /**
@@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        int rc;
 
        spin_lock_irq(&phba->hbalock);
-
        /* If the FCF is not availabe do nothing. */
        if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                return;
        }
@@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        /* The FCF is already registered, start discovery */
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-               spin_unlock_irq(&phba->hbalock);
-               if (phba->pport->port_state != LPFC_FLOGI)
+               phba->hba_flag &= ~FCF_TS_INPROG;
+               if (phba->pport->port_state != LPFC_FLOGI) {
+                       phba->hba_flag |= FCF_RR_INPROG;
+                       spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
+                       return;
+               }
+               spin_unlock_irq(&phba->hbalock);
                return;
        }
        spin_unlock_irq(&phba->hbalock);
 
-       fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
-               GFP_KERNEL);
+       fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!fcf_mbxq) {
                spin_lock_irq(&phba->hbalock);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                return;
        }
@@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                spin_lock_irq(&phba->hbalock);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                spin_unlock_irq(&phba->hbalock);
                mempool_free(fcf_mbxq, phba->mbox_mem_pool);
        }
@@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
         * FCF discovery, no need to restart FCF discovery.
         */
        if ((phba->link_state  >= LPFC_LINK_UP) &&
-               (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+           (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
                return 0;
 
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
                lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
        } else {
                /*
-                * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+                * Do not continue FCF discovery and clear FCF_TS_INPROG
                 * flag
                 */
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2833 Stop FCF discovery process due to link "
                                "state change (x%x)\n", phba->link_state);
                spin_lock_irq(&phba->hbalock);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
                phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
                spin_unlock_irq(&phba->hbalock);
        }
@@ -1728,6 +1828,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
        return true;
 }
 
+/**
+ * lpfc_sli4_fcf_rr_next_proc - Process the next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin FCF failover to the next FCF index.
+ * When this function is invoked, there is a current FCF registered for FLOGI.
+ * Return: 0 to continue retrying FLOGI on the currently registered FCF;
+ *         1 to stop FLOGI on the currently registered FCF;
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+       struct lpfc_hba *phba = vport->phba;
+       int rc;
+
+       if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+               spin_lock_irq(&phba->hbalock);
+               if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                       "2872 Devloss tmo with no eligible "
+                                       "FCF, unregister in-use FCF (x%x) "
+                                       "and rescan FCF table\n",
+                                       phba->fcf.current_rec.fcf_indx);
+                       lpfc_unregister_fcf_rescan(phba);
+                       goto stop_flogi_current_fcf;
+               }
+               /* Mark the end to FLOGI roundrobin failover */
+               phba->hba_flag &= ~FCF_RR_INPROG;
+               /* Allow action to new fcf asynchronous event */
+               phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+               spin_unlock_irq(&phba->hbalock);
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                               "2865 No FCF available, stop roundrobin FCF "
+                               "failover and change port state:x%x/x%x\n",
+                               phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+               phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+               goto stop_flogi_current_fcf;
+       } else {
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+                               "2794 Try FLOGI roundrobin FCF failover to "
+                               "(x%x)\n", fcf_index);
+               rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+               if (rc)
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+                                       "2761 FLOGI roundrobin FCF failover "
+                                       "failed (rc:x%x) to read FCF (x%x)\n",
+                                       rc, phba->fcf.current_rec.fcf_indx);
+               else
+                       goto stop_flogi_current_fcf;
+       }
+       return 0;
+
+stop_flogi_current_fcf:
+       lpfc_can_disctmo(vport);
+       return 1;
+}
+
 /**
  * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
  * @phba: pointer to lpfc hba data structure.
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        int rc;
 
        /* If there is pending FCoE event restart FCF table scan */
-       if (lpfc_check_pending_fcoe_event(phba, 0)) {
+       if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
                lpfc_sli4_mbox_cmd_free(phba, mboxq);
                return;
        }
@@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
                                                      &next_fcf_index);
        if (!new_fcf_record) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+               lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
                                "2765 Mailbox command READ_FCF_RECORD "
                                "failed to retrieve a FCF record.\n");
                /* Let next new FCF event trigger fast failover */
                spin_lock_irq(&phba->hbalock);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~FCF_TS_INPROG;
                spin_unlock_irq(&phba->hbalock);
                lpfc_sli4_mbox_cmd_free(phba, mboxq);
                return;
@@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        /*
         * If the fcf record does not match with connect list entries
         * read the next entry; otherwise, this is an eligible FCF
-        * record for round robin FCF failover.
+        * record for roundrobin FCF failover.
         */
        if (!rc) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-                               "2781 FCF record (x%x) failed FCF "
-                               "connection list check, fcf_avail:x%x, "
-                               "fcf_valid:x%x\n",
+                               "2781 FCF (x%x) failed connection "
+                               "list check: (x%x/x%x)\n",
                                bf_get(lpfc_fcf_record_fcf_index,
                                       new_fcf_record),
                                bf_get(lpfc_fcf_record_fcf_avail,
@@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
                    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
                    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+                       if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+                           phba->fcf.current_rec.fcf_indx) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                                       "2862 FCF (x%x) matches property "
+                                       "of in-use FCF (x%x)\n",
+                                       bf_get(lpfc_fcf_record_fcf_index,
+                                              new_fcf_record),
+                                       phba->fcf.current_rec.fcf_indx);
+                               goto read_next_fcf;
+                       }
                        /*
                         * In case the current in-use FCF record becomes
                         * invalid/unavailable during FCF discovery that
@@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                            !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                                                "2835 Invalid in-use FCF "
-                                               "record (x%x) reported, "
-                                               "entering fast FCF failover "
-                                               "mode scanning.\n",
+                                               "(x%x), enter FCF failover "
+                                               "table scan.\n",
                                                phba->fcf.current_rec.fcf_indx);
                                spin_lock_irq(&phba->hbalock);
                                phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
        if (phba->fcf.fcf_flag & FCF_IN_USE) {
                if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
                    new_fcf_record, vlan_id)) {
-                       phba->fcf.fcf_flag |= FCF_AVAILABLE;
-                       if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
-                               /* Stop FCF redisc wait timer if pending */
-                               __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
-                       else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
-                               /* If in fast failover, mark it's completed */
-                               phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-                       spin_unlock_irq(&phba->hbalock);
-                       lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                                       "2836 The new FCF record (x%x) "
-                                       "matches the in-use FCF record "
-                                       "(x%x)\n",
-                                       phba->fcf.current_rec.fcf_indx,
+                       if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+                           phba->fcf.current_rec.fcf_indx) {
+                               phba->fcf.fcf_flag |= FCF_AVAILABLE;
+                               if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+                                       /* Stop FCF redisc wait timer */
+                                       __lpfc_sli4_stop_fcf_redisc_wait_timer(
+                                                                       phba);
+                               else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+                                       /* Fast failover, mark completed */
+                                       phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+                               spin_unlock_irq(&phba->hbalock);
+                               lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                                               "2836 New FCF matches in-use "
+                                               "FCF (x%x)\n",
+                                               phba->fcf.current_rec.fcf_indx);
+                               goto out;
+                       } else
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+                                       "2863 New FCF (x%x) matches "
+                                       "property of in-use FCF (x%x)\n",
                                        bf_get(lpfc_fcf_record_fcf_index,
-                                              new_fcf_record));
-                       goto out;
+                                              new_fcf_record),
+                                       phba->fcf.current_rec.fcf_indx);
                }
                /*
                 * Read next FCF record from HBA searching for the matching
@@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
         */
        if (fcf_rec) {
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                               "2840 Update current FCF record "
-                               "with initial FCF record (x%x)\n",
+                               "2840 Update initial FCF candidate "
+                               "with FCF (x%x)\n",
                                bf_get(lpfc_fcf_record_fcf_index,
                                       new_fcf_record));
                __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -1984,20 +2158,28 @@ read_next_fcf:
                         */
                        if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
                                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-                                              "2782 No suitable FCF record "
-                                              "found during this round of "
-                                              "post FCF rediscovery scan: "
-                                              "fcf_evt_tag:x%x, fcf_index: "
-                                              "x%x\n",
+                                              "2782 No suitable FCF found: "
+                                              "(x%x/x%x)\n",
                                               phba->fcoe_eventtag_at_fcf_scan,
                                               bf_get(lpfc_fcf_record_fcf_index,
                                                      new_fcf_record));
+                               spin_lock_irq(&phba->hbalock);
+                               if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+                                       phba->hba_flag &= ~FCF_TS_INPROG;
+                                       spin_unlock_irq(&phba->hbalock);
+                                       /* Unregister in-use FCF and rescan */
+                                       lpfc_printf_log(phba, KERN_INFO,
+                                                       LOG_FIP,
+                                                       "2864 On devloss tmo "
+                                                       "unreg in-use FCF and "
+                                                       "rescan FCF table\n");
+                                       lpfc_unregister_fcf_rescan(phba);
+                                       return;
+                               }
                                /*
-                                * Let next new FCF event trigger fast
-                                * failover
+                                * Let next new FCF event trigger fast failover
                                 */
-                               spin_lock_irq(&phba->hbalock);
-                               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+                               phba->hba_flag &= ~FCF_TS_INPROG;
                                spin_unlock_irq(&phba->hbalock);
                                return;
                        }
@@ -2015,9 +2197,8 @@ read_next_fcf:
 
                        /* Replace in-use record with the new record */
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                                       "2842 Replace the current in-use "
-                                       "FCF record (x%x) with failover FCF "
-                                       "record (x%x)\n",
+                                       "2842 Replace in-use FCF (x%x) "
+                                       "with failover FCF (x%x)\n",
                                        phba->fcf.current_rec.fcf_indx,
                                        phba->fcf.failover_rec.fcf_indx);
                        memcpy(&phba->fcf.current_rec,
@@ -2029,15 +2210,8 @@ read_next_fcf:
                         * FCF failover.
                         */
                        spin_lock_irq(&phba->hbalock);
-                       phba->fcf.fcf_flag &=
-                                       ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
+                       phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
                        spin_unlock_irq(&phba->hbalock);
-                       /*
-                        * Set up the initial registered FCF index for FLOGI
-                        * round robin FCF failover.
-                        */
-                       phba->fcf.fcf_rr_init_indx =
-                                       phba->fcf.failover_rec.fcf_indx;
                        /* Register to the new FCF record */
                        lpfc_register_fcf(phba);
                } else {
@@ -2069,28 +2243,6 @@ read_next_fcf:
                                                LPFC_FCOE_FCF_GET_FIRST);
                                return;
                        }
-
-                       /*
-                        * Otherwise, initial scan or post linkdown rescan,
-                        * register with the best FCF record found so far
-                        * through the FCF scanning process.
-                        */
-
-                       /*
-                        * Mark the initial FCF discovery completed and
-                        * the start of the first round of the roundrobin
-                        * FCF failover.
-                        */
-                       spin_lock_irq(&phba->hbalock);
-                       phba->fcf.fcf_flag &=
-                                       ~(FCF_INIT_DISC | FCF_REDISC_RRU);
-                       spin_unlock_irq(&phba->hbalock);
-                       /*
-                        * Set up the initial registered FCF index for FLOGI
-                        * round robin FCF failover
-                        */
-                       phba->fcf.fcf_rr_init_indx =
-                                       phba->fcf.current_rec.fcf_indx;
                        /* Register to the new FCF record */
                        lpfc_register_fcf(phba);
                }
@@ -2106,11 +2258,11 @@ out:
 }
 
 /**
- * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
  *
- * This is the callback function for FLOGI failure round robin FCF failover
+ * This is the callback function for FLOGI failure roundrobin FCF failover
  * read FCF record mailbox command from the eligible FCF record bmask for
  * performing the failover. If the FCF read back is not valid/available, it
  * fails through to retrying FLOGI to the currently registered FCF again.
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
        struct fcf_record *new_fcf_record;
        uint32_t boot_flag, addr_mode;
-       uint16_t next_fcf_index;
+       uint16_t next_fcf_index, fcf_index;
        uint16_t current_fcf_index;
        uint16_t vlan_id;
+       int rc;
 
-       /* If link state is not up, stop the round robin failover process */
+       /* If link state is not up, stop the roundrobin failover process */
        if (phba->link_state < LPFC_LINK_UP) {
                spin_lock_irq(&phba->hbalock);
                phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+               phba->hba_flag &= ~FCF_RR_INPROG;
                spin_unlock_irq(&phba->hbalock);
-               lpfc_sli4_mbox_cmd_free(phba, mboxq);
-               return;
+               goto out;
        }
 
        /* Parse the FCF record from the non-embedded mailbox command */
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
                                "2766 Mailbox command READ_FCF_RECORD "
                                "failed to retrieve a FCF record.\n");
-               goto out;
+               goto error_out;
        }
 
        /* Get the needed parameters from FCF record */
-       lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
-                                &addr_mode, &vlan_id);
+       rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+                                     &addr_mode, &vlan_id);
 
        /* Log the FCF record information if turned on */
        lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
                                      next_fcf_index);
 
+       fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
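+       /* rc == 0 means the FCF failed the connection list check */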
+       if (!rc) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                               "2848 Remove ineligible FCF (x%x) from "
+                               "roundrobin bmask\n", fcf_index);
+               /* Clear roundrobin bmask bit for ineligible FCF */
+               lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+               /* Perform next round of roundrobin FCF failover */
+               fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+               rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+               if (rc)
+                       goto out;
+               goto error_out;
+       }
+
+       if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+                               "2760 Perform FLOGI roundrobin FCF failover: "
+                               "FCF (x%x) back to FCF (x%x)\n",
+                               phba->fcf.current_rec.fcf_indx, fcf_index);
+               /* Wait 500 ms before retrying FLOGI to current FCF */
+               msleep(500);
+               lpfc_initial_flogi(phba->pport);
+               goto out;
+       }
+
        /* Upload new FCF record to the failover FCF record */
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2834 Update the current FCF record (x%x) "
-                       "with the next FCF record (x%x)\n",
-                       phba->fcf.failover_rec.fcf_indx,
-                       bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+                       "2834 Update current FCF (x%x) with new FCF (x%x)\n",
+                       phba->fcf.failover_rec.fcf_indx, fcf_index);
        spin_lock_irq(&phba->hbalock);
        __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
                                 new_fcf_record, addr_mode, vlan_id,
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
               sizeof(struct lpfc_fcf_rec));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2783 FLOGI round robin FCF failover from FCF "
-                       "(x%x) to FCF (x%x).\n",
-                       current_fcf_index,
-                       bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+                       "2783 Perform FLOGI roundrobin FCF failover: FCF "
+                       "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
 
+error_out:
+       lpfc_register_fcf(phba);
 out:
        lpfc_sli4_mbox_cmd_free(phba, mboxq);
-       lpfc_register_fcf(phba);
 }
 
 /**
@@ -2194,10 +2370,10 @@ out:
  * @mboxq: pointer to mailbox object.
  *
  * This is the callback function of read FCF record mailbox command for
- * updating the eligible FCF bmask for FLOGI failure round robin FCF
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
  * failover when a new FCF event happened. If the FCF read back is
  * valid/available and it passes the connection list check, it updates
- * the bmask for the eligible FCF record for round robin failover.
+ * the bmask for the eligible FCF record for roundrobin failover.
  */
 void
 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                 * and get the FCF Table.
                 */
                spin_lock_irq(&phba->hbalock);
-               if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+               if (phba->hba_flag & FCF_TS_INPROG) {
                        spin_unlock_irq(&phba->hbalock);
                        return;
                }
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
        LPFC_MBOXQ_t     *mbox;
        int rc;
 
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               lpfc_sli4_unreg_all_rpis(vport);
+               return;
+       }
+
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        }
 
        spin_lock_irq(&phba->hbalock);
+       /* Cleanup REG_LOGIN completions which are not yet processed */
+       list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+               if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+                       (ndlp != (struct lpfc_nodelist *) mb->context2))
+                       continue;
+
+               mb->context2 = NULL;
+               mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       }
+
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
                        if (ndlp)
                                lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
                        lpfc_cleanup_pending_mbox(vports[i]);
+                       if (phba->sli_rev == LPFC_SLI_REV4)
+                               lpfc_sli4_unreg_all_rpis(vports[i]);
                        lpfc_mbx_unreg_vpi(vports[i]);
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
index a631647051d99dc3cb53226ffd7602efe2cbfd19..9b833345646525c52b736f4f388e18f3f18695c4 100644 (file)
@@ -861,6 +861,47 @@ typedef struct  _RPS_RSP { /* Structure is in Big Endian format */
        uint32_t crcCnt;
 } RPS_RSP;
 
+struct RLS {                   /* Structure is in Big Endian format */
+       uint32_t rls;
+#define rls_rsvd_SHIFT         24
+#define rls_rsvd_MASK          0x000000ff
+#define rls_rsvd_WORD          rls
+#define rls_did_SHIFT          0
+#define rls_did_MASK           0x00ffffff
+#define rls_did_WORD           rls
+};
+
+struct  RLS_RSP {              /* Structure is in Big Endian format */
+       uint32_t linkFailureCnt;
+       uint32_t lossSyncCnt;
+       uint32_t lossSignalCnt;
+       uint32_t primSeqErrCnt;
+       uint32_t invalidXmitWord;
+       uint32_t crcCnt;
+};
+
+struct RTV_RSP {               /* Structure is in Big Endian format */
+       uint32_t ratov;
+       uint32_t edtov;
+       uint32_t qtov;
+#define qtov_rsvd0_SHIFT       28
+#define qtov_rsvd0_MASK                0x0000000f
+#define qtov_rsvd0_WORD                qtov            /* reserved */
+#define qtov_edtovres_SHIFT    27
+#define qtov_edtovres_MASK     0x00000001
+#define qtov_edtovres_WORD     qtov            /* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT       19
+#define qtov_rsvd1_MASK                0x0000003f
+#define qtov_rsvd1_WORD                qtov            /* reserved */
+#define qtov_rttov_SHIFT       18
+#define qtov_rttov_MASK                0x00000001
+#define qtov_rttov_WORD                qtov            /* R_T_TOV value */
+#define qtov_rsvd2_SHIFT       0
+#define qtov_rsvd2_MASK                0x0003ffff
+#define qtov_rsvd2_WORD                qtov            /* reserved */
+};
+
+
 typedef struct  _RPL {         /* Structure is in Big Endian format */
        uint32_t maxsize;
        uint32_t index;
index bbdcf96800f619e952bdcb64df043bf789caf68e..6e4bc34e1d0d30028f2aaae15e615f5a0d067cd5 100644 (file)
@@ -424,79 +424,6 @@ struct lpfc_rcqe {
 #define FCOE_SOFn3     0x36
 };
 
-struct lpfc_wqe_generic{
-       struct ulp_bde64 bde;
-       uint32_t word3;
-       uint32_t word4;
-       uint32_t word5;
-       uint32_t word6;
-#define lpfc_wqe_gen_context_SHIFT     16
-#define lpfc_wqe_gen_context_MASK      0x0000FFFF
-#define lpfc_wqe_gen_context_WORD      word6
-#define lpfc_wqe_gen_xri_SHIFT         0
-#define lpfc_wqe_gen_xri_MASK          0x0000FFFF
-#define lpfc_wqe_gen_xri_WORD          word6
-       uint32_t word7;
-#define lpfc_wqe_gen_lnk_SHIFT         23
-#define lpfc_wqe_gen_lnk_MASK          0x00000001
-#define lpfc_wqe_gen_lnk_WORD          word7
-#define lpfc_wqe_gen_erp_SHIFT         22
-#define lpfc_wqe_gen_erp_MASK          0x00000001
-#define lpfc_wqe_gen_erp_WORD          word7
-#define lpfc_wqe_gen_pu_SHIFT          20
-#define lpfc_wqe_gen_pu_MASK           0x00000003
-#define lpfc_wqe_gen_pu_WORD           word7
-#define lpfc_wqe_gen_class_SHIFT       16
-#define lpfc_wqe_gen_class_MASK                0x00000007
-#define lpfc_wqe_gen_class_WORD                word7
-#define lpfc_wqe_gen_command_SHIFT     8
-#define lpfc_wqe_gen_command_MASK      0x000000FF
-#define lpfc_wqe_gen_command_WORD      word7
-#define lpfc_wqe_gen_status_SHIFT      4
-#define lpfc_wqe_gen_status_MASK       0x0000000F
-#define lpfc_wqe_gen_status_WORD       word7
-#define lpfc_wqe_gen_ct_SHIFT          2
-#define lpfc_wqe_gen_ct_MASK           0x00000003
-#define lpfc_wqe_gen_ct_WORD           word7
-       uint32_t abort_tag;
-       uint32_t word9;
-#define lpfc_wqe_gen_request_tag_SHIFT 0
-#define lpfc_wqe_gen_request_tag_MASK  0x0000FFFF
-#define lpfc_wqe_gen_request_tag_WORD  word9
-       uint32_t word10;
-#define lpfc_wqe_gen_ccp_SHIFT         24
-#define lpfc_wqe_gen_ccp_MASK          0x000000FF
-#define lpfc_wqe_gen_ccp_WORD          word10
-#define lpfc_wqe_gen_ccpe_SHIFT                23
-#define lpfc_wqe_gen_ccpe_MASK         0x00000001
-#define lpfc_wqe_gen_ccpe_WORD         word10
-#define lpfc_wqe_gen_pv_SHIFT          19
-#define lpfc_wqe_gen_pv_MASK           0x00000001
-#define lpfc_wqe_gen_pv_WORD           word10
-#define lpfc_wqe_gen_pri_SHIFT         16
-#define lpfc_wqe_gen_pri_MASK          0x00000007
-#define lpfc_wqe_gen_pri_WORD          word10
-       uint32_t word11;
-#define lpfc_wqe_gen_cq_id_SHIFT       16
-#define lpfc_wqe_gen_cq_id_MASK                0x0000FFFF
-#define lpfc_wqe_gen_cq_id_WORD                word11
-#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
-#define lpfc_wqe_gen_wqec_SHIFT                7
-#define lpfc_wqe_gen_wqec_MASK         0x00000001
-#define lpfc_wqe_gen_wqec_WORD         word11
-#define ELS_ID_FLOGI 3
-#define ELS_ID_FDISC 2
-#define ELS_ID_LOGO  1
-#define ELS_ID_DEFAULT 0
-#define lpfc_wqe_gen_els_id_SHIFT      4
-#define lpfc_wqe_gen_els_id_MASK       0x00000003
-#define lpfc_wqe_gen_els_id_WORD       word11
-#define lpfc_wqe_gen_cmd_type_SHIFT    0
-#define lpfc_wqe_gen_cmd_type_MASK     0x0000000F
-#define lpfc_wqe_gen_cmd_type_WORD     word11
-       uint32_t payload[4];
-};
-
 struct lpfc_rqe {
        uint32_t address_hi;
        uint32_t address_lo;
@@ -2279,9 +2206,36 @@ struct wqe_common {
 #define wqe_reqtag_MASK       0x0000FFFF
 #define wqe_reqtag_WORD       word9
 #define wqe_rcvoxid_SHIFT     16
-#define wqe_rcvoxid_MASK       0x0000FFFF
-#define wqe_rcvoxid_WORD       word9
+#define wqe_rcvoxid_MASK      0x0000FFFF
+#define wqe_rcvoxid_WORD      word9
        uint32_t word10;
+#define wqe_ebde_cnt_SHIFT    0
+#define wqe_ebde_cnt_MASK     0x00000007
+#define wqe_ebde_cnt_WORD     word10
+#define wqe_lenloc_SHIFT      7
+#define wqe_lenloc_MASK       0x00000003
+#define wqe_lenloc_WORD       word10
+#define LPFC_WQE_LENLOC_NONE           0
+#define LPFC_WQE_LENLOC_WORD3  1
+#define LPFC_WQE_LENLOC_WORD12 2
+#define LPFC_WQE_LENLOC_WORD4  3
+#define wqe_qosd_SHIFT        9
+#define wqe_qosd_MASK         0x00000001
+#define wqe_qosd_WORD         word10
+#define wqe_xbl_SHIFT         11
+#define wqe_xbl_MASK          0x00000001
+#define wqe_xbl_WORD          word10
+#define wqe_iod_SHIFT         13
+#define wqe_iod_MASK          0x00000001
+#define wqe_iod_WORD          word10
+#define LPFC_WQE_IOD_WRITE     0
+#define LPFC_WQE_IOD_READ      1
+#define wqe_dbde_SHIFT        14
+#define wqe_dbde_MASK         0x00000001
+#define wqe_dbde_WORD         word10
+#define wqe_wqes_SHIFT        15
+#define wqe_wqes_MASK         0x00000001
+#define wqe_wqes_WORD         word10
 #define wqe_pri_SHIFT         16
 #define wqe_pri_MASK          0x00000007
 #define wqe_pri_WORD          word10
@@ -2295,18 +2249,26 @@ struct wqe_common {
 #define wqe_ccpe_MASK         0x00000001
 #define wqe_ccpe_WORD         word10
 #define wqe_ccp_SHIFT         24
-#define wqe_ccp_MASK         0x000000ff
-#define wqe_ccp_WORD         word10
+#define wqe_ccp_MASK          0x000000ff
+#define wqe_ccp_WORD          word10
        uint32_t word11;
-#define wqe_cmd_type_SHIFT  0
-#define wqe_cmd_type_MASK   0x0000000f
-#define wqe_cmd_type_WORD   word11
-#define wqe_wqec_SHIFT      7
-#define wqe_wqec_MASK       0x00000001
-#define wqe_wqec_WORD       word11
-#define wqe_cqid_SHIFT      16
-#define wqe_cqid_MASK       0x0000ffff
-#define wqe_cqid_WORD       word11
+#define wqe_cmd_type_SHIFT    0
+#define wqe_cmd_type_MASK     0x0000000f
+#define wqe_cmd_type_WORD     word11
+#define wqe_els_id_SHIFT      4
+#define wqe_els_id_MASK       0x00000003
+#define wqe_els_id_WORD       word11
+#define LPFC_ELS_ID_FLOGI      3
+#define LPFC_ELS_ID_FDISC      2
+#define LPFC_ELS_ID_LOGO       1
+#define LPFC_ELS_ID_DEFAULT    0
+#define wqe_wqec_SHIFT        7
+#define wqe_wqec_MASK         0x00000001
+#define wqe_wqec_WORD         word11
+#define wqe_cqid_SHIFT        16
+#define wqe_cqid_MASK         0x0000ffff
+#define wqe_cqid_WORD         word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
 };
 
 struct wqe_did {
@@ -2325,6 +2287,15 @@ struct wqe_did {
 #define wqe_xmit_bls_xo_WORD          word5
 };
 
+struct lpfc_wqe_generic{
+       struct ulp_bde64 bde;
+       uint32_t word3;
+       uint32_t word4;
+       uint32_t word5;
+       struct wqe_common wqe_com;
+       uint32_t payload[4];
+};
+
 struct els_request64_wqe {
        struct ulp_bde64 bde;
        uint32_t payload_len;
@@ -2356,9 +2327,9 @@ struct els_request64_wqe {
 
 struct xmit_els_rsp64_wqe {
        struct ulp_bde64 bde;
-       uint32_t rsvd3;
+       uint32_t response_payload_len;
        uint32_t rsvd4;
-       struct wqe_did  wqe_dest;
+       struct wqe_did wqe_dest;
        struct wqe_common wqe_com; /* words 6-11 */
        uint32_t rsvd_12_15[4];
 };
@@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl {
 
 struct xmit_seq64_wqe {
        struct ulp_bde64 bde;
-       uint32_t paylaod_offset;
+       uint32_t rsvd3;
        uint32_t relative_offset;
        struct wqe_rctl_dfctl wge_ctl;
        struct wqe_common wqe_com; /* words 6-11 */
@@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe {
 };
 struct xmit_bcast64_wqe {
        struct ulp_bde64 bde;
-       uint32_t paylaod_len;
+       uint32_t seq_payload_len;
        uint32_t rsvd4;
        struct wqe_rctl_dfctl wge_ctl; /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
@@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe {
 
 struct gen_req64_wqe {
        struct ulp_bde64 bde;
-       uint32_t command_len;
-       uint32_t payload_len;
+       uint32_t request_payload_len;
+       uint32_t relative_offset;
        struct wqe_rctl_dfctl wge_ctl; /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
        uint32_t rsvd_12_15[4];
@@ -2480,7 +2451,7 @@ struct abort_cmd_wqe {
 
 struct fcp_iwrite64_wqe {
        struct ulp_bde64 bde;
-       uint32_t payload_len;
+       uint32_t payload_offset_len;
        uint32_t total_xfer_len;
        uint32_t initial_xfer_len;
        struct wqe_common wqe_com;     /* words 6-11 */
@@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe {
 
 struct fcp_iread64_wqe {
        struct ulp_bde64 bde;
-       uint32_t payload_len;          /* word 3 */
+       uint32_t payload_offset_len;   /* word 3 */
        uint32_t total_xfer_len;       /* word 4 */
        uint32_t rsrvd5;               /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
@@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe {
 };
 
 struct fcp_icmnd64_wqe {
-       struct ulp_bde64 bde;    /* words 0-2 */
-       uint32_t rsrvd[3];             /* words 3-5 */
+       struct ulp_bde64 bde;          /* words 0-2 */
+       uint32_t rsrvd3;               /* word 3 */
+       uint32_t rsrvd4;               /* word 4 */
+       uint32_t rsrvd5;               /* word 5 */
        struct wqe_common wqe_com;     /* words 6-11 */
-       uint32_t rsvd_12_15[4];         /* word 12-15 */
+       uint32_t rsvd_12_15[4];        /* word 12-15 */
 };
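
The *_SHIFT/*_MASK/*_WORD triplets above are consumed by the driver's bf_set() (and its bf_get() counterpart) token-pasting helpers, which read-modify-write the named 32-bit word. The following is a minimal, self-contained sketch of that pattern; the macro bodies and the demo structure are illustrative approximations, not copied from lpfc_hw4.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative accessors over <field>_SHIFT/_MASK/_WORD triplets. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((ptr)->name##_WORD & \
		~(name##_MASK << name##_SHIFT)) | \
		(((value) & name##_MASK) << name##_SHIFT)))

/* Toy structure following the same convention as struct wqe_common. */
struct demo_wqe {
	uint32_t word10;
};
#define demo_iod_SHIFT	13
#define demo_iod_MASK	0x00000001
#define demo_iod_WORD	word10

int main(void)
{
	struct demo_wqe w = { 0 };

	bf_set(demo_iod, &w, 1);	/* set bit 13 of word10 */
	printf("word10 = 0x%08x, iod = %u\n",
	       w.word10, (unsigned int)bf_get(demo_iod, &w));
	return 0;
}
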
 
 
index 295c7ddb36c11fa72eaa6626aa7203ac4fb1eabf..b3065791f30333b9667dcb0f9d3d222facb76fc0 100644 (file)
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 
        return 0;
 }
+
 /**
  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
@@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 void
 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
-       /* Clear pending FCF rediscovery wait and failover in progress flags */
-       phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
-                               FCF_DEAD_DISC |
-                               FCF_ACVL_DISC);
+       /* Clear pending FCF rediscovery wait flag */
+       phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
        /* Now, try to stop the timer */
        del_timer(&phba->fcf.redisc_wait);
 }
@@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
                return;
        }
        __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+       /* Clear failover in progress flags */
+       phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
        spin_unlock_irq(&phba->hbalock);
 }
 
@@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
        phba->fcf.fcf_flag |= FCF_REDISC_EVT;
        spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2776 FCF rediscover wait timer expired, post "
-                       "a worker thread event for FCF table scan\n");
+                       "2776 FCF rediscover quiescent timer expired\n");
        /* wake up worker thread */
        lpfc_worker_wake_up(phba);
 }
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
                                        LOG_DISCOVERY,
-                                       "2546 New FCF found event: "
-                                       "evt_tag:x%x, fcf_index:x%x\n",
+                                       "2546 New FCF event, evt_tag:x%x, "
+                                       "index:x%x\n",
                                        acqe_fcoe->event_tag,
                                        acqe_fcoe->index);
                else
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
                                        LOG_DISCOVERY,
-                                       "2788 FCF parameter modified event: "
-                                       "evt_tag:x%x, fcf_index:x%x\n",
+                                       "2788 FCF param modified event, "
+                                       "evt_tag:x%x, index:x%x\n",
                                        acqe_fcoe->event_tag,
                                        acqe_fcoe->index);
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        /*
                         * During period of FCF discovery, read the FCF
                         * table record indexed by the event to update
-                        * FCF round robin failover eligible FCF bmask.
+                        * FCF roundrobin failover eligible FCF bmask.
                         */
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
                                        LOG_DISCOVERY,
-                                       "2779 Read new FCF record with "
-                                       "fcf_index:x%x for updating FCF "
-                                       "round robin failover bmask\n",
+                                       "2779 Read FCF (x%x) for updating "
+                                       "roundrobin FCF failover bmask\n",
                                        acqe_fcoe->index);
                        rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
                }
 
                /* If the FCF discovery is in progress, do nothing. */
                spin_lock_irq(&phba->hbalock);
-               if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+               if (phba->hba_flag & FCF_TS_INPROG) {
                        spin_unlock_irq(&phba->hbalock);
                        break;
                }
@@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 
                /* Otherwise, scan the entire FCF table and re-discover SAN */
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-                               "2770 Start FCF table scan due to new FCF "
-                               "event: evt_tag:x%x, fcf_index:x%x\n",
+                               "2770 Start FCF table scan per async FCF "
+                               "event, evt_tag:x%x, index:x%x\n",
                                acqe_fcoe->event_tag, acqe_fcoe->index);
                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
                                                     LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                                        "2547 Issue FCF scan read FCF mailbox "
-                                       "command failed 0x%x\n", rc);
+                                       "command failed (x%x)\n", rc);
                break;
 
        case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 
        case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
-                       "2549 FCF disconnected from network index 0x%x"
-                       " tag 0x%x\n", acqe_fcoe->index,
-                       acqe_fcoe->event_tag);
+                       "2549 FCF (x%x) disconnected from network, "
+                       "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
                /*
                 * If we are in the middle of FCF failover process, clear
                 * the corresponding FCF bit in the roundrobin bitmap.
@@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
                                        LOG_DISCOVERY,
-                                       "2773 Start FCF fast failover due "
-                                       "to CVL event: evt_tag:x%x\n",
-                                       acqe_fcoe->event_tag);
+                                       "2773 Start FCF failover per CVL, "
+                                       "evt_tag:x%x\n", acqe_fcoe->event_tag);
                        rc = lpfc_sli4_redisc_fcf_table(phba);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
 
        /* Scan FCF table from the first entry to re-discover SAN */
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-                       "2777 Start FCF table scan after FCF "
-                       "rediscovery quiescent period over\n");
+                       "2777 Start post-quiescent FCF table scan\n");
        rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
        if (rc)
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_active_sgl;
        }
 
-       /* Allocate eligible FCF bmask memory for FCF round robin failover */
+       /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
        longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
        phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
                                         GFP_KERNEL);
@@ -7270,6 +7267,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for the device's
+ * XRI exchange busy conditions to complete. It checks the XRI exchange
+ * busy state of outstanding FCP and ELS I/Os every 10 ms for up to 10
+ * seconds; after that, it checks every 30 seconds, logs an error message,
+ * and waits forever. Only when all XRI exchange busy conditions are
+ * complete will the driver unload proceed with invoking the function
+ * reset ioctl mailbox command to the CNA and the rest of the driver
+ * unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+       int wait_time = 0;
+       int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+       int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+       while (!fcp_xri_cmpl || !els_xri_cmpl) {
+               if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+                       if (!fcp_xri_cmpl)
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "2877 FCP XRI exchange busy "
+                                               "wait time: %d seconds.\n",
+                                               wait_time/1000);
+                       if (!els_xri_cmpl)
+                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                               "2878 ELS XRI exchange busy "
+                                               "wait time: %d seconds.\n",
+                                               wait_time/1000);
+                       msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+                       wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+               } else {
+                       msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+                       wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+               }
+               fcp_xri_cmpl =
+                       list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+               els_xri_cmpl =
+                       list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+       }
+}
+
 /**
  * lpfc_sli4_hba_unset - Unset the fcoe hba
  * @phba: Pointer to HBA context object.
@@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
                spin_unlock_irq(&phba->hbalock);
        }
 
+       /* Abort all iocbs associated with the hba */
+       lpfc_sli_hba_iocb_abort(phba);
+
+       /* Wait for completion of device XRI exchange busy */
+       lpfc_sli4_xri_exchange_busy_wait(phba);
+
        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);
 
index 0dfa310cd609318cb0c772f85ea344acdef2c5ea..62d0957e1d4c7dd29add28778f640af952e56b47 100644 (file)
@@ -796,6 +796,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
        return;
 }
 
+/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends a mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+       struct lpfc_hba  *phba  = vport->phba;
+       LPFC_MBOXQ_t     *mbox;
+       int rc;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (mbox) {
+               lpfc_unreg_login(phba, vport->vpi,
+                       vport->vpi + phba->vpi_base, mbox);
+               mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+               mbox->vport = vport;
+               mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+               mbox->context1 = NULL;
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+               if (rc == MBX_NOT_FINISHED)
+                       mempool_free(mbox, phba->mbox_mem_pool);
+       }
+}
+
 /**
  * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
  * @phba: pointer to lpfc hba data structure.
index 3a658953486cf9a1e73027c9be828a0c12be5e41..f64b65a770b8df3b3f0a3ba47decc0512a8df0a7 100644 (file)
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
        spin_lock_irqsave(shost->host_lock, flags);
        if (!vport->stat_data_enabled ||
                vport->stat_data_blocked ||
+               !pnode ||
                !pnode->lat_data ||
                (phba->bucket_type == LPFC_NO_BUCKET)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
        struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
        unsigned long flags;
 
+       if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+               return;
+
        /* If there is queuefull or busy condition send a scsi event */
        if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
                (cmnd->result == SAM_STAT_BUSY)) {
@@ -3226,10 +3230,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *iocbqrsp;
+       struct lpfc_nodelist *pnode = rdata->pnode;
        int ret;
        int status;
 
-       if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+       if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                return FAILED;
 
        lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3256,7 +3261,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
                         "0702 Issue %s to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n",
                         lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
-                        rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+                        pnode->nlp_rpi, pnode->nlp_flag);
 
        status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
index 0d1e187b005ddd366b0545fee2356d1b43df9994..554efa6623f4edab7154e3f1ad23e5be845260c6 100644 (file)
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
                return -ENOMEM;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
-               bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+               bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
 
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 
@@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_dmabuf *mp;
        struct lpfc_nodelist *ndlp;
+       struct Scsi_Host *shost;
        uint16_t rpi, vpi;
        int rc;
 
@@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        }
 
        if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-           (phba->sli_rev == LPFC_SLI_REV4))
+           (phba->sli_rev == LPFC_SLI_REV4) &&
+           (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
                lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
 
        /*
@@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                        return;
        }
 
-       /* Unreg VPI, if the REG_VPI succeed after VLink failure */
        if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
                !(phba->pport->load_flag & FC_UNLOADING) &&
                !pmb->u.mb.mbxStatus) {
-               lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
-               pmb->vport = vport;
-               pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-               rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-               if (rc != MBX_NOT_FINISHED)
-                       return;
+               shost = lpfc_shost_from_vport(vport);
+               spin_lock_irq(shost->host_lock);
+               vport->vpi_state |= LPFC_VPI_REGISTERED;
+               vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+               spin_unlock_irq(shost->host_lock);
        }
 
        if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
  * @phba: Pointer to HBA context object.
  *
- * This routine performs a round robin SCSI command to SLI4 FCP WQ index
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
  * held.
  *
@@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        uint16_t abrt_iotag;
        struct lpfc_iocbq *abrtiocbq;
        struct ulp_bde64 *bpl = NULL;
-       uint32_t els_id = ELS_ID_DEFAULT;
+       uint32_t els_id = LPFC_ELS_ID_DEFAULT;
        int numBdes, i;
        struct ulp_bde64 bde;
 
@@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
        memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
        abort_tag = (uint32_t) iocbq->iotag;
        xritag = iocbq->sli4_xritag;
-       wqe->words[7] = 0; /* The ct field has moved so reset */
+       wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
        /* words0-2 bpl convert bde */
        if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
                numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                 * contains the FCFI and remote N_Port_ID is
                 * in word 5.
                 */
-
                ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
-               bf_set(lpfc_wqe_gen_context, &wqe->generic,
-                               iocbq->iocb.ulpContext);
-
-               bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+               bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+                      iocbq->iocb.ulpContext);
+               bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+               bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
                /* CCP CCPE PV PRI in word10 were set in the memcpy */
-
                if (command_type == ELS_COMMAND_FIP) {
                        els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
                                        >> LPFC_FIP_ELS_ID_SHIFT);
                }
-               bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
-
+               bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+               bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+               bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+               bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
        break;
        case CMD_XMIT_SEQUENCE64_CX:
-               bf_set(lpfc_wqe_gen_context, &wqe->generic,
-                                       iocbq->iocb.un.ulpWord[3]);
-               wqe->generic.word3 = 0;
-               bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+               bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+                      iocbq->iocb.un.ulpWord[3]);
+               bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+                      iocbq->iocb.ulpContext);
                /* The entire sequence is transmitted for this IOCB */
                xmit_len = total_len;
                cmnd = CMD_XMIT_SEQUENCE64_CR;
        case CMD_XMIT_SEQUENCE64_CR:
-               /* word3 iocb=io_tag32 wqe=payload_offset */
-               /* payload offset used for multilpe outstanding
-                * sequences on the same exchange
-                */
-               wqe->words[3] = 0;
+               /* word3 iocb=io_tag32 wqe=reserved */
+               wqe->xmit_sequence.rsvd3 = 0;
                /* word4 relative_offset memcpy */
                /* word5 r_ctl/df_ctl memcpy */
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+               bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+               bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+                      LPFC_WQE_IOD_WRITE);
+               bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+                      LPFC_WQE_LENLOC_WORD12);
+               bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
                wqe->xmit_sequence.xmit_len = xmit_len;
                command_type = OTHER_COMMAND;
        break;
        case CMD_XMIT_BCAST64_CN:
-               /* word3 iocb=iotag32 wqe=payload_len */
-               wqe->words[3] = 0; /* no definition for this in wqe */
+               /* word3 iocb=iotag32 wqe=seq_payload_len */
+               wqe->xmit_bcast64.seq_payload_len = xmit_len;
                /* word4 iocb=rsvd wqe=rsvd */
                /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
                /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
-               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+               bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+               bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+               bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+                      LPFC_WQE_LENLOC_WORD3);
+               bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
        break;
        case CMD_FCP_IWRITE64_CR:
                command_type = FCP_COMMAND_DATA_OUT;
-               /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
-                * confusing.
-                * word3 is payload_len: byte offset to the sgl entry for the
-                * fcp_command.
-                * word4 is total xfer len, same as the IOCB->ulpParameter.
-                * word5 is initial xfer len 0 = wait for xfer-ready
-                */
-
-               /* Always wait for xfer-ready before sending data */
-               wqe->fcp_iwrite.initial_xfer_len = 0;
-               /* word 4 (xfer length) should have been set on the memcpy */
-
-       /* allow write to fall through to read */
+               /* word3 iocb=iotag wqe=payload_offset_len */
+               /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+               wqe->fcp_iwrite.payload_offset_len =
+                       xmit_len + sizeof(struct fcp_rsp);
+               /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+               /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+               bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+                      iocbq->iocb.ulpFCP2Rcvy);
+               bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+               /* Always open the exchange */
+               bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+               bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+               bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+                      LPFC_WQE_LENLOC_WORD4);
+               bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+               bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+       break;
        case CMD_FCP_IREAD64_CR:
-               /* FCP_CMD is always the 1st sgl entry */
-               wqe->fcp_iread.payload_len =
+               /* word3 iocb=iotag wqe=payload_offset_len */
+               /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+               wqe->fcp_iread.payload_offset_len =
                        xmit_len + sizeof(struct fcp_rsp);
-
-               /* word 4 (xfer length) should have been set on the memcpy */
-
-               bf_set(lpfc_wqe_gen_erp, &wqe->generic,
-                       iocbq->iocb.ulpFCP2Rcvy);
-               bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
-               /* The XC bit and the XS bit are similar. The driver never
-                * tracked whether or not the exchange was previouslly open.
-                * XC = Exchange create, 0 is create. 1 is already open.
-                * XS = link cmd: 1 do not close the exchange after command.
-                * XS = 0 close exchange when command completes.
-                * The only time we would not set the XC bit is when the XS bit
-                * is set and we are sending our 2nd or greater command on
-                * this exchange.
-                */
+               /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+               /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+               bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+                      iocbq->iocb.ulpFCP2Rcvy);
+               bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
                /* Always open the exchange */
                bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
-               wqe->words[10] &= 0xffff0000; /* zero out ebde count */
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
-               break;
+               bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+               bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+                      LPFC_WQE_LENLOC_WORD4);
+               bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+               bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+       break;
        case CMD_FCP_ICMND64_CR:
+               /* word3 iocb=IO_TAG wqe=reserved */
+               wqe->fcp_icmd.rsrvd3 = 0;
+               bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
                /* Always open the exchange */
-               bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
-               wqe->words[4] = 0;
-               wqe->words[10] &= 0xffff0000; /* zero out ebde count */
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+               bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+               bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+               bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+                      LPFC_WQE_LENLOC_NONE);
+               bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
        break;
        case CMD_GEN_REQUEST64_CR:
-               /* word3 command length is described as byte offset to the
-                * rsp_data. Would always be 16, sizeof(struct sli4_sge)
-                * sgl[0] = cmnd
-                * sgl[1] = rsp.
-                *
-                */
-               wqe->gen_req.command_len = xmit_len;
-               /* Word4 parameter  copied in the memcpy */
-               /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+               /* word3 iocb=IO_TAG wqe=request_payload_len */
+               wqe->gen_req.request_payload_len = xmit_len;
+               /* word4 iocb=parameter wqe=relative_offset memcpy */
+               /* word5 [rctl, type, df_ctl, la] copied in memcpy */
                /* word6 context tag copied in memcpy */
                if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
                        ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
@@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                                ct, iocbq->iocb.ulpCommand);
                        return IOCB_ERROR;
                }
-               bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
-               bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
-                       iocbq->iocb.ulpTimeout);
-
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+               bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+               bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+               bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+               bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+               bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+               bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
                command_type = OTHER_COMMAND;
        break;
        case CMD_XMIT_ELS_RSP64_CX:
                /* words0-2 BDE memcpy */
-               /* word3 iocb=iotag32 wqe=rsvd */
-               wqe->words[3] = 0;
+               /* word3 iocb=iotag32 wqe=response_payload_len */
+               wqe->xmit_els_rsp.response_payload_len = xmit_len;
                /* word4 iocb=did wge=rsvd. */
-               wqe->words[4] = 0;
+               wqe->xmit_els_rsp.rsvd4 = 0;
                /* word5 iocb=rsvd wge=did */
                bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
                         iocbq->iocb.un.elsreq64.remoteID);
-
-               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
-                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
-
-               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
-               bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+               bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+                      ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+               bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+               bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+                      iocbq->iocb.ulpContext);
                if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
-                       bf_set(lpfc_wqe_gen_context, &wqe->generic,
+                       bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
                               iocbq->vport->vpi + phba->vpi_base);
+               bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+               bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+               bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+                      LPFC_WQE_LENLOC_WORD3);
+               bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
                command_type = OTHER_COMMAND;
        break;
        case CMD_CLOSE_XRI_CN:
@@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                else
                        bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
                bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-               wqe->words[5] = 0;
-               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+               /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+               wqe->abort_cmd.rsrvd5 = 0;
+               bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
                        ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
                abort_tag = iocbq->iocb.un.acxri.abortIoTag;
                /*
                 * The abort handler will send us CMD_ABORT_XRI_CN or
                 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
                 */
-               bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+               bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+               bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+                      LPFC_WQE_LENLOC_NONE);
                cmnd = CMD_ABORT_XRI_CX;
                command_type = OTHER_COMMAND;
                xritag = 0;
@@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
                bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
                       iocbq->iocb.ulpContext);
+               bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+               bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+                      LPFC_WQE_LENLOC_NONE);
                /* Overwrite the pre-set comnd type with OTHER_COMMAND */
                command_type = OTHER_COMMAND;
        break;
        case CMD_XRI_ABORTED_CX:
        case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
-               /* words0-2 are all 0's no bde */
-               /* word3 and word4 are rsvrd */
-               wqe->words[3] = 0;
-               wqe->words[4] = 0;
-               /* word5 iocb=rsvd wge=did */
-               /* There is no remote port id in the IOCB? */
-               /* Let this fall through and fail */
        case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
        case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
        case CMD_FCP_TRSP64_CX: /* Target mode rcv */
@@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
                                iocbq->iocb.ulpCommand);
                return IOCB_ERROR;
        break;
-
        }
-       bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
-       bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
-       wqe->generic.abort_tag = abort_tag;
-       bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
-       bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
-       bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
-       bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
-
+       bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+       bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+       wqe->generic.wqe_com.abort_tag = abort_tag;
+       bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+       bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+       bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+       bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        return 0;
 }
 
@@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 }
 
 /**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
  * @cmdiocb: Pointer to driver command iocb object.
  *
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort request,
+ * this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
  **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct lpfc_iocbq *cmdiocb)
 {
        struct lpfc_vport *vport = cmdiocb->vport;
        struct lpfc_iocbq *abtsiocbp;
        IOCB_t *icmd = NULL;
        IOCB_t *iabt = NULL;
-       int retval = IOCB_ERROR;
+       int retval;
 
        /*
         * There are certain command types we don't want to abort.  And we
@@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;
 
-       /* If we're unloading, don't abort iocb on the ELS ring, but change the
-        * callback so that nothing happens when it finishes.
-        */
-       if ((vport->load_flag & FC_UNLOADING) &&
-           (pring->ringno == LPFC_ELS_RING)) {
-               if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
-                       cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
-               else
-                       cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
-               goto abort_iotag_exit;
-       }
-
        /* issue ABTS for this IOCB based on iotag */
        abtsiocbp = __lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
        if (retval)
                __lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+       /*
+        * The caller of this routine should check for IOCB_ERROR
+        * and handle it properly.  This routine no longer removes the
+        * iocb from the txcmplq or calls its completion in case of IOCB_ERROR.
+        */
+       return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. In case
+ * of unloading, the abort iocb will not be issued to commands on the ELS
+ * ring. Instead, the completion callback of those commands is changed so
+ * that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                          struct lpfc_iocbq *cmdiocb)
+{
+       struct lpfc_vport *vport = cmdiocb->vport;
+       int retval = IOCB_ERROR;
+       IOCB_t *icmd = NULL;
+
+       /*
+        * There are certain command types we don't want to abort.  And we
+        * don't want to abort commands that are already in the process of
+        * being aborted.
+        */
+       icmd = &cmdiocb->iocb;
+       if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+           icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+               return 0;
+
+       /*
+        * If we're unloading, don't abort iocb on the ELS ring, but change
+        * the callback so that nothing happens when it finishes.
+        */
+       if ((vport->load_flag & FC_UNLOADING) &&
+           (pring->ringno == LPFC_ELS_RING)) {
+               if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+                       cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+               else
+                       cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+               goto abort_iotag_exit;
+       }
+
+       /* Now, we try to issue the abort to the cmdiocb out */
+       retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
 abort_iotag_exit:
        /*
         * Caller to this routine should check for IOCB_ERROR
@@ -7353,6 +7413,62 @@ abort_iotag_exit:
        return retval;
 }
 
+/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+       LIST_HEAD(completions);
+       struct lpfc_iocbq *iocb, *next_iocb;
+
+       if (pring->ringno == LPFC_ELS_RING)
+               lpfc_fabric_abort_hba(phba);
+
+       spin_lock_irq(&phba->hbalock);
+
+       /* Take off all the iocbs on txq for cancelling */
+       list_splice_init(&pring->txq, &completions);
+       pring->txq_cnt = 0;
+
+       /* Next issue ABTS for everything on the txcmplq */
+       list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+               lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Cancel all the IOCBs from the completions list */
+       lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+                             IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring *pring;
+       int i;
+
+       for (i = 0; i < psli->num_rings; i++) {
+               pring = &psli->ring[i];
+               lpfc_sli_iocb_ring_abort(phba, pring);
+       }
+}
+
 /**
  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
  * @iocbq: Pointer to driver iocb object.
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
        /* Issue the mailbox command asynchronously */
        mboxq->vport = phba->pport;
        mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag |= FCF_TS_INPROG;
+       spin_unlock_irq(&phba->hbalock);
+
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED)
                error = -EIO;
        else {
-               spin_lock_irq(&phba->hbalock);
-               phba->hba_flag |= FCF_DISC_INPROGRESS;
-               spin_unlock_irq(&phba->hbalock);
                /* Reset eligible FCF count for new scan */
                if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
                        phba->fcf.eligible_fcf_cnt = 0;
@@ -12258,21 +12376,21 @@ fail_fcf_scan:
        if (error) {
                if (mboxq)
                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
-               /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+               /* FCF scan failed, clear FCF_TS_INPROG flag */
                spin_lock_irq(&phba->hbalock);
-               phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+               phba->hba_flag &= ~FCF_TS_INPROG;
                spin_unlock_irq(&phba->hbalock);
        }
        return error;
 }
 
 /**
- * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: FCF table entry offset.
  *
  * This routine is invoked to read an FCF record indicated by @fcf_index
- * and to use it for FLOGI round robin FCF failover.
+ * and to use it for FLOGI roundrobin FCF failover.
  *
 * Return 0 if the mailbox command is submitted successfully, non-zero
  * otherwise.
@@ -12318,7 +12436,7 @@ fail_fcf_read:
  * @fcf_index: FCF table entry offset.
  *
  * This routine is invoked to read an FCF record indicated by @fcf_index to
- * determine whether it's eligible for FLOGI round robin failover list.
+ * determine whether it's eligible for FLOGI roundrobin failover list.
  *
 * Return 0 if the mailbox command is submitted successfully, non-zero
  * otherwise.
@@ -12364,7 +12482,7 @@ fail_fcf_read:
  *
  * This routine is to get the next eligible FCF record index in a round
  * robin fashion. If the next eligible FCF record index equals to the
- * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
  * shall be returned, otherwise, the next eligible FCF record's index
  * shall be returned.
  **/
@@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
                return LPFC_FCOE_FCF_NEXT_NONE;
        }
 
-       /* Check roundrobin failover index bmask stop condition */
-       if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
-               if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
-                       lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-                                       "2847 Round robin failover FCF index "
-                                       "search hit stop condition:x%x\n",
-                                       next_fcf_index);
-                       return LPFC_FCOE_FCF_NEXT_NONE;
-               }
-               /* The roundrobin failover index bmask updated, start over */
-               lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                               "2848 Round robin failover FCF index bmask "
-                               "updated, start over\n");
-               spin_lock_irq(&phba->hbalock);
-               phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
-               spin_unlock_irq(&phba->hbalock);
-               return phba->fcf.fcf_rr_init_indx;
-       }
-
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2845 Get next round robin failover "
-                       "FCF index x%x\n", next_fcf_index);
+                       "2845 Get next roundrobin failover FCF (x%x)\n",
+                       next_fcf_index);
+
        return next_fcf_index;
 }
 
@@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine sets the FCF record index in to the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
  * does not go beyond the range of the driver allocated bmask dimension
  * before setting the bit.
  *
@@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
 {
        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
-                               "2610 HBA FCF index reached driver's "
-                               "book keeping dimension: fcf_index:%d, "
-                               "driver_bmask_max:%d\n",
+                               "2610 FCF (x%x) reached driver's book "
+                               "keeping dimension:x%x\n",
                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
                return -EINVAL;
        }
        /* Set the eligible FCF record index bmask */
        set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
 
-       /* Set the roundrobin index bmask updated */
-       spin_lock_irq(&phba->hbalock);
-       phba->fcf.fcf_flag |= FCF_REDISC_RRU;
-       spin_unlock_irq(&phba->hbalock);
-
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2790 Set FCF index x%x to round robin failover "
+                       "2790 Set FCF (x%x) to roundrobin FCF failover "
                        "bmask\n", fcf_index);
 
        return 0;
@@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine clears the FCF record index from the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
  * does not go beyond the range of the driver allocated bmask dimension
  * before clearing the bit.
  **/
@@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
        if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
-                               "2762 HBA FCF index goes beyond driver's "
-                               "book keeping dimension: fcf_index:%d, "
-                               "driver_bmask_max:%d\n",
+                               "2762 FCF (x%x) reached driver's book "
+                               "keeping dimension:x%x\n",
                                fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
                return;
        }
@@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
        clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                       "2791 Clear FCF index x%x from round robin failover "
+                       "2791 Clear FCF (x%x) from roundrobin failover "
                        "bmask\n", fcf_index);
 }
 
@@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
                }
        } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-                               "2775 Start FCF rediscovery quiescent period "
-                               "wait timer before scaning FCF table\n");
+                               "2775 Start FCF rediscover quiescent timer\n");
                /*
                 * Start FCF rediscovery wait timer for pending FCF
                 * before rescan FCF record table.
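
The roundrobin failover bmask referenced throughout these hunks is an ordinary bitmap indexed by FCF table entry: eligible records are set_bit() into phba->fcf.fcf_rr_bmask, dead ones are cleared, and the next candidate is found by a wrap-around bit search past the current index. Below is a minimal userspace sketch of that wrap-around selection; it is simplified, and the driver itself uses the kernel bitmap helpers such as find_next_bit().

#include <stdint.h>
#include <stdio.h>

#define FCF_TBL_INDX_MAX	64	/* illustrative table size */
#define FCF_NEXT_NONE		0xFFFF	/* mirrors LPFC_FCOE_FCF_NEXT_NONE */

/* Wrap-around search for the next eligible FCF index after 'curr'. */
static uint16_t next_eligible_fcf(const uint64_t *bmask, uint16_t curr)
{
	uint16_t i, idx;

	for (i = 1; i <= FCF_TBL_INDX_MAX; i++) {
		idx = (curr + i) % FCF_TBL_INDX_MAX;
		if (bmask[idx / 64] & (1ULL << (idx % 64)))
			return idx;
	}
	return FCF_NEXT_NONE;	/* no eligible entry left */
}

int main(void)
{
	uint64_t bmask[1] = { 0 };

	bmask[0] |= 1ULL << 3;	/* mark FCF indexes 3 and 40 as eligible */
	bmask[0] |= 1ULL << 40;
	printf("after 3  -> %u\n", next_eligible_fcf(bmask, 3));	/* 40 */
	printf("after 40 -> %u\n", next_eligible_fcf(bmask, 40));	/* 3 */
	return 0;
}
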
index a0ca572ec28be4a4105a08f4360459aa8f091abf..c4483feb8b71a32a4198175ac91a0c142a647e61 100644 (file)
  *******************************************************************/
 
 #define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO            10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1             10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL     32
 #define LPFC_GET_QE_REL_INT                    32
 #define LPFC_RPI_LOW_WATER_MARK                        10
 
+#define LPFC_UNREG_FCF                          1
+#define LPFC_SKIP_UNREG_FCF                     0
+
 /* Amount of time in seconds for waiting FCF rediscovery to complete */
 #define LPFC_FCF_REDISCOVER_WAIT_TMO           2000 /* msec */
 
@@ -163,9 +169,8 @@ struct lpfc_fcf {
 #define FCF_REDISC_PEND        0x80 /* FCF rediscovery pending */
 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
-#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
        uint32_t addr_mode;
-       uint16_t fcf_rr_init_indx;
        uint32_t eligible_fcf_cnt;
        struct lpfc_fcf_rec current_rec;
        struct lpfc_fcf_rec failover_rec;
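
The three LPFC_XRI_EXCH_BUSY_WAIT_* constants added above encode the two-phase polling described for lpfc_sli4_xri_exchange_busy_wait(): poll every T1 = 10 ms until TMO = 10 s of waiting has accumulated, then fall back to T2 = 30 s polls (logging each time) until the busy condition clears. A small, hedged sketch of that cadence follows; still_busy() is a hypothetical stand-in for the "abort lists not yet empty" check.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BUSY_WAIT_TMO	10000	/* ms of fast polling before slowing down */
#define BUSY_WAIT_T1	10	/* ms: fast poll interval */
#define BUSY_WAIT_T2	30000	/* ms: slow poll interval after TMO */

static void msleep_ms(long ms)
{
	struct timespec ts = { .tv_sec = ms / 1000,
			       .tv_nsec = (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Stand-in for "XRI exchange busy lists not yet empty". */
static bool still_busy(void)
{
	static int polls_left = 5;	/* pretend the condition clears quickly */
	return polls_left-- > 0;
}

int main(void)
{
	int wait_time = 0;		/* accumulated wait in ms */

	while (still_busy()) {
		if (wait_time > BUSY_WAIT_TMO) {
			printf("still busy after %d seconds\n", wait_time / 1000);
			msleep_ms(BUSY_WAIT_T2);
			wait_time += BUSY_WAIT_T2;
		} else {
			msleep_ms(BUSY_WAIT_T1);
			wait_time += BUSY_WAIT_T1;
		}
	}
	printf("cleared after %d ms of waiting\n", wait_time);
	return 0;
}
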
index f93120e4c7961e632e6fb79c9aa2ee28ad029e38..7a1b5b112a0bca1c26481063875576430a7ac664 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.17"
+#define LPFC_DRIVER_VERSION "8.3.18"
 #define LPFC_DRIVER_NAME               "lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME    "lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME    "lpfc:fp"
index d3c9cdee292b11cdcf44cd9b163bec3e7749814d..eb29d508513148653fb492df24cee9ab0b14cb17 100644 (file)
@@ -10,7 +10,7 @@
  *        2 of the License, or (at your option) any later version.
  *
  * FILE                : megaraid_sas.c
- * Version     : v00.00.04.17.1-rc1
+ * Version     : v00.00.04.31-rc1
  *
  * Authors:
  *     (email-id : megaraidlinux@lsi.com)
@@ -56,6 +56,15 @@ module_param_named(poll_mode_io, poll_mode_io, int, 0);
 MODULE_PARM_DESC(poll_mode_io,
        "Complete cmds from IO path, (default=0)");
 
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if the user does not provide a value
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+       "Maximum number of sectors per IO command");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -103,6 +112,7 @@ static int megasas_poll_wait_aen;
 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
 static u32 support_poll_for_event;
 static u32 megasas_dbg_lvl;
+static u32 support_device_change;
 
 /* define lock for aen poll */
 spinlock_t poll_aen_lock;
@@ -718,6 +728,10 @@ static int
 megasas_check_reset_gen2(struct megasas_instance *instance,
                struct megasas_register_set __iomem *regs)
 {
+       if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+               return 1;
+       }
+
        return 0;
 }
 
@@ -930,6 +944,7 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
                        mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
                        mfi_sgl->sge_skinny[i].phys_addr =
                                                sg_dma_address(os_sgl);
+                       mfi_sgl->sge_skinny[i].flag = 0;
                }
        }
        return sge_count;
@@ -1557,6 +1572,28 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
        }
 }
 
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+       if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+       (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+       (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+               *instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
+       }
+       instance->instancet->disable_intr(instance->reg_set);
+       instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
+       instance->issuepend_done = 0;
+
+       atomic_set(&instance->fw_outstanding, 0);
+       megasas_internal_reset_defer_cmds(instance);
+       process_fw_state_change_wq(&instance->work_init);
+}
+
 /**
  * megasas_wait_for_outstanding -      Wait for all outstanding cmds
  * @instance:                          Adapter soft state
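
The megasas_do_ocr() helper added in this hunk sequences an online controller reset: interrupts are masked, the adapter is flagged as being in fault recovery, the outstanding-command count is cleared, internal commands are parked for replay, and the firmware state-change work function is invoked directly (the extra consumer-signature write for the older SAS1064R/PERC5/VERDE parts is omitted here). The stand-alone sketch below only illustrates that ordering; the stub functions and plain int fields are stand-ins for the driver's real helpers and struct megasas_instance members.

#include <stdio.h>

static int adprecovery;        /* stands in for instance->adprecovery */
static int issuepend_done;     /* stands in for instance->issuepend_done */
static int fw_outstanding;     /* stands in for instance->fw_outstanding */

static void disable_intr(void)             { printf("mask adapter interrupts\n"); }
static void defer_internal_cmds(void)      { printf("park internal cmds for replay\n"); }
static void run_fw_state_change_work(void) { printf("run FW state-change work\n"); }

static void do_ocr_sketch(void)
{
        disable_intr();              /* stop new completions from racing the reset */
        adprecovery = 1;             /* i.e. MEGASAS_ADPRESET_SM_INFAULT */
        issuepend_done = 0;          /* pending-issue processing not finished yet */
        fw_outstanding = 0;          /* commands owned by the faulted FW are dropped */
        defer_internal_cmds();
        run_fw_state_change_work();  /* kicks the reset state machine */

        printf("adprecovery=%d issuepend_done=%d fw_outstanding=%d\n",
               adprecovery, issuepend_done, fw_outstanding);
}

int main(void)
{
        do_ocr_sketch();
        return 0;
}
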
@@ -1574,6 +1611,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
        unsigned long flags;
        struct list_head clist_local;
        struct megasas_cmd *reset_cmd;
+       u32 fw_state;
+       u8 kill_adapter_flag;
 
        spin_lock_irqsave(&instance->hba_lock, flags);
        adprecovery = instance->adprecovery;
@@ -1659,7 +1698,45 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
                msleep(1000);
        }
 
-       if (atomic_read(&instance->fw_outstanding)) {
+       i = 0;
+       kill_adapter_flag = 0;
+       do {
+               fw_state = instance->instancet->read_fw_status_reg(
+                                       instance->reg_set) & MFI_STATE_MASK;
+               if ((fw_state == MFI_STATE_FAULT) &&
+                       (instance->disableOnlineCtrlReset == 0)) {
+                       if (i == 3) {
+                               kill_adapter_flag = 2;
+                               break;
+                       }
+                       megasas_do_ocr(instance);
+                       kill_adapter_flag = 1;
+
+                       /* wait for 1 sec to let FW finish the pending cmds */
+                       msleep(1000);
+               }
+               i++;
+       } while (i <= 3);
+
+       if (atomic_read(&instance->fw_outstanding) &&
+                                       !kill_adapter_flag) {
+               if (instance->disableOnlineCtrlReset == 0) {
+
+                       megasas_do_ocr(instance);
+
+                       /* wait for 5 secs to let FW finish the pending cmds */
+                       for (i = 0; i < wait_time; i++) {
+                               int outstanding =
+                                       atomic_read(&instance->fw_outstanding);
+                               if (!outstanding)
+                                       return SUCCESS;
+                               msleep(1000);
+                       }
+               }
+       }
+
+       if (atomic_read(&instance->fw_outstanding) ||
+                                       (kill_adapter_flag == 2)) {
                printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
                /*
                * Send signal to FW to stop processing any pending cmds.
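
The new block above polls the firmware state after the normal wait loop: whenever the FW reports MFI_STATE_FAULT and online controller reset is permitted, it triggers megasas_do_ocr() and retries, giving up after the fourth pass by setting kill_adapter_flag to 2, which forces the "pending cmds after reset" handling below. The sketch mirrors only that retry policy in user space; the MFI_STATE_FAULT value and the fake read_fw_state() helper are placeholders, not the driver's definitions.

#include <stdio.h>

#define MFI_STATE_FAULT 0xF0000000u     /* placeholder value for illustration */

/* Pretend the FW stays faulted for the first three reads, then recovers. */
static unsigned int read_fw_state(int attempt)
{
        return attempt < 3 ? MFI_STATE_FAULT : 0;
}

int main(void)
{
        int i = 0, kill_adapter_flag = 0;
        int ocr_allowed = 1;            /* disableOnlineCtrlReset == 0 */

        do {
                if (read_fw_state(i) == MFI_STATE_FAULT && ocr_allowed) {
                        if (i == 3) {
                                kill_adapter_flag = 2;  /* give up */
                                break;
                        }
                        printf("attempt %d: trigger OCR and wait\n", i);
                        kill_adapter_flag = 1;
                }
                i++;
        } while (i <= 3);

        printf("kill_adapter_flag = %d\n", kill_adapter_flag);
        return 0;
}
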
@@ -2669,6 +2746,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
                        return -ENOMEM;
                }
 
+               memset(cmd->frame, 0, total_sz);
                cmd->frame->io.context = cmd->index;
                cmd->frame->io.pad_0 = 0;
        }
@@ -3585,6 +3663,27 @@ static int megasas_io_attach(struct megasas_instance *instance)
                        instance->max_fw_cmds - MEGASAS_INT_CMDS;
        host->this_id = instance->init_id;
        host->sg_tablesize = instance->max_num_sge;
+       /*
+        * Check if the module parameter value for max_sectors can be used
+        */
+       if (max_sectors && max_sectors < instance->max_sectors_per_req)
+               instance->max_sectors_per_req = max_sectors;
+       else {
+               if (max_sectors) {
+                       if (((instance->pdev->device ==
+                               PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+                               (instance->pdev->device ==
+                               PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+                               (max_sectors <= MEGASAS_MAX_SECTORS)) {
+                               instance->max_sectors_per_req = max_sectors;
+                       } else {
+                       printk(KERN_INFO "megasas: max_sectors should be > 0 "
+                               "and <= %d (or < 1MB for GEN2 controller)\n",
+                               instance->max_sectors_per_req);
+                       }
+               }
+       }
+
        host->max_sectors = instance->max_sectors_per_req;
        host->cmd_per_lun = 128;
        host->max_channel = MEGASAS_MAX_CHANNELS - 1;
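
The block above validates the new max_sectors module parameter against the firmware-reported limit: a smaller value is always accepted, a larger value is accepted only on the GEN2 controllers and only up to MEGASAS_MAX_SECTORS (2048 sectors, i.e. 1 MB), and anything else just produces a warning. A stand-alone sketch of that decision, reusing the 2048-sector cap from the megaraid_sas.h hunk later in this patch:

#include <stdio.h>

#define MEGASAS_MAX_SECTORS (2 * 1024)  /* cap from the header change below */

static unsigned int pick_max_sectors(unsigned int user, unsigned int fw_default,
                                     int is_gen2)
{
        if (user && user < fw_default)
                return user;                    /* lowering is always allowed */
        if (user && is_gen2 && user <= MEGASAS_MAX_SECTORS)
                return user;                    /* GEN2 may raise it up to 1 MB */
        if (user)
                printf("max_sectors should be > 0 and <= %u (or <= 1MB for GEN2)\n",
                       fw_default);
        return fw_default;                      /* otherwise keep the FW value */
}

int main(void)
{
        printf("%u\n", pick_max_sectors(64, 128, 0));   /* 64   */
        printf("%u\n", pick_max_sectors(1024, 128, 1)); /* 1024 */
        printf("%u\n", pick_max_sectors(4096, 128, 1)); /* 128  */
        return 0;
}
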
@@ -4658,6 +4757,15 @@ megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
                        megasas_sysfs_show_support_poll_for_event, NULL);
 
+static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+       return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+                       megasas_sysfs_show_support_device_change, NULL);
+
 static ssize_t
 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
 {
@@ -4978,6 +5086,7 @@ static int __init megasas_init(void)
               MEGASAS_EXT_VERSION);
 
        support_poll_for_event = 2;
+       support_device_change = 1;
 
        memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
 
@@ -5026,8 +5135,17 @@ static int __init megasas_init(void)
        if (rval)
                goto err_dcf_poll_mode_io;
 
+       rval = driver_create_file(&megasas_pci_driver.driver,
+                               &driver_attr_support_device_change);
+       if (rval)
+               goto err_dcf_support_device_change;
+
        return rval;
 
+err_dcf_support_device_change:
+       driver_remove_file(&megasas_pci_driver.driver,
+                 &driver_attr_poll_mode_io);
+
 err_dcf_poll_mode_io:
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
@@ -5057,6 +5175,10 @@ static void __exit megasas_exit(void)
                           &driver_attr_poll_mode_io);
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
+       driver_remove_file(&megasas_pci_driver.driver,
+                       &driver_attr_support_poll_for_event);
+       driver_remove_file(&megasas_pci_driver.driver,
+                       &driver_attr_support_device_change);
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_release_date);
        driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
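
The module init/exit hunks above extend the usual goto-unwind ladder: each driver_create_file() failure jumps to a label that removes only the attributes created before it, and megasas_exit() removes them in reverse order. The sketch below shows the same error-unwind shape with plain stand-in helpers rather than the driver core API.

#include <stdio.h>

/* Stand-ins for driver_create_file()/driver_remove_file(). */
static int create_attr(const char *name)  { printf("create %s\n", name); return 0; }
static void remove_attr(const char *name) { printf("remove %s\n", name); }

static int register_attrs(void)
{
        int rval;

        rval = create_attr("poll_mode_io");
        if (rval)
                goto err_poll_mode_io;

        rval = create_attr("support_device_change");
        if (rval)
                goto err_support_device_change;

        return 0;

err_support_device_change:
        remove_attr("poll_mode_io");    /* undo only what already succeeded */
err_poll_mode_io:
        return rval;
}

int main(void)
{
        return register_attrs();
}
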
index 16a4f68a34b0db0fa48145eb42c0f90aae25ec07..ad16f5e600462955bec5c5e4878119a4093f1ae7 100644 (file)
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                        "00.00.04.17.1-rc1"
-#define MEGASAS_RELDATE                        "Oct. 29, 2009"
-#define MEGASAS_EXT_VERSION            "Thu. Oct. 29, 11:41:51 PST 2009"
+#define MEGASAS_VERSION                        "00.00.04.31-rc1"
+#define MEGASAS_RELDATE                        "May 3, 2010"
+#define MEGASAS_EXT_VERSION            "Mon. May 3, 11:41:51 PST 2010"
 
 /*
  * Device IDs
@@ -706,6 +706,7 @@ struct megasas_ctrl_info {
 #define MEGASAS_MAX_LD_IDS                     (MEGASAS_MAX_LD_CHANNELS * \
                                                MEGASAS_MAX_DEV_PER_CHANNEL)
 
+#define MEGASAS_MAX_SECTORS                    (2*1024)
 #define MEGASAS_DBG_LVL                                1
 
 #define MEGASAS_FW_BUSY                                1
index e88bbdde49c5066b4bfb9fa2d8b5822115777cd8..0433ea6f27c9c2db97c464ca238afce39ebd0fcb 100644 (file)
@@ -452,10 +452,6 @@ void osd_end_request(struct osd_request *or)
 {
        struct request *rq = or->request;
 
-       _osd_free_seg(or, &or->set_attr);
-       _osd_free_seg(or, &or->enc_get_attr);
-       _osd_free_seg(or, &or->get_attr);
-
        if (rq) {
                if (rq->next_rq) {
                        _put_request(rq->next_rq);
@@ -464,6 +460,12 @@ void osd_end_request(struct osd_request *or)
 
                _put_request(rq);
        }
+
+       _osd_free_seg(or, &or->get_attr);
+       _osd_free_seg(or, &or->enc_get_attr);
+       _osd_free_seg(or, &or->set_attr);
+       _osd_free_seg(or, &or->cdb_cont);
+
        _osd_request_free(or);
 }
 EXPORT_SYMBOL(osd_end_request);
@@ -547,6 +549,12 @@ static int _osd_realloc_seg(struct osd_request *or,
        return 0;
 }
 
+static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
+{
+       OSD_DEBUG("total_bytes=%d\n", total_bytes);
+       return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
+}
+
 static int _alloc_set_attr_list(struct osd_request *or,
        const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
 {
@@ -885,6 +893,199 @@ int osd_req_read_kern(struct osd_request *or,
 }
 EXPORT_SYMBOL(osd_req_read_kern);
 
+static int _add_sg_continuation_descriptor(struct osd_request *or,
+       const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
+{
+       struct osd_sg_continuation_descriptor *oscd;
+       u32 oscd_size;
+       unsigned i;
+       int ret;
+
+       oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
+
+       if (!or->cdb_cont.total_bytes) {
+               /* First time, jump over the header, we will write to:
+                *      cdb_cont.buff + cdb_cont.total_bytes
+                */
+               or->cdb_cont.total_bytes =
+                               sizeof(struct osd_continuation_segment_header);
+       }
+
+       ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
+       if (unlikely(ret))
+               return ret;
+
+       oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
+       oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
+       oscd->hdr.pad_length = 0;
+       oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
+
+       *len = 0;
+       /* copy the sg entries and convert to network byte order */
+       for (i = 0; i < numentries; i++) {
+               oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
+               oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
+               *len += sglist[i].len;
+       }
+
+       or->cdb_cont.total_bytes += oscd_size;
+       OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
+                 or->cdb_cont.total_bytes, oscd_size, numentries);
+       return 0;
+}
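+
+/* _add_sg_continuation_descriptor() above grows the CDB continuation buffer,
+ * copies the caller's (offset, len) pairs into it in big-endian byte order,
+ * and accumulates the total transfer length later handed to
+ * osd_req_write()/osd_req_read(). The user-space sketch below shows just the
+ * packing and length accounting; htobe64() from <endian.h> stands in for the
+ * kernel's cpu_to_be64():
+ *
+ *     #include <endian.h>
+ *     #include <stdint.h>
+ *     #include <stdio.h>
+ *
+ *     struct sg_entry    { uint64_t offset, len; };  // CPU byte order
+ *     struct sg_entry_be { uint64_t offset, len; };  // big-endian on the wire
+ *
+ *     static uint64_t pack_entries(const struct sg_entry *in,
+ *                                  struct sg_entry_be *out, unsigned int n)
+ *     {
+ *             uint64_t total = 0;
+ *             unsigned int i;
+ *
+ *             for (i = 0; i < n; i++) {
+ *                     out[i].offset = htobe64(in[i].offset);
+ *                     out[i].len    = htobe64(in[i].len);
+ *                     total += in[i].len;  // length of the whole I/O
+ *             }
+ *             return total;
+ *     }
+ *
+ *     int main(void)
+ *     {
+ *             struct sg_entry in[2] = { { 0, 4096 }, { 1 << 20, 512 } };
+ *             struct sg_entry_be out[2];
+ *
+ *             printf("total=%llu\n",
+ *                    (unsigned long long)pack_entries(in, out, 2));
+ *             return 0;
+ *     }
+ */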
+
+static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
+{
+       struct request_queue *req_q = osd_request_queue(or->osd_dev);
+       struct bio *bio;
+       struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+       struct osd_continuation_segment_header *cont_seg_hdr;
+
+       if (!or->cdb_cont.total_bytes)
+               return 0;
+
+       cont_seg_hdr = or->cdb_cont.buff;
+       cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
+       cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
+
+       /* create a bio for continuation segment */
+       bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
+                          GFP_KERNEL);
+       if (unlikely(!bio))
+               return -ENOMEM;
+
+       bio->bi_rw |= REQ_WRITE;
+
+       /* integrity check the continuation before the bio is linked
+        * with the other data segments since the continuation
+        * integrity is separate from the other data segments.
+        */
+       osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
+
+       cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
+
+       /* we can't use _req_append_segment, because we need to link in the
+        * continuation bio to the head of the bio list - the
+        * continuation segment (if it exists) is always the first segment in
+        * the out data buffer.
+        */
+       bio->bi_next = or->out.bio;
+       or->out.bio = bio;
+       or->out.total_bytes += or->cdb_cont.total_bytes;
+
+       return 0;
+}
+
+/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
+ * @sglist that has the scatter gather entries. Scatter-gather enables a write
+ * of multiple non-contiguous areas of an object, in a single call. The extents
+ * may overlap and/or be in any order. The only constraint is that:
+ *     total_bytes(sglist) >= total_bytes(bio)
+ */
+int osd_req_write_sg(struct osd_request *or,
+       const struct osd_obj_id *obj, struct bio *bio,
+       const struct osd_sg_entry *sglist, unsigned numentries)
+{
+       u64 len;
+       int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+       if (ret)
+               return ret;
+       osd_req_write(or, obj, 0, bio, len);
+
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg);
+
+/* osd_req_read_sg: Read multiple extents of an object into @bio
+ * See osd_req_write_sg
+ */
+int osd_req_read_sg(struct osd_request *or,
+       const struct osd_obj_id *obj, struct bio *bio,
+       const struct osd_sg_entry *sglist, unsigned numentries)
+{
+       u64 len;
+       int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+       if (ret)
+               return ret;
+       osd_req_read(or, obj, 0, bio, len);
+
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg);
+
+/* SG-list write/read Kern API
+ *
+ * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
+ * of sg_entries. @numentries indicates how many pointers and sg_entries there
+ * are. By requiring an array of buff pointers, this allows a caller to do a
+ * single write/read and scatter into multiple buffers.
+ * NOTE: Each buffer + len should not cross a page boundary.
+ */
+static struct bio *_create_sg_bios(struct osd_request *or,
+       void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
+{
+       struct request_queue *q = osd_request_queue(or->osd_dev);
+       struct bio *bio;
+       unsigned i;
+
+       bio = bio_kmalloc(GFP_KERNEL, numentries);
+       if (unlikely(!bio)) {
+               OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       for (i = 0; i < numentries; i++) {
+               unsigned offset = offset_in_page(buff[i]);
+               struct page *page = virt_to_page(buff[i]);
+               unsigned len = sglist[i].len;
+               unsigned added_len;
+
+               BUG_ON(offset + len > PAGE_SIZE);
+               added_len = bio_add_pc_page(q, bio, page, len, offset);
+               if (unlikely(len != added_len)) {
+                       OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
+                                 len, added_len);
+                       bio_put(bio);
+                       return ERR_PTR(-ENOMEM);
+               }
+       }
+
+       return bio;
+}
+
+int osd_req_write_sg_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, void **buff,
+       const struct osd_sg_entry *sglist, unsigned numentries)
+{
+       struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       bio->bi_rw |= REQ_WRITE;
+       osd_req_write_sg(or, obj, bio, sglist, numentries);
+
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg_kern);
+
+int osd_req_read_sg_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, void **buff,
+       const struct osd_sg_entry *sglist, unsigned numentries)
+{
+       struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       osd_req_read_sg(or, obj, bio, sglist, numentries);
+
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg_kern);
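
As the comment above notes, every buffer handed to osd_req_{write,read}_sg_kern() must fit inside a single page, because _create_sg_bios() maps each (buffer, length) pair to exactly one bio page vector and trips a BUG_ON otherwise. A small user-space sketch of that constraint check; the 4096-byte page size here is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u          /* assumed page size */

/* Non-zero when buff..buff+len stays within a single page. */
static int buffer_fits_one_page(const void *buff, unsigned int len)
{
        uintptr_t offset = (uintptr_t)buff & (SKETCH_PAGE_SIZE - 1);

        return offset + len <= SKETCH_PAGE_SIZE;
}

int main(void)
{
        static char page[SKETCH_PAGE_SIZE] __attribute__((aligned(SKETCH_PAGE_SIZE)));

        printf("%d\n", buffer_fits_one_page(page, SKETCH_PAGE_SIZE));       /* 1 */
        printf("%d\n", buffer_fits_one_page(page + 100, SKETCH_PAGE_SIZE)); /* 0 */
        return 0;
}
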
+
+
+
 void osd_req_get_attributes(struct osd_request *or,
        const struct osd_obj_id *obj)
 {
@@ -1218,17 +1419,18 @@ int osd_req_add_get_attr_page(struct osd_request *or,
        or->get_attr.buff = attar_page;
        or->get_attr.total_bytes = max_page_len;
 
-       or->set_attr.buff = set_one_attr->val_ptr;
-       or->set_attr.total_bytes = set_one_attr->len;
-
        cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
        cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
-       /* ocdb->attrs_page.get_attr_offset; */
+
+       if (!set_one_attr || !set_one_attr->attr_page)
+               return 0; /* The set is optional */
+
+       or->set_attr.buff = set_one_attr->val_ptr;
+       or->set_attr.total_bytes = set_one_attr->len;
 
        cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
        cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
        cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
-       /* ocdb->attrs_page.set_attr_offset; */
        return 0;
 }
 EXPORT_SYMBOL(osd_req_add_get_attr_page);
@@ -1248,11 +1450,14 @@ static int _osd_req_finalize_attr_page(struct osd_request *or)
        if (ret)
                return ret;
 
+       if (or->set_attr.total_bytes == 0)
+               return 0;
+
        /* set one value */
        cdbh->attrs_page.set_attr_offset =
                osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
 
-       ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL,
+       ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
                                  &or->out);
        return ret;
 }
@@ -1276,7 +1481,8 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
 }
 
 static int _osd_req_finalize_data_integrity(struct osd_request *or,
-       bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
+       bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
+       const u8 *cap_key)
 {
        struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
        int ret;
@@ -1307,7 +1513,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
                or->out.last_seg = NULL;
 
                /* they are now all chained to request sign them all together */
-               osd_sec_sign_data(&or->out_data_integ, or->out.req->bio,
+               osd_sec_sign_data(&or->out_data_integ, out_data_bio,
                                  cap_key);
        }
 
@@ -1403,6 +1609,8 @@ int osd_finalize_request(struct osd_request *or,
 {
        struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
        bool has_in, has_out;
+        /* Save for data_integrity without the cdb_continuation */
+       struct bio *out_data_bio = or->out.bio;
        u64 out_data_bytes = or->out.total_bytes;
        int ret;
 
@@ -1418,9 +1626,14 @@ int osd_finalize_request(struct osd_request *or,
        osd_set_caps(&or->cdb, cap);
 
        has_in = or->in.bio || or->get_attr.total_bytes;
-       has_out = or->out.bio || or->set_attr.total_bytes ||
-               or->enc_get_attr.total_bytes;
+       has_out = or->out.bio || or->cdb_cont.total_bytes ||
+               or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
 
+       ret = _osd_req_finalize_cdb_cont(or, cap_key);
+       if (ret) {
+               OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
+               return ret;
+       }
        ret = _init_blk_request(or, has_in, has_out);
        if (ret) {
                OSD_DEBUG("_init_blk_request failed\n");
@@ -1458,7 +1671,8 @@ int osd_finalize_request(struct osd_request *or,
        }
 
        ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
-                                              out_data_bytes, cap_key);
+                                              out_data_bio, out_data_bytes,
+                                              cap_key);
        if (ret)
                return ret;
 
index 4b8765785aeb0cb3020acdb51b1b0218e42bac34..cf89091e4c3d3befec59b89425792dcad23e2a87 100644 (file)
@@ -1594,10 +1594,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
        cfg_entry = &ccn_hcam->cfg_entry;
        fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
 
-       pmcraid_info
-               ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
+       pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x "
+                "res: %x:%x:%x:%x\n",
                 pinstance->ccn.hcam->ilid,
                 pinstance->ccn.hcam->op_code,
+               ((pinstance->ccn.hcam->timestamp1) |
+               ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
                 pinstance->ccn.hcam->notification_type,
                 pinstance->ccn.hcam->notification_lost,
                 pinstance->ccn.hcam->flags,
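
The trace message above now prints the configuration-change timestamp, which the HCAM delivers as two 32-bit words (low half in timestamp1, high half in timestamp2). A one-line sketch of the reconstruction used in the format arguments:

#include <stdint.h>
#include <stdio.h>

static uint64_t hcam_timestamp(uint32_t timestamp1, uint32_t timestamp2)
{
        /* low word first, high word shifted into the upper 32 bits */
        return (uint64_t)timestamp1 | ((uint64_t)timestamp2 << 32);
}

int main(void)
{
        printf("%llx\n", (unsigned long long)hcam_timestamp(0xdeadbeef, 0x1));
        return 0;
}
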
@@ -1850,6 +1852,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
  *   none
  */
 static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
 
 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
 {
@@ -1881,6 +1884,10 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
                                               lock_flags);
                        return;
                }
+               if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
+                       pinstance->timestamp_error = 1;
+                       pmcraid_set_timestamp(cmd);
+               }
        } else {
                dev_info(&pinstance->pdev->dev,
                        "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
@@ -3363,7 +3370,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
        sg_size = buflen;
 
        for (i = 0; i < num_elem; i++) {
-               page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
+               page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
                if (!page) {
                        for (j = i - 1; j >= 0; j--)
                                __free_pages(sg_page(&scatterlist[j]), order);
@@ -3739,6 +3746,7 @@ static long pmcraid_ioctl_passthrough(
        unsigned long request_buffer;
        unsigned long request_offset;
        unsigned long lock_flags;
+       void *ioasa;
        u32 ioasc;
        int request_size;
        int buffer_size;
@@ -3780,6 +3788,11 @@ static long pmcraid_ioctl_passthrough(
        rc = __copy_from_user(buffer,
                             (struct pmcraid_passthrough_ioctl_buffer *) arg,
                             sizeof(struct pmcraid_passthrough_ioctl_buffer));
+
+       ioasa =
+       (void *)(arg +
+               offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+
        if (rc) {
                pmcraid_err("ioctl: can't copy passthrough buffer\n");
                rc = -EFAULT;
@@ -3947,22 +3960,14 @@ static long pmcraid_ioctl_passthrough(
        }
 
 out_handle_response:
-       /* If the command failed for any reason, copy entire IOASA buffer and
-        * return IOCTL success. If copying IOASA to user-buffer fails, return
+       /* copy entire IOASA buffer and return IOCTL success.
+        * If copying IOASA to user-buffer fails, return
         * EFAULT
         */
-       if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) {
-               void *ioasa =
-                   (void *)(arg +
-                   offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
-
-               pmcraid_info("command failed with %x\n",
-                            le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
-               if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
-                                sizeof(struct pmcraid_ioasa))) {
-                       pmcraid_err("failed to copy ioasa buffer to user\n");
-                       rc = -EFAULT;
-               }
+       if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
+               sizeof(struct pmcraid_ioasa))) {
+               pmcraid_err("failed to copy ioasa buffer to user\n");
+               rc = -EFAULT;
        }
 
        /* If the data transfer was from device, copy the data onto user
@@ -5147,6 +5152,16 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
                pinstance->inq_data = NULL;
                pinstance->inq_data_baddr = 0;
        }
+
+       if (pinstance->timestamp_data != NULL) {
+               pci_free_consistent(pinstance->pdev,
+                                   sizeof(struct pmcraid_timestamp_data),
+                                   pinstance->timestamp_data,
+                                   pinstance->timestamp_data_baddr);
+
+               pinstance->timestamp_data = NULL;
+               pinstance->timestamp_data_baddr = 0;
+       }
 }
 
 /**
@@ -5205,6 +5220,20 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
                return -ENOMEM;
        }
 
+       /* allocate DMAable memory for set timestamp data buffer */
+       pinstance->timestamp_data = pci_alloc_consistent(
+                                       pinstance->pdev,
+                                       sizeof(struct pmcraid_timestamp_data),
+                                       &pinstance->timestamp_data_baddr);
+
+       if (pinstance->timestamp_data == NULL) {
+               pmcraid_err("couldn't allocate DMA memory for "
+                               "set time_stamp\n");
+               pmcraid_release_buffers(pinstance);
+               return -ENOMEM;
+       }
+
+
        /* Initialize all the command blocks and add them to free pool. No
         * need to lock (free_pool_lock) as this is done in initialization
         * itself
@@ -5609,6 +5638,68 @@ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
        return;
 }
 
+/**
+ * pmcraid_set_timestamp - set the timestamp to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ *  none
+ */
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
+{
+       struct pmcraid_instance *pinstance = cmd->drv_inst;
+       struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+       __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
+       struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+       struct timeval tv;
+       __le64 timestamp;
+
+       do_gettimeofday(&tv);
+       timestamp = tv.tv_sec * 1000;
+
+       pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
+       pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
+       pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
+       pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
+       pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
+       pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp)  >> 40);
+
+       pmcraid_reinit_cmdblk(cmd);
+       ioarcb->request_type = REQ_TYPE_SCSI;
+       ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+       ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
+       ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
+       memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
+
+       ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+                                       offsetof(struct pmcraid_ioarcb,
+                                               add_data.u.ioadl[0]));
+       ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+       ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+       ioarcb->request_flags0 |= NO_LINK_DESCS;
+       ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+       ioarcb->data_transfer_length =
+               cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+       ioadl = &(ioarcb->add_data.u.ioadl[0]);
+       ioadl->flags = IOADL_FLAGS_LAST_DESC;
+       ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
+       ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+
+       if (!pinstance->timestamp_error) {
+               pinstance->timestamp_error = 0;
+               pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
+                        PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+       } else {
+               pmcraid_send_cmd(cmd, pmcraid_return_cmd,
+                        PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+               return;
+       }
+}
+
+
 /**
  * pmcraid_init_res_table - Initialize the resource table
  * @cmd:  pointer to pmcraid command struct
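
pmcraid_set_timestamp() above converts the current time of day to milliseconds and stores the low 48 bits least-significant byte first in the six timestamp bytes of the SET TIMESTAMP data buffer. The sketch below reproduces just that byte packing; PMCRAID_REQ_TM_STR_LEN matches the definition added to pmcraid.h later in this patch.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define PMCRAID_REQ_TM_STR_LEN 6

/* Store msec LSB-first into the six timestamp bytes. */
static void pack_timestamp(uint64_t msec, uint8_t ts[PMCRAID_REQ_TM_STR_LEN])
{
        int i;

        for (i = 0; i < PMCRAID_REQ_TM_STR_LEN; i++)
                ts[i] = (uint8_t)(msec >> (8 * i));
}

int main(void)
{
        uint8_t ts[PMCRAID_REQ_TM_STR_LEN];
        uint64_t msec = (uint64_t)time(NULL) * 1000;

        pack_timestamp(msec, ts);
        printf("%02x %02x %02x %02x %02x %02x\n",
               ts[0], ts[1], ts[2], ts[3], ts[4], ts[5]);
        return 0;
}
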
@@ -5720,7 +5811,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
 
        /* release the resource list lock */
        spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
-       pmcraid_set_supported_devs(cmd);
+       pmcraid_set_timestamp(cmd);
 }
 
 /**
@@ -6054,10 +6145,10 @@ out_init:
 static void __exit pmcraid_exit(void)
 {
        pmcraid_netlink_release();
-       class_destroy(pmcraid_class);
        unregister_chrdev_region(MKDEV(pmcraid_major, 0),
                                 PMCRAID_MAX_ADAPTERS);
        pci_unregister_driver(&pmcraid_driver);
+       class_destroy(pmcraid_class);
 }
 
 module_init(pmcraid_init);
index 6cfa0145a1d7f088ea41926c400154bfcd0b989d..1134279604e805780f84607c523bbdd647685f24 100644 (file)
@@ -42,7 +42,7 @@
  */
 #define PMCRAID_DRIVER_NAME            "PMC MaxRAID"
 #define PMCRAID_DEVFILE                        "pmcsas"
-#define PMCRAID_DRIVER_VERSION         "2.0.2"
+#define PMCRAID_DRIVER_VERSION         "2.0.3"
 #define PMCRAID_DRIVER_DATE            __DATE__
 
 #define PMCRAID_FW_VERSION_1           0x002
 #define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE        0x05250000
 #define PMCRAID_IOASC_AC_TERMINATED_BY_HOST            0x0B5A0000
 #define PMCRAID_IOASC_UA_BUS_WAS_RESET                 0x06290000
+#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC           0x06908B00
 #define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER                0x06298000
 
 /* Driver defined IOASCs */
@@ -561,6 +562,17 @@ struct pmcraid_inquiry_data {
        __u8    reserved3[16];
 };
 
+#define PMCRAID_TIMESTAMP_LEN          12
+#define PMCRAID_REQ_TM_STR_LEN         6
+#define PMCRAID_SCSI_SET_TIMESTAMP     0xA4
+#define PMCRAID_SCSI_SERVICE_ACTION    0x0F
+
+struct pmcraid_timestamp_data {
+       __u8 reserved1[4];
+       __u8 timestamp[PMCRAID_REQ_TM_STR_LEN];         /* current time value */
+       __u8 reserved2[2];
+};
+
 /* pmcraid_cmd - LLD representation of SCSI command */
 struct pmcraid_cmd {
 
@@ -568,7 +580,6 @@ struct pmcraid_cmd {
        struct pmcraid_control_block *ioa_cb;
        dma_addr_t ioa_cb_bus_addr;
        dma_addr_t dma_handle;
-       u8 *sense_buffer;
 
        /* pointer to mid layer structure of SCSI commands */
        struct scsi_cmnd *scsi_cmd;
@@ -705,6 +716,9 @@ struct pmcraid_instance {
        struct pmcraid_inquiry_data *inq_data;
        dma_addr_t  inq_data_baddr;
 
+       struct pmcraid_timestamp_data *timestamp_data;
+       dma_addr_t  timestamp_data_baddr;
+
        /* size of configuration table entry, varies based on the firmware */
        u32     config_table_entry_size;
 
@@ -791,6 +805,7 @@ struct pmcraid_instance {
 #define SHUTDOWN_NONE               0x0
 #define SHUTDOWN_NORMAL             0x1
 #define SHUTDOWN_ABBREV             0x2
+       u32 timestamp_error:1; /* set timestamp needed due to out-of-sync time */
 
 };
 
@@ -1056,10 +1071,10 @@ struct pmcraid_passthrough_ioctl_buffer {
 #define PMCRAID_PASSTHROUGH_IOCTL    'F'
 
 #define DRV_IOCTL(n, size) \
-    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
+       _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
 
 #define FMW_IOCTL(n, size) \
-    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
+       _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
 
 /*
  * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
index 2ff4342ae362ecfae79b03f86ec460a08dc40888..bc8194f74625f923b16b05a88faa3b6bb63ffb2e 100644 (file)
@@ -1538,6 +1538,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!fcport)
                return;
 
+       /* Now that the rport has been deleted, set the fcport state to
+          FCS_DEVICE_DEAD */
+       atomic_set(&fcport->state, FCS_DEVICE_DEAD);
+
        /*
         * Transport has effectively 'deleted' the rport, clear
         * all local references.
index fdfbf83a63309316c394bf360d189c8bae7a9b33..31a4121a2be1651543d97cd99be7534c86a6acb9 100644 (file)
@@ -1306,6 +1306,125 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
        return rval;
 }
 
+static int
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
+       uint8_t is_update)
+{
+       uint32_t start = 0;
+       int valid = 0;
+
+       bsg_job->reply->reply_payload_rcv_len = 0;
+
+       if (unlikely(pci_channel_offline(ha->pdev)))
+               return -EINVAL;
+
+       start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+       if (start > ha->optrom_size)
+               return -EINVAL;
+
+       if (ha->optrom_state != QLA_SWAITING)
+               return -EBUSY;
+
+       ha->optrom_region_start = start;
+
+       if (is_update) {
+               if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+                       valid = 1;
+               else if (start == (ha->flt_region_boot * 4) ||
+                   start == (ha->flt_region_fw * 4))
+                       valid = 1;
+               else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+                   IS_QLA8XXX_TYPE(ha))
+                       valid = 1;
+               if (!valid) {
+                       qla_printk(KERN_WARNING, ha,
+                           "Invalid start region 0x%x/0x%x.\n",
+                           start, bsg_job->request_payload.payload_len);
+                       return -EINVAL;
+               }
+
+               ha->optrom_region_size = start +
+                   bsg_job->request_payload.payload_len > ha->optrom_size ?
+                   ha->optrom_size - start :
+                   bsg_job->request_payload.payload_len;
+               ha->optrom_state = QLA_SWRITING;
+       } else {
+               ha->optrom_region_size = start +
+                   bsg_job->reply_payload.payload_len > ha->optrom_size ?
+                   ha->optrom_size - start :
+                   bsg_job->reply_payload.payload_len;
+               ha->optrom_state = QLA_SREADING;
+       }
+
+       ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+       if (!ha->optrom_buffer) {
+               qla_printk(KERN_WARNING, ha,
+                   "Read: Unable to allocate memory for optrom retrieval "
+                   "(%x).\n", ha->optrom_region_size);
+
+               ha->optrom_state = QLA_SWAITING;
+               return -ENOMEM;
+       }
+
+       memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+       return 0;
+}
+
+static int
+qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       int rval = 0;
+
+       rval = qla2x00_optrom_setup(bsg_job, ha, 0);
+       if (rval)
+               return rval;
+
+       ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+           ha->optrom_region_start, ha->optrom_region_size);
+
+       sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+           bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
+           ha->optrom_region_size);
+
+       bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
+       bsg_job->reply->result = DID_OK;
+       vfree(ha->optrom_buffer);
+       ha->optrom_buffer = NULL;
+       ha->optrom_state = QLA_SWAITING;
+       bsg_job->job_done(bsg_job);
+       return rval;
+}
+
+static int
+qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       int rval = 0;
+
+       rval = qla2x00_optrom_setup(bsg_job, ha, 1);
+       if (rval)
+               return rval;
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
+           ha->optrom_region_size);
+
+       ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+           ha->optrom_region_start, ha->optrom_region_size);
+
+       bsg_job->reply->result = DID_OK;
+       vfree(ha->optrom_buffer);
+       ha->optrom_buffer = NULL;
+       ha->optrom_state = QLA_SWAITING;
+       bsg_job->job_done(bsg_job);
+       return rval;
+}
+
 static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
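
qla2x00_optrom_setup() above validates the requested flash window and clamps its size so the read or write never runs past the end of the option ROM: if start plus the payload length exceeds optrom_size, only the remainder from start is used. A stand-alone sketch of that clamping (the example sizes are arbitrary):

#include <stdio.h>

static unsigned int clamp_region(unsigned int start, unsigned int payload_len,
                                 unsigned int optrom_size)
{
        if (start > optrom_size)
                return 0;       /* the driver rejects this case with -EINVAL */

        return start + payload_len > optrom_size ? optrom_size - start
                                                 : payload_len;
}

int main(void)
{
        printf("%#x\n", clamp_region(0x1000, 0x800, 0x2000));  /* 0x800  */
        printf("%#x\n", clamp_region(0x1000, 0x2000, 0x2000)); /* 0x1000 */
        return 0;
}
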
@@ -1328,6 +1447,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
        case QL_VND_FCP_PRIO_CFG_CMD:
                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
 
+       case QL_VND_READ_FLASH:
+               return qla2x00_read_optrom(bsg_job);
+
+       case QL_VND_UPDATE_FLASH:
+               return qla2x00_update_optrom(bsg_job);
+
        default:
                bsg_job->reply->result = (DID_ERROR << 16);
                bsg_job->job_done(bsg_job);
index cc7c52f87a11a3332868ccf2bfac3d9aae31c9d2..074a999c7017a8cd05c4763c49042e464177b2f8 100644 (file)
@@ -14,6 +14,8 @@
 #define QL_VND_A84_MGMT_CMD    0x04
 #define QL_VND_IIDMA           0x05
 #define QL_VND_FCP_PRIO_CFG_CMD        0x06
+#define QL_VND_READ_FLASH      0x07
+#define QL_VND_UPDATE_FLASH    0x08
 
 /* BSG definations for interpreting CommandSent field */
 #define INT_DEF_LB_LOOPBACK_CMD         0
index e1d3ad40a946cc4c206a0755053bfcc67fa60ac2..3a22effced5fa1a94996582b9fd13ccf151d8861 100644 (file)
@@ -1700,9 +1700,7 @@ typedef struct fc_port {
        atomic_t state;
        uint32_t flags;
 
-       int port_login_retry_count;
        int login_retry;
-       atomic_t port_down_timer;
 
        struct fc_rport *rport, *drport;
        u32 supported_classes;
index c33dec827e1ecd3dde4742d67f44f0afe694b83f..9382a816c133104e45108dff7e4a0a2ced2271b8 100644 (file)
@@ -92,6 +92,7 @@ extern int ql2xshiftctondsd;
 extern int ql2xdbwr;
 extern int ql2xdontresethba;
 extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
 extern int ql2xenabledif;
 extern int ql2xenablehba_err_chk;
 extern int ql2xtargetreset;
index 3cafbef40737e55f99d815060f9d22566efe080c..259f511374933cb4d7463098c26e746809e75d1b 100644 (file)
@@ -71,7 +71,7 @@ qla2x00_ctx_sp_free(srb_t *sp)
        struct srb_iocb *iocb = ctx->u.iocb_cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
 
-       del_timer_sync(&iocb->timer);
+       del_timer(&iocb->timer);
        kfree(iocb);
        kfree(ctx);
        mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
@@ -1344,6 +1344,13 @@ cont_alloc:
                qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
                    "firmware dump!!!\n", dump_size / 1024);
 
+               if (ha->fce) {
+                       dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+                           ha->fce_dma);
+                       ha->fce = NULL;
+                       ha->fce_dma = 0;
+               }
+
                if (ha->eft) {
                        dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
                            ha->eft_dma);
@@ -1818,14 +1825,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                qla2x00_init_response_q_entries(rsp);
        }
 
-       spin_lock_irqsave(&ha->vport_slock, flags);
+       spin_lock(&ha->vport_slock);
        /* Clear RSCN queue. */
        list_for_each_entry(vp, &ha->vp_list, list) {
                vp->rscn_in_ptr = 0;
                vp->rscn_out_ptr = 0;
        }
 
-       spin_unlock_irqrestore(&ha->vport_slock, flags);
+       spin_unlock(&ha->vport_slock);
 
        ha->isp_ops->config_rings(vha);
 
@@ -2916,21 +2923,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-       struct qla_hw_data *ha = vha->hw;
-
        fcport->vha = vha;
        fcport->login_retry = 0;
-       fcport->port_login_retry_count = ha->port_down_retry_count *
-           PORT_RETRY_TIME;
-       atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
-           PORT_RETRY_TIME);
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
        qla2x00_iidma_fcport(vha, fcport);
-
-       atomic_set(&fcport->state, FCS_ONLINE);
-
        qla2x00_reg_remote_port(vha, fcport);
+       atomic_set(&fcport->state, FCS_ONLINE);
 }
 
 /*
@@ -3292,8 +3291,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        continue;
 
                /* Bypass ports whose FCP-4 type is not FCP_SCSI */
-               if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
-                   new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
+               if (ql2xgffidenable &&
+                   (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+                   new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
                        continue;
 
                /* Locate matching device in database. */
index 579f02854665a41ab8ae7cdfa0e53155fbcbd374..5f94430b42f03974fda76e9f1f969cedbf3e9047 100644 (file)
@@ -992,8 +992,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
        ha = vha->hw;
 
        DEBUG18(printk(KERN_DEBUG
-           "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
-           vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+           "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
+           vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
 
        cmd_pkt->vp_index = sp->fcport->vp_idx;
 
index e0e43d9e7ed130346d27f86c7f60b933c2fea6cf..1f06ddd9bdd10cdbe6526de1deaaab194a19118e 100644 (file)
@@ -1240,12 +1240,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
        case LSC_SCODE_NPORT_USED:
                data[0] = MBS_LOOP_ID_USED;
                break;
-       case LSC_SCODE_CMD_FAILED:
-               if ((iop[1] & 0xff) == 0x05) {
-                       data[0] = MBS_NOT_LOGGED_IN;
-                       break;
-               }
-               /* Fall through. */
        default:
                data[0] = MBS_COMMAND_ERROR;
                break;
@@ -1431,9 +1425,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
                rsp->status_srb = sp;
 
        DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
-           "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
-           cp->device->channel, cp->device->id, cp->device->lun, cp,
-           cp->serial_number));
+           "cmd=%p\n", __func__, sp->fcport->vha->host_no,
+           cp->device->channel, cp->device->id, cp->device->lun, cp));
        if (sense_len)
                DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
 }
@@ -1757,6 +1750,8 @@ check_scsi_status:
        case CS_INCOMPLETE:
        case CS_PORT_UNAVAILABLE:
        case CS_TIMEOUT:
+       case CS_RESET:
+
                /*
                 * We are going to have the fc class block the rport
                 * while we try to recover so instruct the mid layer
@@ -1781,10 +1776,6 @@ check_scsi_status:
                        qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
                break;
 
-       case CS_RESET:
-               cp->result = DID_TRANSPORT_DISRUPTED << 16;
-               break;
-
        case CS_ABORTED:
                cp->result = DID_RESET << 16;
                break;
@@ -1801,10 +1792,10 @@ out:
        if (logit)
                DEBUG2(qla_printk(KERN_INFO, ha,
                    "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
-                   "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
+                   "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
                    "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
                    cp->device->id, cp->device->lun, comp_status, scsi_status,
-                   cp->result, ox_id, cp->serial_number, cp->cmnd[0],
+                   cp->result, ox_id, cp->cmnd[0],
                    cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
                    resid_len, fw_resid_len));
 
index 800ea926975236214c2846e4aa8b18ad12ebd56c..1830e6e973155684234710e129e564f82176ce2d 100644 (file)
@@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset,
                 "Enable target reset."
                 "Default is 1 - use hw defaults.");
 
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xgffidenable,
+               "Enables GFF_ID checks of port type. "
+               "Default is 0 - Do not use GFF_ID information.");
 
 int ql2xasynctmfenable;
 module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
@@ -255,6 +260,7 @@ static void qla2x00_rst_aen(scsi_qla_host_t *);
 
 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
        struct req_que **, struct rsp_que **);
+static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
 static void qla2x00_sp_free_dma(srb_t *);
 
@@ -539,6 +545,7 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
        srb_t *sp;
        int rval;
 
+       spin_unlock_irq(vha->host->host_lock);
        if (ha->flags.eeh_busy) {
                if (ha->flags.pci_channel_io_perm_failure)
                        cmd->result = DID_NO_CONNECT << 16;
@@ -553,10 +560,6 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
                goto qc24_fail_command;
        }
 
-       /* Close window on fcport/rport state-transitioning. */
-       if (fcport->drport)
-               goto qc24_target_busy;
-
        if (!vha->flags.difdix_supported &&
                scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
                        DEBUG2(qla_printk(KERN_ERR, ha,
@@ -567,15 +570,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
        }
        if (atomic_read(&fcport->state) != FCS_ONLINE) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-                   atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+                       atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
+                       atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        cmd->result = DID_NO_CONNECT << 16;
                        goto qc24_fail_command;
                }
                goto qc24_target_busy;
        }
 
-       spin_unlock_irq(vha->host->host_lock);
-
        sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
        if (!sp)
                goto qc24_host_busy_lock;
@@ -597,9 +599,11 @@ qc24_host_busy_lock:
        return SCSI_MLQUEUE_HOST_BUSY;
 
 qc24_target_busy:
+       spin_lock_irq(vha->host->host_lock);
        return SCSI_MLQUEUE_TARGET_BUSY;
 
 qc24_fail_command:
+       spin_lock_irq(vha->host->host_lock);
        done(cmd);
 
        return 0;
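
The queuecommand changes above move the host_lock release to the very top of the function and reacquire the lock on the qc24_target_busy and qc24_fail_command exit paths, so the midlayer gets the lock back in the state it expects while the issue path itself runs unlocked. The pthread sketch below only illustrates that drop-early, reacquire-on-exit discipline; the mutex and the fail_early flag are stand-ins, not the SCSI midlayer locking API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with host_lock held; returns with host_lock held again. */
static int queuecommand_sketch(int fail_early)
{
        pthread_mutex_unlock(&host_lock);       /* drop the caller's lock early */

        if (fail_early) {
                pthread_mutex_lock(&host_lock); /* reacquire on the error path */
                return -1;
        }

        printf("issue command without host_lock held\n");

        pthread_mutex_lock(&host_lock);         /* reacquire before returning */
        return 0;
}

int main(void)
{
        pthread_mutex_lock(&host_lock);         /* caller enters with the lock held */
        queuecommand_sketch(0);
        pthread_mutex_unlock(&host_lock);
        return 0;
}
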
@@ -824,81 +828,58 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
        srb_t *sp;
-       int ret, i;
+       int ret;
        unsigned int id, lun;
-       unsigned long serial;
        unsigned long flags;
        int wait = 0;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = vha->req;
-       srb_t *spt;
-       int got_ref = 0;
 
        fc_block_scsi_eh(cmd);
 
        if (!CMD_SP(cmd))
                return SUCCESS;
 
-       ret = SUCCESS;
-
        id = cmd->device->id;
        lun = cmd->device->lun;
-       serial = cmd->serial_number;
-       spt = (srb_t *) CMD_SP(cmd);
-       if (!spt)
-               return SUCCESS;
 
-       /* Check active list for command command. */
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
-               sp = req->outstanding_cmds[i];
-
-               if (sp == NULL)
-                       continue;
-               if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
-                   !IS_PROT_IO(sp))
-                       continue;
-               if (sp->cmd != cmd)
-                       continue;
+       sp = (srb_t *) CMD_SP(cmd);
+       if (!sp) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return SUCCESS;
+       }
 
-               DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
-               " pid=%ld.\n", __func__, vha->host_no, sp, serial));
+       DEBUG2(printk("%s(%ld): aborting sp %p from RISC.\n",
+           __func__, vha->host_no, sp));
 
-               /* Get a reference to the sp and drop the lock.*/
-               sp_get(sp);
-               got_ref++;
+       /* Get a reference to the sp and drop the lock.*/
+       sp_get(sp);
 
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
-               if (ha->isp_ops->abort_command(sp)) {
-                       DEBUG2(printk("%s(%ld): abort_command "
-                       "mbx failed.\n", __func__, vha->host_no));
-                       ret = FAILED;
-               } else {
-                       DEBUG3(printk("%s(%ld): abort_command "
-                       "mbx success.\n", __func__, vha->host_no));
-                       wait = 1;
-               }
-               spin_lock_irqsave(&ha->hardware_lock, flags);
-               break;
-       }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (ha->isp_ops->abort_command(sp)) {
+               DEBUG2(printk("%s(%ld): abort_command "
+               "mbx failed.\n", __func__, vha->host_no));
+               ret = FAILED;
+       } else {
+               DEBUG3(printk("%s(%ld): abort_command "
+               "mbx success.\n", __func__, vha->host_no));
+               wait = 1;
+       }
+       qla2x00_sp_compl(ha, sp);
 
        /* Wait for the command to be returned. */
        if (wait) {
                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
                        qla_printk(KERN_ERR, ha,
-                           "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
-                           "%x.\n", vha->host_no, id, lun, serial, ret);
+                           "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
+                           vha->host_no, id, lun, ret);
                        ret = FAILED;
                }
        }
 
-       if (got_ref)
-               qla2x00_sp_compl(ha, sp);
-
        qla_printk(KERN_INFO, ha,
-           "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
-           vha->host_no, id, lun, wait, serial, ret);
+           "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
+           vha->host_no, id, lun, wait, ret);
 
        return ret;
 }
@@ -1043,13 +1024,11 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
        int ret = FAILED;
        unsigned int id, lun;
-       unsigned long serial;
 
        fc_block_scsi_eh(cmd);
 
        id = cmd->device->id;
        lun = cmd->device->lun;
-       serial = cmd->serial_number;
 
        if (!fcport)
                return ret;
@@ -1104,14 +1083,12 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        struct qla_hw_data *ha = vha->hw;
        int ret = FAILED;
        unsigned int id, lun;
-       unsigned long serial;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
        fc_block_scsi_eh(cmd);
 
        id = cmd->device->id;
        lun = cmd->device->lun;
-       serial = cmd->serial_number;
 
        if (!fcport)
                return ret;
@@ -1974,6 +1951,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ha->bars = bars;
        ha->mem_only = mem_only;
        spin_lock_init(&ha->hardware_lock);
+       spin_lock_init(&ha->vport_slock);
 
        /* Set ISP-type information. */
        qla2x00_set_isp_flags(ha);
@@ -2341,6 +2319,42 @@ probe_out:
        return ret;
 }
 
+static void
+qla2x00_shutdown(struct pci_dev *pdev)
+{
+       scsi_qla_host_t *vha;
+       struct qla_hw_data  *ha;
+
+       vha = pci_get_drvdata(pdev);
+       ha = vha->hw;
+
+       /* Turn-off FCE trace */
+       if (ha->flags.fce_enabled) {
+               qla2x00_disable_fce_trace(vha, NULL, NULL);
+               ha->flags.fce_enabled = 0;
+       }
+
+       /* Turn-off EFT trace */
+       if (ha->eft)
+               qla2x00_disable_eft_trace(vha);
+
+       /* Stop currently executing firmware. */
+       qla2x00_try_to_stop_firmware(vha);
+
+       /* Turn adapter off line */
+       vha->flags.online = 0;
+
+       /* turn-off interrupts on the card */
+       if (ha->interrupts_on) {
+               vha->flags.init_done = 0;
+               ha->isp_ops->disable_intrs(ha);
+       }
+
+       qla2x00_free_irqs(vha);
+
+       qla2x00_free_fw_dump(ha);
+}
+
 static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
@@ -2597,12 +2611,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
                        continue;
                if (atomic_read(&fcport->state) == FCS_ONLINE) {
+                       atomic_set(&fcport->state, FCS_DEVICE_LOST);
                        if (defer)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
                        else if (vha->vp_idx == fcport->vp_idx)
                                qla2x00_schedule_rport_del(vha, fcport, defer);
                }
-               atomic_set(&fcport->state, FCS_DEVICE_LOST);
        }
 }
 
@@ -2830,28 +2844,48 @@ fail:
 }
 
 /*
-* qla2x00_mem_free
-*      Frees all adapter allocated memory.
+* qla2x00_free_fw_dump
+*      Frees fw dump stuff.
 *
 * Input:
-*      ha = adapter block pointer.
+*      ha = adapter block pointer.
 */
 static void
-qla2x00_mem_free(struct qla_hw_data *ha)
+qla2x00_free_fw_dump(struct qla_hw_data *ha)
 {
-       if (ha->srb_mempool)
-               mempool_destroy(ha->srb_mempool);
-
        if (ha->fce)
                dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
-               ha->fce_dma);
+                   ha->fce_dma);
 
        if (ha->fw_dump) {
                if (ha->eft)
                        dma_free_coherent(&ha->pdev->dev,
-                       ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+                           ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
                vfree(ha->fw_dump);
        }
+       ha->fce = NULL;
+       ha->fce_dma = 0;
+       ha->eft = NULL;
+       ha->eft_dma = 0;
+       ha->fw_dump = NULL;
+       ha->fw_dumped = 0;
+       ha->fw_dump_reading = 0;
+}
+
+/*
+* qla2x00_mem_free
+*      Frees all adapter allocated memory.
+*
+* Input:
+*      ha = adapter block pointer.
+*/
+static void
+qla2x00_mem_free(struct qla_hw_data *ha)
+{
+       qla2x00_free_fw_dump(ha);
+
+       if (ha->srb_mempool)
+               mempool_destroy(ha->srb_mempool);
 
        if (ha->dcbx_tlv)
                dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
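
The refactor above splits qla2x00_free_fw_dump() out of qla2x00_mem_free() and has it clear every pointer and DMA handle after freeing, so the helper can also be called from the new shutdown path without risking a double free. The sketch below shows the same free-then-reset idiom with plain malloc/free in place of the DMA and vmalloc allocators.

#include <stdio.h>
#include <stdlib.h>

struct dump_state {
        void *fce;
        void *fw_dump;
};

static void free_fw_dump_sketch(struct dump_state *s)
{
        free(s->fce);
        free(s->fw_dump);
        s->fce = NULL;          /* clearing the pointers makes the call idempotent */
        s->fw_dump = NULL;
}

int main(void)
{
        struct dump_state s = { malloc(64), malloc(64) };

        free_fw_dump_sketch(&s);
        free_fw_dump_sketch(&s);        /* second call is harmless */
        printf("done\n");
        return 0;
}
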
@@ -2925,8 +2959,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        ha->srb_mempool = NULL;
        ha->ctx_mempool = NULL;
-       ha->eft = NULL;
-       ha->eft_dma = 0;
        ha->sns_cmd = NULL;
        ha->sns_cmd_dma = 0;
        ha->ct_sns = NULL;
@@ -2946,10 +2978,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
-
-       ha->fw_dump = NULL;
-       ha->fw_dumped = 0;
-       ha->fw_dump_reading = 0;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3547,11 +3575,9 @@ void
 qla2x00_timer(scsi_qla_host_t *vha)
 {
        unsigned long   cpu_flags = 0;
-       fc_port_t       *fcport;
        int             start_dpc = 0;
        int             index;
        srb_t           *sp;
-       int             t;
        uint16_t        w;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req;
@@ -3567,34 +3593,6 @@ qla2x00_timer(scsi_qla_host_t *vha)
        /* Hardware read to raise pending EEH errors during mailbox waits. */
        if (!pci_channel_offline(ha->pdev))
                pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
-       /*
-        * Ports - Port down timer.
-        *
-        * Whenever, a port is in the LOST state we start decrementing its port
-        * down timer every second until it reaches zero. Once  it reaches zero
-        * the port it marked DEAD.
-        */
-       t = 0;
-       list_for_each_entry(fcport, &vha->vp_fcports, list) {
-               if (fcport->port_type != FCT_TARGET)
-                       continue;
-
-               if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
-
-                       if (atomic_read(&fcport->port_down_timer) == 0)
-                               continue;
-
-                       if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
-                               atomic_set(&fcport->state, FCS_DEVICE_DEAD);
-
-                       DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
-                           "%d remaining\n",
-                           vha->host_no,
-                           t, atomic_read(&fcport->port_down_timer)));
-               }
-               t++;
-       } /* End of for fcport  */
-
 
        /* Loop down handler. */
        if (atomic_read(&vha->loop_down_timer) > 0 &&
@@ -4079,6 +4077,7 @@ static struct pci_driver qla2xxx_pci_driver = {
        .id_table       = qla2xxx_pci_tbl,
        .probe          = qla2x00_probe_one,
        .remove         = qla2x00_remove_one,
+       .shutdown       = qla2x00_shutdown,
        .err_handler    = &qla2xxx_err_handler,
 };
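The new .shutdown hook lets the driver quiesce the HBA on reboot or power-off, not only on hot removal. The callback has the standard pci_driver shape; qla2x00_shutdown() itself is not part of this excerpt, so the body below is only a plausible sketch of what such a handler does:

        static void qla2x00_shutdown(struct pci_dev *pdev)
        {
                scsi_qla_host_t *vha = pci_get_drvdata(pdev);
                struct qla_hw_data *ha = vha->hw;

                /* typical shutdown duties: mask interrupts and stop DMA so
                 * the firmware is idle across the reboot or kexec */
                ha->isp_ops->disable_intrs(ha);
        }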
 
index cbceb0ebabf7c41888ec091ab77f61e1c9b20f48..edcf048215dd18326181a8cdca07acf3198b3cb3 100644 (file)
@@ -30,3 +30,104 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
        printk(KERN_INFO "\n");
 }
 
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+       uint8_t i;
+
+       if (is_qla8022(ha)) {
+               for (i = 1; i < MBOX_REG_COUNT; i++)
+                       printk(KERN_INFO "mailbox[%d]     = 0x%08X\n",
+                           i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
+               return;
+       }
+
+       for (i = 0; i < MBOX_REG_COUNT; i++) {
+               printk(KERN_INFO "0x%02X mailbox[%d]      = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+                   readw(&ha->reg->mailbox[i]));
+       }
+
+       printk(KERN_INFO "0x%02X flash_address            = 0x%08X\n",
+           (uint8_t) offsetof(struct isp_reg, flash_address),
+           readw(&ha->reg->flash_address));
+       printk(KERN_INFO "0x%02X flash_data               = 0x%08X\n",
+           (uint8_t) offsetof(struct isp_reg, flash_data),
+           readw(&ha->reg->flash_data));
+       printk(KERN_INFO "0x%02X ctrl_status              = 0x%08X\n",
+           (uint8_t) offsetof(struct isp_reg, ctrl_status),
+           readw(&ha->reg->ctrl_status));
+
+       if (is_qla4010(ha)) {
+               printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+                   readw(&ha->reg->u1.isp4010.nvram));
+       } else if (is_qla4022(ha) | is_qla4032(ha)) {
+               printk(KERN_INFO "0x%02X intr_mask        = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
+                   readw(&ha->reg->u1.isp4022.intr_mask));
+               printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+                   readw(&ha->reg->u1.isp4022.nvram));
+               printk(KERN_INFO "0x%02X semaphore        = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
+                   readw(&ha->reg->u1.isp4022.semaphore));
+       }
+       printk(KERN_INFO "0x%02X req_q_in                 = 0x%08X\n",
+           (uint8_t) offsetof(struct isp_reg, req_q_in),
+           readw(&ha->reg->req_q_in));
+       printk(KERN_INFO "0x%02X rsp_q_out                = 0x%08X\n",
+           (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+           readw(&ha->reg->rsp_q_out));
+
+       if (is_qla4010(ha)) {
+               printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
+                   readw(&ha->reg->u2.isp4010.ext_hw_conf));
+               printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
+                   readw(&ha->reg->u2.isp4010.port_ctrl));
+               printk(KERN_INFO "0x%02X port_status      = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
+                   readw(&ha->reg->u2.isp4010.port_status));
+               printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
+                   readw(&ha->reg->u2.isp4010.req_q_out));
+               printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+                   readw(&ha->reg->u2.isp4010.gp_out));
+               printk(KERN_INFO "0x%02X gp_in            = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+                   readw(&ha->reg->u2.isp4010.gp_in));
+               printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+                   offsetof(struct isp_reg, u2.isp4010.port_err_status),
+                   readw(&ha->reg->u2.isp4010.port_err_status));
+       } else if (is_qla4022(ha) | is_qla4032(ha)) {
+               printk(KERN_INFO "Page 0 Registers:\n");
+               printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n", (uint8_t)
+                   offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
+                   readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+               printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n", (uint8_t)
+                   offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
+                   readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+               printk(KERN_INFO "0x%02X port_status      = 0x%08X\n", (uint8_t)
+                   offsetof(struct isp_reg, u2.isp4022.p0.port_status),
+                   readw(&ha->reg->u2.isp4022.p0.port_status));
+               printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
+                   readw(&ha->reg->u2.isp4022.p0.gp_out));
+               printk(KERN_INFO "0x%02X gp_in            = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+                   readw(&ha->reg->u2.isp4022.p0.gp_in));
+               printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+                   offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
+                   readw(&ha->reg->u2.isp4022.p0.port_err_status));
+               printk(KERN_INFO "Page 1 Registers:\n");
+               writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+                   &ha->reg->ctrl_status);
+               printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+                   (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
+                   readw(&ha->reg->u2.isp4022.p1.req_q_out));
+               writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+                   &ha->reg->ctrl_status);
+       }
+}
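On ISP4022/4032 parts some registers sit on a second page, reached by writing a page-select value into ctrl_status before the read and switching back afterwards, as the tail of the function above does. The bare access pattern, isolated (macro and field names as used above):

        u32 val;

        /* flip ctrl_status to the host-memory-config page, read the paged
         * register, then restore the port-control/status page */
        writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
            &ha->reg->ctrl_status);
        val = readw(&ha->reg->u2.isp4022.p1.req_q_out);
        writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
            &ha->reg->ctrl_status);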
index 9dc0a6616edd566656154413a96f41043e886447..0f3bfc3da5cf6670287393bb4ca64baee59db2e4 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
+#include <linux/aer.h>
 
 #include <net/tcp.h>
 #include <scsi/scsi.h>
 #include "ql4_dbg.h"
 #include "ql4_nx.h"
 
-#if defined(CONFIG_PCIEAER)
-#include <linux/aer.h>
-#else
-/* AER releated */
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
-       return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
-       return -EINVAL;
-}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
-       return -EINVAL;
-}
-#endif
-
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
 #define PCI_DEVICE_ID_QLOGIC_ISP4010   0x4010
 #endif
@@ -179,6 +162,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 #define IOCB_TOV_MARGIN                        10
 #define RELOGIN_TOV                    18
 #define ISNS_DEREG_TOV                 5
+#define HBA_ONLINE_TOV                 30
 
 #define MAX_RESET_HA_RETRIES           2
 
index 0336c6db8cb3b338a15bf42608dcdfc1c12c8bfd..5e757d7fff7dc4eb0ae23d01ec93c4b6550d1ce6 100644 (file)
@@ -416,6 +416,8 @@ struct qla_flt_region {
 #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED       0x802C
 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED      0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD                0x802E
+#define MBOX_ASTS_TXSCVR_INSERTED              0x8130
+#define MBOX_ASTS_TXSCVR_REMOVED               0x8131
 
 #define ISNS_EVENT_DATA_RECEIVED               0x0000
 #define ISNS_EVENT_CONNECTION_OPENED           0x0001
@@ -446,6 +448,7 @@ struct addr_ctrl_blk {
 #define         FWOPT_SESSION_MODE               0x0040
 #define         FWOPT_INITIATOR_MODE             0x0020
 #define         FWOPT_TARGET_MODE                0x0010
+#define         FWOPT_ENABLE_CRBDB               0x8000
 
        uint16_t exec_throttle; /* 04-05 */
        uint8_t zio_count;      /* 06 */
index 95a26fb1626c8c21af2473cc02c75fa654de08ce..6575a47501e57667f3f7c8dc2cae73c7c7fb5876 100644 (file)
@@ -94,6 +94,7 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha);
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
 void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
+void qla4xxx_dump_registers(struct scsi_qla_host *ha);
 
 void qla4_8xxx_pci_config(struct scsi_qla_host *);
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
index 4c9be77ee70b81a21a9bda19cd8116b3352bed98..dc01fa3da5d11be799f2c826bc324424e2bd3b76 100644 (file)
@@ -1207,8 +1207,8 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
                        break;
 
                DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
-                             "firmware to complete... ctrl_sts=0x%x\n",
-                             ha->host_no, __func__, ctrl_status));
+                   "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
+                   ha->host_no, __func__, ctrl_status, max_wait_time));
 
                msleep_interruptible(250);
        } while (!time_after_eq(jiffies, max_wait_time));
@@ -1459,6 +1459,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
 exit_init_online:
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
+       if (is_qla8022(ha) && (status == QLA_ERROR)) {
+               /* Since interrupts are registered in start_firmware for
+                * 82xx, release them here if initialize_adapter fails */
+               qla4xxx_free_irqs(ha);
+       }
+
        DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
            status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
        return status;
index 4ef9ba112ee87ecd55e4815a6572db9d9bf43106..5ae49fd8784639c8802e24c8ce210fd0f9cf1ee0 100644 (file)
@@ -202,19 +202,11 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
 void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
 {
        uint32_t dbval = 0;
-       unsigned long wtime;
 
        dbval = 0x14 | (ha->func_num << 5);
        dbval = dbval | (0 << 8) | (ha->request_in << 16);
-       writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
-       wmb();
 
-       wtime = jiffies + (2 * HZ);
-       while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
-           !time_after_eq(jiffies, wtime)) {
-               writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
-               wmb();
-       }
+       qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
 }
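Where the old code wrote the doorbell through an ioremap()ed pointer and then busy-waited up to 2*HZ for the read-back to match, ringing the request-queue doorbell is now a single CRB write of the current request_in index; nx_db_wr_ptr itself becomes a CRB CAM RAM offset (QLA82XX_CAM_RAM_DB1/DB2, chosen per PCI function in the later iospace_config hunk). The two styles side by side, for reference:

        /* before: MMIO write plus poll until the hardware echoes the value */
        writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
        wmb();
        wtime = jiffies + (2 * HZ);
        while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
            !time_after_eq(jiffies, wtime)) {
                writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
                wmb();
        }

        /* after: one CRB register write, no polling */
        qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);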
 
 /**
index 2a1ab63f3eb0165de7a147be1eeef222c0b03e9a..7c33fd5943d50315a22ac5ff64de69799fe795a2 100644 (file)
@@ -72,7 +72,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
 {
        struct srb *srb = ha->status_srb;
        struct scsi_cmnd *cmd;
-       uint8_t sense_len;
+       uint16_t sense_len;
 
        if (srb == NULL)
                return;
@@ -487,6 +487,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                case MBOX_ASTS_SYSTEM_ERROR:
                        /* Log Mailbox registers */
                        ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
+                       qla4xxx_dump_registers(ha);
+
                        if (ql4xdontresethba) {
                                DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
                                    ha->host_no, __func__));
@@ -621,6 +623,18 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
                        }
                        break;
 
+               case MBOX_ASTS_TXSCVR_INSERTED:
+                       DEBUG2(printk(KERN_WARNING
+                           "scsi%ld: AEN %04x Transceiver"
+                           " inserted\n",  ha->host_no, mbox_sts[0]));
+                       break;
+
+               case MBOX_ASTS_TXSCVR_REMOVED:
+                       DEBUG2(printk(KERN_WARNING
+                           "scsi%ld: AEN %04x Transceiver"
+                           " removed\n",  ha->host_no, mbox_sts[0]));
+                       break;
+
                default:
                        DEBUG2(printk(KERN_WARNING
                                      "scsi%ld: AEN %04x UNKNOWN\n",
index 90021704d8cac0e37a30c2b788e8f40a17875229..2d2f9c879bfd57994366e050f9a82bc2541a4030 100644 (file)
@@ -299,6 +299,10 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 {
        memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
        memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+
+       if (is_qla8022(ha))
+               qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+
        mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
        mbox_cmd[1] = 0;
        mbox_cmd[2] = LSDW(init_fw_cb_dma);
@@ -472,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
        init_fw_cb->fw_options |=
                __constant_cpu_to_le16(FWOPT_SESSION_MODE |
                                       FWOPT_INITIATOR_MODE);
+
+       if (is_qla8022(ha))
+               init_fw_cb->fw_options |=
+                   __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+
        init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
 
        if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
@@ -592,7 +601,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
        }
 
        ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
-           ha->host_no, mbox_cmd[2]);
+           ha->host_no, mbox_sts[2]);
 
        return QLA_SUCCESS;
 }
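The reply to a mailbox command comes back in mbox_sts[]; mbox_cmd[] only holds what the driver sent, so the old message was printing the request word rather than the reported IOCB count. A hedged sketch of the exchange, assuming the driver's usual qla4xxx_mailbox_command(ha, in_count, out_count, mbox_cmd, mbox_sts) entry point and the MBOX_CMD_GET_FW_STATUS opcode:

        uint32_t iocbs_available = 0;

        mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
        if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
                                    &mbox_cmd[0], &mbox_sts[0]) == QLA_SUCCESS)
                iocbs_available = mbox_sts[2];  /* the reply, not mbox_cmd[2] */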
index 449256f2c5f8dc213226e2a81fa44e85eab18990..474b10d713647ea8aafde7a9057a49450bcf99e9 100644 (file)
@@ -839,8 +839,11 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
                done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
                if (done == 1)
                        break;
-               if (timeout >= qla4_8xxx_rom_lock_timeout)
+               if (timeout >= qla4_8xxx_rom_lock_timeout) {
+                       ql4_printk(KERN_WARNING, ha,
+                           "%s: Failed to acquire rom lock", __func__);
                        return -1;
+               }
 
                timeout++;
 
@@ -1078,21 +1081,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
        return 0;
 }
 
-static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
-{
-       u32 val = 0;
-       val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
-       val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
-       if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
-               printk("Memory DIMM SPD not programmed.  Assumed valid.\n");
-               return 1;
-       } else if (val) {
-               printk("Memory DIMM type incorrect.  Info:%08X.\n", val);
-               return 2;
-       }
-       return 0;
-}
-
 static int
 qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
@@ -1377,8 +1365,6 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
 
                } while (--retries);
 
-               qla4_8xxx_check_for_bad_spd(ha);
-
                if (!retries) {
                        pegtune_val = qla4_8xxx_rd_32(ha,
                                QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1540,14 +1526,31 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
        ql4_printk(KERN_INFO, ha,
            "FW: Attempting to load firmware from flash...\n");
        rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
-       if (rval == QLA_SUCCESS)
-               return rval;
 
-       ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
+                   " FAILED...\n");
+               return rval;
+       }
 
        return rval;
 }
 
+static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+       if (qla4_8xxx_rom_lock(ha)) {
+               /* Someone else is holding the lock. */
+               dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
+       }
+
+       /*
+        * Either we got the lock, or someone
+        * else died while holding it.
+        * In either case, unlock.
+        */
+       qla4_8xxx_rom_unlock(ha);
+}
+
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
@@ -1557,11 +1560,12 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
 static int
 qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 {
-       int rval, i, timeout;
+       int rval = QLA_ERROR;
+       int i, timeout;
        uint32_t old_count, count;
+       int need_reset = 0, peg_stuck = 1;
 
-       if (qla4_8xxx_need_reset(ha))
-               goto dev_initialize;
+       need_reset = qla4_8xxx_need_reset(ha);
 
        old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
 
@@ -1570,12 +1574,30 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
                if (timeout) {
                        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                           QLA82XX_DEV_FAILED);
-                       return QLA_ERROR;
+                       return rval;
                }
 
                count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
                if (count != old_count)
+                       peg_stuck = 0;
+       }
+
+       if (need_reset) {
+               /* We are trying to perform a recovery here. */
+               if (peg_stuck)
+                       qla4_8xxx_rom_lock_recovery(ha);
+               goto dev_initialize;
+       } else  {
+               /* Start of day for this ha context. */
+               if (peg_stuck) {
+                       /* Either we are the first or recovery in progress. */
+                       qla4_8xxx_rom_lock_recovery(ha);
+                       goto dev_initialize;
+               } else {
+                       /* Firmware already running. */
+                       rval = QLA_SUCCESS;
                        goto dev_ready;
+               }
        }
 
 dev_initialize:
@@ -1601,7 +1623,7 @@ dev_ready:
        ql4_printk(KERN_INFO, ha, "HW State: READY\n");
        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
 
-       return QLA_SUCCESS;
+       return rval;
 }
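The bootstrap decision now rests on two independent observations: whether another function has flagged a reset (need_reset) and whether the firmware's QLA82XX_PEG_ALIVE_COUNTER stopped advancing during the sampling loop (peg_stuck). Read as a table, the hunk above does the following, which collapses to the short equivalent form below it:

        /*
         *  need_reset  peg_stuck   action
         *  ----------  ---------   ------------------------------------------
         *     yes         yes      rom_lock_recovery, then reload firmware
         *     yes         no       reload firmware (dev_initialize)
         *     no          yes      first boot or stale lock holder:
         *                          rom_lock_recovery, then reload firmware
         *     no          no       firmware already running: report DEV_READY
         */
        if (need_reset || peg_stuck) {
                if (peg_stuck)
                        qla4_8xxx_rom_lock_recovery(ha);
                goto dev_initialize;
        }
        rval = QLA_SUCCESS;
        goto dev_ready;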
 
 /**
@@ -1764,20 +1786,9 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
        int retval;
        retval = qla4_8xxx_device_state_handler(ha);
 
-       if (retval == QLA_SUCCESS &&
-           !test_bit(AF_INIT_DONE, &ha->flags)) {
+       if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
                retval = qla4xxx_request_irqs(ha);
-               if (retval != QLA_SUCCESS) {
-                       ql4_printk(KERN_WARNING, ha,
-                           "Failed to reserve interrupt %d already in use.\n",
-                           ha->pdev->irq);
-               } else {
-                       set_bit(AF_IRQ_ATTACHED, &ha->flags);
-                       ha->host->irq = ha->pdev->irq;
-                       ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
-                           __func__, ha->pdev->irq);
-               }
-       }
+
        return retval;
 }
 
index 931ad3f1e918c12b1289839d8ea9a8d556658475..ff689bf530071a1978fe85bce5e56082b2570e8b 100644 (file)
@@ -24,7 +24,6 @@
 
 #define CRB_CMDPEG_STATE               QLA82XX_REG(0x50)
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
-#define BOOT_LOADER_DIMM_STATUS                QLA82XX_REG(0x54)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
 
 #define QLA82XX_HW_H0_CH_HUB_ADR       0x05
 # define QLA82XX_CAM_RAM_BASE  (QLA82XX_CRB_CAM + 0x02000)
 # define QLA82XX_CAM_RAM(reg)  (QLA82XX_CAM_RAM_BASE + (reg))
 
-#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000
-#define QLA82XX_BOOT_LOADER_MN_ISSUE   0xff00ffff
 #define QLA82XX_PORT_MODE_ADDR         (QLA82XX_CAM_RAM(0x24))
 #define QLA82XX_PEG_HALT_STATUS1       (QLA82XX_CAM_RAM(0xa8))
 #define QLA82XX_PEG_HALT_STATUS2       (QLA82XX_CAM_RAM(0xac))
 #define QLA82XX_PEG_ALIVE_COUNTER      (QLA82XX_CAM_RAM(0xb0))
+#define QLA82XX_CAM_RAM_DB1            (QLA82XX_CAM_RAM(0x1b0))
+#define QLA82XX_CAM_RAM_DB2            (QLA82XX_CAM_RAM(0x1b4))
 
 #define HALT_STATUS_UNRECOVERABLE      0x80000000
 #define HALT_STATUS_RECOVERABLE                0x40000000
index 370d40ff15296ff8c1c19f3294a011ba94450b75..f4cd846abf6dcf77dc7d6bd7a855437ba6a1868f 100644 (file)
@@ -167,8 +167,6 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
                              "of (%d) secs exhausted, marking device DEAD.\n",
                              ha->host_no, __func__, ddb_entry->fw_ddb_index,
                              QL4_SESS_RECOVERY_TMO));
-
-               qla4xxx_wake_dpc(ha);
        }
 }
 
@@ -573,10 +571,6 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                if (ha->nx_pcibase)
                        iounmap(
                            (struct device_reg_82xx __iomem *)ha->nx_pcibase);
-
-               if (ha->nx_db_wr_ptr)
-                       iounmap(
-                           (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
        } else if (ha->reg)
                iounmap(ha->reg);
        pci_release_regions(ha->pdev);
@@ -692,7 +686,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
                        qla4xxx_wake_dpc(ha);
                        qla4xxx_mailbox_premature_completion(ha);
                }
-       }
+       } else
+               ha->seconds_since_last_heartbeat = 0;
+
        ha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
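Previously seconds_since_last_heartbeat only ever grew; the added else branch zeroes it as soon as the firmware counter moves again, so an isolated missed tick no longer accumulates toward the recovery threshold. The enclosing test is outside this hunk, so the following is a reconstruction of the resulting watchdog shape, not a quotation:

        if (fw_heartbeat_counter == ha->fw_heartbeat_counter) {
                /* counter stalled: accumulate and, past the driver's
                 * threshold, wake the DPC thread for recovery */
                ha->seconds_since_last_heartbeat++;
        } else {
                ha->seconds_since_last_heartbeat = 0;   /* ticking again */
        }
        ha->fw_heartbeat_counter = fw_heartbeat_counter;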
 
@@ -885,7 +881,13 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
                /* Find a command that hasn't completed. */
                for (index = 0; index < ha->host->can_queue; index++) {
                        cmd = scsi_host_find_tag(ha->host, index);
-                       if (cmd != NULL)
+                       /*
+                        * We cannot just check if the index is valid,
+                        * becase if we are run from the scsi eh, then
+                        * because if we are run from the scsi eh, then
+                        * the tag from being released.
+                        */
+                       if (cmd != NULL && CMD_SP(cmd))
                                break;
                }
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
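The added CMD_SP() test matters because under SCSI error handling the block layer keeps the failed command's tag allocated, so scsi_host_find_tag() keeps returning the command even after the driver has completed it; checking the driver's private pointer as well makes the wait loop count only commands the HBA still owns. The scan, in isolation:

        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (index = 0; index < ha->host->can_queue; index++) {
                struct scsi_cmnd *cmd = scsi_host_find_tag(ha->host, index);

                /* a live CMD_SP means the firmware has not returned it yet */
                if (cmd != NULL && CMD_SP(cmd))
                        break;
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);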
@@ -937,11 +939,14 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
 {
        uint32_t max_wait_time;
        unsigned long flags = 0;
-       int status = QLA_ERROR;
+       int status;
        uint32_t ctrl_status;
 
-       qla4xxx_hw_reset(ha);
+       status = qla4xxx_hw_reset(ha);
+       if (status != QLA_SUCCESS)
+               return status;
 
+       status = QLA_ERROR;
        /* Wait until the Network Reset Intr bit is cleared */
        max_wait_time = RESET_INTR_TOV;
        do {
@@ -1101,7 +1106,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                    ha->host_no, __func__));
                status = ha->isp_ops->reset_firmware(ha);
                if (status == QLA_SUCCESS) {
-                       qla4xxx_cmd_wait(ha);
+                       if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+                               qla4xxx_cmd_wait(ha);
                        ha->isp_ops->disable_intrs(ha);
                        qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                        qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -1118,7 +1124,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
         * or if stop_firmware fails for ISP-82xx.
         * This is the default case for ISP-4xxx */
        if (!is_qla8022(ha) || reset_chip) {
-               qla4xxx_cmd_wait(ha);
+               if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+                       qla4xxx_cmd_wait(ha);
                qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
                DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -1471,24 +1478,10 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
        db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
        db_len = pci_resource_len(pdev, 4);
 
-       /* mapping of doorbell write pointer */
-       ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
-           (ha->pdev->devfn << 12), 4);
-       if (!ha->nx_db_wr_ptr) {
-               printk(KERN_ERR
-                   "cannot remap MMIO doorbell-write (%s), aborting\n",
-                   pci_name(pdev));
-               goto iospace_error_exit;
-       }
-       /* mapping of doorbell read pointer */
-       ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
-           (ha->pdev->devfn * 8);
-       if (!ha->nx_db_rd_ptr)
-               printk(KERN_ERR
-                   "cannot remap MMIO doorbell-read (%s), aborting\n",
-                   pci_name(pdev));
-       return 0;
+       ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+           QLA82XX_CAM_RAM_DB2);
 
+       return 0;
 iospace_error_exit:
        return -ENOMEM;
 }
@@ -1960,13 +1953,11 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
 {
        unsigned long wait_online;
 
-       wait_online = jiffies + (30 * HZ);
+       wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
        while (time_before(jiffies, wait_online)) {
 
                if (adapter_up(ha))
                        return QLA_SUCCESS;
-               else if (ha->retry_reset_ha_cnt == 0)
-                       return QLA_ERROR;
 
                msleep(2000);
        }
@@ -2021,6 +2012,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
        unsigned int id = cmd->device->id;
        unsigned int lun = cmd->device->lun;
        unsigned long serial = cmd->serial_number;
+       unsigned long flags;
        struct srb *srb = NULL;
        int ret = SUCCESS;
        int wait = 0;
@@ -2029,12 +2021,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
            "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
            ha->host_no, id, lun, cmd, serial);
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        srb = (struct srb *) CMD_SP(cmd);
-
-       if (!srb)
+       if (!srb) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return SUCCESS;
-
+       }
        kref_get(&srb->srb_ref);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
                DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
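Looking up the SRB and taking a reference now happen under hardware_lock: without it, the command could complete on another CPU between the CMD_SP() read and the kref_get(), leaving the abort path holding a pointer to a freed SRB. The locked lookup-and-pin, isolated from the hunk above:

        spin_lock_irqsave(&ha->hardware_lock, flags);
        srb = (struct srb *) CMD_SP(cmd);
        if (!srb) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return SUCCESS;                 /* already completed */
        }
        kref_get(&srb->srb_ref);                /* pin before dropping the lock */
        spin_unlock_irqrestore(&ha->hardware_lock, flags);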
@@ -2267,6 +2261,8 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                qla4xxx_mailbox_premature_completion(ha);
                qla4xxx_free_irqs(ha);
                pci_disable_device(pdev);
+               /* Return back all IOs */
+               qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                set_bit(AF_EEH_BUSY, &ha->flags);
@@ -2290,17 +2286,13 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
        if (!is_aer_supported(ha))
                return PCI_ERS_RESULT_NONE;
 
-       if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
-               ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang  -- "
-                   "mmio_enabled\n", ha->host_no, __func__);
-               return PCI_ERS_RESULT_NEED_RESET;
-       } else
-               return PCI_ERS_RESULT_RECOVERED;
+       return PCI_ERS_RESULT_RECOVERED;
 }
 
-uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 {
        uint32_t rval = QLA_ERROR;
+       uint32_t ret = 0;
        int fn;
        struct pci_dev *other_pdev = NULL;
 
@@ -2312,7 +2304,6 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                clear_bit(AF_ONLINE, &ha->flags);
                qla4xxx_mark_all_devices_missing(ha);
                qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-               qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
        }
 
        fn = PCI_FUNC(ha->pdev->devfn);
@@ -2375,7 +2366,16 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                        /* Clear driver state register */
                        qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
                        qla4_8xxx_set_drv_active(ha);
-                       ha->isp_ops->enable_intrs(ha);
+                       ret = qla4xxx_request_irqs(ha);
+                       if (ret) {
+                               ql4_printk(KERN_WARNING, ha, "Failed to "
+                                   "reserve interrupt %d already in use.\n",
+                                   ha->pdev->irq);
+                               rval = QLA_ERROR;
+                       } else {
+                               ha->isp_ops->enable_intrs(ha);
+                               rval = QLA_SUCCESS;
+                       }
                }
                qla4_8xxx_idc_unlock(ha);
        } else {
@@ -2387,8 +2387,18 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
                        rval = qla4xxx_initialize_adapter(ha,
                            PRESERVE_DDB_LIST);
-                       if (rval == QLA_SUCCESS)
-                               ha->isp_ops->enable_intrs(ha);
+                       if (rval == QLA_SUCCESS) {
+                               ret = qla4xxx_request_irqs(ha);
+                               if (ret) {
+                                       ql4_printk(KERN_WARNING, ha, "Failed to"
+                                           " reserve interrupt %d already in"
+                                           " use.\n", ha->pdev->irq);
+                                       rval = QLA_ERROR;
+                               } else {
+                                       ha->isp_ops->enable_intrs(ha);
+                                       rval = QLA_SUCCESS;
+                               }
+                       }
                        qla4_8xxx_idc_lock(ha);
                        qla4_8xxx_set_drv_active(ha);
                        qla4_8xxx_idc_unlock(ha);
@@ -2430,12 +2440,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
                goto exit_slot_reset;
        }
 
-       ret = qla4xxx_request_irqs(ha);
-       if (ret) {
-               ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
-                   " already in use.\n", pdev->irq);
-               goto exit_slot_reset;
-       }
+       ha->isp_ops->disable_intrs(ha);
 
        if (is_qla8022(ha)) {
                if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
index a77b973f2cbc607c60c510b8f1e58ae048fb3c42..9bfacf4ed1371f30a557ac99250feb8e2d3f0987 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k3"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k4"
index 8041fe1ab179c2c2e77386726b8082c05643b187..eafeeda6e1942b7deda691f940c0f10efddee390 100644 (file)
@@ -2438,7 +2438,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev)
                sdev->sdev_state = SDEV_RUNNING;
        else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
                sdev->sdev_state = SDEV_CREATED;
-       else
+       else if (sdev->sdev_state != SDEV_CANCEL &&
+                sdev->sdev_state != SDEV_OFFLINE)
                return -EINVAL;
 
        spin_lock_irqsave(q->queue_lock, flags);
index 20ad59dff730289dd3d05768574e8695ea407109..76ee2e784f75085660524fa35143804659735dbd 100644 (file)
@@ -964,10 +964,11 @@ static void __scsi_remove_target(struct scsi_target *starget)
        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel != starget->channel ||
                    sdev->id != starget->id ||
-                   sdev->sdev_state == SDEV_DEL)
+                   scsi_device_get(sdev))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_remove_device(sdev);
+               scsi_device_put(sdev);
                spin_lock_irqsave(shost->host_lock, flags);
                goto restart;
        }
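Swapping the SDEV_DEL state test for scsi_device_get() closes a race: a state check says nothing about whether the device can vanish once host_lock is dropped, whereas a successful scsi_device_get() both skips devices already being deleted and pins the sdev so scsi_remove_device() can run unlocked. The reference-across-unlock pattern the hunk introduces:

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel != starget->channel ||
                    sdev->id != starget->id ||
                    scsi_device_get(sdev))      /* skips dying devices */
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_remove_device(sdev);       /* safe: we hold a reference */
                scsi_device_put(sdev);
                spin_lock_irqsave(shost->host_lock, flags);
                goto restart;                   /* the list may have changed */
        }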
index 57d1e3e1bd4478548f89ef9f3a472ee4418c71b3..b9ab3a590e4b4fd891865a1f9d3d5f3cb55d9dc5 100644 (file)
@@ -258,6 +258,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 20, "%u\n", sdkp->protection_type);
 }
 
+static ssize_t
+sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+       struct scsi_device *sdp = sdkp->device;
+       unsigned int dif, dix;
+
+       dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+       dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
+
+       if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+               dif = 0;
+               dix = 1;
+       }
+
+       if (!dif && !dix)
+               return snprintf(buf, 20, "none\n");
+
+       return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+}
+
 static ssize_t
 sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
                    char *buf)
@@ -285,6 +307,7 @@ static struct device_attribute sd_disk_attrs[] = {
        __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
               sd_store_manage_start_stop),
        __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+       __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
        __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
        __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
        __ATTR_NULL,
index cbb38c5197fad67299f32a36792ab1fc5cdeec3e..3cd8ffbad5776dbcee289f1465022602e3897199 100644 (file)
@@ -324,6 +324,15 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
                        return CDS_NO_DISC;
        }
 
+       /*
+        * SK/ASC/ASCQ of 2/4/2 means "initialization required"
+        * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close
+        * the tray, which resolves the initialization requirement.
+        */
+       if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+                       && sshdr.asc == 0x04 && sshdr.ascq == 0x02)
+               return CDS_TRAY_OPEN;
+
        /*
         * 0x04 is format in progress .. but there must be a disc present!
         */
index c856905bb3bdfc3105ac5eff30de5aad2160c265..fa62578fcd20f572dbd391b614e47f246250b978 100644 (file)
@@ -1411,11 +1411,12 @@ e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
                       CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
 #endif
 
-       info->rs485.flags = r->flags;
-       if (r->delay_rts_before_send >= 1000)
+       info->rs485 = *r;
+
+       /* Maximum delay before RTS is capped at 1000 ms */
+       if (info->rs485.delay_rts_before_send >= 1000)
                info->rs485.delay_rts_before_send = 1000;
-       else
-               info->rs485.delay_rts_before_send = r->delay_rts_before_send;
+
 /*     printk("rts: on send = %i, after = %i, enabled = %i",
                    info->rs485.rts_on_send,
                    info->rs485.rts_after_sent,
@@ -3234,9 +3235,9 @@ rs_write(struct tty_struct *tty,
                e100_disable_rx(info);
                e100_enable_rx_irq(info);
 #endif
-
-               if (info->rs485.delay_rts_before_send > 0)
-                       msleep(info->rs485.delay_rts_before_send);
+               if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
+                       (info->rs485.delay_rts_before_send > 0))
+                               msleep(info->rs485.delay_rts_before_send);
        }
 #endif /* CONFIG_ETRAX_RS485 */
 
@@ -3694,6 +3695,11 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
 
                rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
                rs485data.flags = 0;
+               if (rs485data.delay_rts_before_send != 0)
+                       rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
+               else
+                       rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
+
                if (rs485ctrl.enabled)
                        rs485data.flags |= SER_RS485_ENABLED;
                else
@@ -3731,7 +3737,7 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
                /* This is the ioctl to get RS485 data from user-space */
                if (copy_to_user((struct serial_rs485 *) arg,
                                        rs485data,
-                                       sizeof(serial_rs485)))
+                                       sizeof(struct serial_rs485)))
                        return -EFAULT;
                break;
        }
@@ -4527,6 +4533,7 @@ static int __init rs_init(void)
                /* Set sane defaults */
                info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
                info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
+               info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
                info->rs485.delay_rts_before_send = 0;
                info->rs485.flags &= ~(SER_RS485_ENABLED);
 #endif
index fd0d1b98901c2a0c13cc925e078139be079d1f43..09615b51d5910e6b1b899e265836cb3137440ad1 100644 (file)
@@ -90,8 +90,8 @@ struct clk_rate_round_data {
 static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
 {
        unsigned long rate_error, rate_error_prev = ~0UL;
-       unsigned long rate_best_fit = rounder->rate;
        unsigned long highest, lowest, freq;
+       long rate_best_fit = -ENOENT;
        int i;
 
        highest = 0;
@@ -146,7 +146,7 @@ long clk_rate_table_round(struct clk *clk,
        };
 
        if (clk->nr_freqs < 1)
-               return 0;
+               return -ENOSYS;
 
        return clk_rate_round_helper(&table_round);
 }
@@ -541,6 +541,98 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 }
 EXPORT_SYMBOL_GPL(clk_round_rate);
 
+long clk_round_parent(struct clk *clk, unsigned long target,
+                     unsigned long *best_freq, unsigned long *parent_freq,
+                     unsigned int div_min, unsigned int div_max)
+{
+       struct cpufreq_frequency_table *freq, *best = NULL;
+       unsigned long error = ULONG_MAX, freq_high, freq_low, div;
+       struct clk *parent = clk_get_parent(clk);
+
+       if (!parent) {
+               *parent_freq = 0;
+               *best_freq = clk_round_rate(clk, target);
+               return abs(target - *best_freq);
+       }
+
+       for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
+            freq++) {
+               if (freq->frequency == CPUFREQ_ENTRY_INVALID)
+                       continue;
+
+               if (unlikely(freq->frequency / target <= div_min - 1)) {
+                       unsigned long freq_max;
+
+                       freq_max = (freq->frequency + div_min / 2) / div_min;
+                       if (error > target - freq_max) {
+                               error = target - freq_max;
+                               best = freq;
+                               if (best_freq)
+                                       *best_freq = freq_max;
+                       }
+
+                       pr_debug("too low freq %lu, error %lu\n", freq->frequency,
+                                target - freq_max);
+
+                       if (!error)
+                               break;
+
+                       continue;
+               }
+
+               if (unlikely(freq->frequency / target >= div_max)) {
+                       unsigned long freq_min;
+
+                       freq_min = (freq->frequency + div_max / 2) / div_max;
+                       if (error > freq_min - target) {
+                               error = freq_min - target;
+                               best = freq;
+                               if (best_freq)
+                                       *best_freq = freq_min;
+                       }
+
+                       pr_debug("too high freq %lu, error %lu\n", freq->frequency,
+                                freq_min - target);
+
+                       if (!error)
+                               break;
+
+                       continue;
+               }
+
+               div = freq->frequency / target;
+               freq_high = freq->frequency / div;
+               freq_low = freq->frequency / (div + 1);
+
+               if (freq_high - target < error) {
+                       error = freq_high - target;
+                       best = freq;
+                       if (best_freq)
+                               *best_freq = freq_high;
+               }
+
+               if (target - freq_low < error) {
+                       error = target - freq_low;
+                       best = freq;
+                       if (best_freq)
+                               *best_freq = freq_low;
+               }
+
+               pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
+                        freq->frequency, div, freq_high, div + 1, freq_low,
+                        *best_freq, best->frequency);
+
+               if (!error)
+                       break;
+       }
+
+       if (parent_freq)
+               *parent_freq = best->frequency;
+
+       return error;
+}
+EXPORT_SYMBOL_GPL(clk_round_parent);
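clk_round_parent() answers: given a child clock that can only divide its parent by an integer in div_min..div_max, which entry of the parent's cpufreq frequency table gets closest to the requested target rate? It fills in both the best achievable child rate and the parent rate that produces it, and returns the remaining error. A usage sketch; the clock lookup and the 7..15 divider range are made-up example values:

        struct clk *clk = clk_get(dev, "video_clk");    /* illustrative lookup */
        unsigned long best = 0, parent_rate = 0;
        long err;

        /* closest we can get to 48 MHz with dividers 7 through 15 */
        err = clk_round_parent(clk, 48000000, &best, &parent_rate, 7, 15);
        pr_info("best %lu Hz from parent %lu Hz, error %ld\n",
                best, parent_rate, err);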
+
 #ifdef CONFIG_PM
 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 {
index 873a99ff8f64a454a505d1838be751dad20ae181..e5e9e6735f7d62e3d585505de7be4fe379fbbd40 100644 (file)
@@ -79,7 +79,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
         * Register the IRQ position with the global IRQ map, then insert
         * it in to the radix tree.
         */
-       irq_reserve_irqs(irq, 1);
+       irq_reserve_irq(irq);
 
        raw_spin_lock_irqsave(&intc_big_lock, flags);
        radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
index 4187cce20ffdc9cd0997b276e2f5e8dc020e2519..a3677c9dfe36e3fa6ebcac37bee81af58d9ece5c 100644 (file)
@@ -60,5 +60,5 @@ void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
        int i;
 
        for (i = 0; i < nr_vecs; i++)
-               irq_reserve_irqs(evt2irq(vectors[i].vect), 1);
+               irq_reserve_irq(evt2irq(vectors[i].vect));
 }
index b5a78a1f4421a0c19aa8735bd49f076fd8f91b46..709c836607de23636e6c567a38833cceff752b08 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/of_spi.h>
 
-
-/* SPI bustype and spi_master class are registered after board init code
- * provides the SPI device tables, ensuring that both are present by the
- * time controller driver registration causes spi_devices to "enumerate".
- */
 static void spidev_release(struct device *dev)
 {
        struct spi_device       *spi = to_spi_device(dev);
@@ -202,11 +197,16 @@ EXPORT_SYMBOL_GPL(spi_register_driver);
 
 struct boardinfo {
        struct list_head        list;
-       unsigned                n_board_info;
-       struct spi_board_info   board_info[0];
+       struct spi_board_info   board_info;
 };
 
 static LIST_HEAD(board_list);
+static LIST_HEAD(spi_master_list);
+
+/*
+ * Used to protect add/del operations for board_info list and
+ * spi_master list, and their matching process
+ */
 static DEFINE_MUTEX(board_lock);
 
 /**
@@ -300,16 +300,16 @@ int spi_add_device(struct spi_device *spi)
         */
        status = spi_setup(spi);
        if (status < 0) {
-               dev_err(dev, "can't %s %s, status %d\n",
-                               "setup", dev_name(&spi->dev), status);
+               dev_err(dev, "can't setup %s, status %d\n",
+                               dev_name(&spi->dev), status);
                goto done;
        }
 
        /* Device may be bound to an active driver when this returns */
        status = device_add(&spi->dev);
        if (status < 0)
-               dev_err(dev, "can't %s %s, status %d\n",
-                               "add", dev_name(&spi->dev), status);
+               dev_err(dev, "can't add %s, status %d\n",
+                               dev_name(&spi->dev), status);
        else
                dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 
@@ -371,6 +371,20 @@ struct spi_device *spi_new_device(struct spi_master *master,
 }
 EXPORT_SYMBOL_GPL(spi_new_device);
 
+static void spi_match_master_to_boardinfo(struct spi_master *master,
+                               struct spi_board_info *bi)
+{
+       struct spi_device *dev;
+
+       if (master->bus_num != bi->bus_num)
+               return;
+
+       dev = spi_new_device(master, bi);
+       if (!dev)
+               dev_err(master->dev.parent, "can't create new device for %s\n",
+                       bi->modalias);
+}
+
 /**
  * spi_register_board_info - register SPI devices for a given board
  * @info: array of chip descriptors
@@ -393,43 +407,25 @@ EXPORT_SYMBOL_GPL(spi_new_device);
 int __init
 spi_register_board_info(struct spi_board_info const *info, unsigned n)
 {
-       struct boardinfo        *bi;
+       struct boardinfo *bi;
+       int i;
 
-       bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
+       bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
        if (!bi)
                return -ENOMEM;
-       bi->n_board_info = n;
-       memcpy(bi->board_info, info, n * sizeof *info);
 
-       mutex_lock(&board_lock);
-       list_add_tail(&bi->list, &board_list);
-       mutex_unlock(&board_lock);
-       return 0;
-}
+       for (i = 0; i < n; i++, bi++, info++) {
+               struct spi_master *master;
 
-/* FIXME someone should add support for a __setup("spi", ...) that
- * creates board info from kernel command lines
- */
-
-static void scan_boardinfo(struct spi_master *master)
-{
-       struct boardinfo        *bi;
-
-       mutex_lock(&board_lock);
-       list_for_each_entry(bi, &board_list, list) {
-               struct spi_board_info   *chip = bi->board_info;
-               unsigned                n;
-
-               for (n = bi->n_board_info; n > 0; n--, chip++) {
-                       if (chip->bus_num != master->bus_num)
-                               continue;
-                       /* NOTE: this relies on spi_new_device to
-                        * issue diagnostics when given bogus inputs
-                        */
-                       (void) spi_new_device(master, chip);
-               }
+               memcpy(&bi->board_info, info, sizeof(*info));
+               mutex_lock(&board_lock);
+               list_add_tail(&bi->list, &board_list);
+               list_for_each_entry(master, &spi_master_list, list)
+                       spi_match_master_to_boardinfo(master, &bi->board_info);
+               mutex_unlock(&board_lock);
        }
-       mutex_unlock(&board_lock);
+
+       return 0;
 }
 
 /*-------------------------------------------------------------------------*/
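With this rewrite spi_register_board_info() copies each descriptor into its own list node and immediately matches it against every master already registered, so board code no longer has to run before the controller driver probes (and vice versa, since spi_register_master() now walks board_list as well). Typical board-file usage is unchanged; a minimal, hedged example where the bus number, chip select and modalias are illustrative:

        static struct spi_board_info my_board_spi_devices[] __initdata = {
                {
                        .modalias       = "spidev",
                        .max_speed_hz   = 1000000,
                        .bus_num        = 0,
                        .chip_select    = 1,
                        .mode           = SPI_MODE_0,
                },
        };

        /* from the board's init code, in any order relative to master probe */
        spi_register_board_info(my_board_spi_devices,
                                ARRAY_SIZE(my_board_spi_devices));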
@@ -512,6 +508,7 @@ int spi_register_master(struct spi_master *master)
 {
        static atomic_t         dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
        struct device           *dev = master->dev.parent;
+       struct boardinfo        *bi;
        int                     status = -ENODEV;
        int                     dynamic = 0;
 
@@ -547,8 +544,12 @@ int spi_register_master(struct spi_master *master)
        dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
                        dynamic ? " (dynamic)" : "");
 
-       /* populate children from any spi device tables */
-       scan_boardinfo(master);
+       mutex_lock(&board_lock);
+       list_add_tail(&master->list, &spi_master_list);
+       list_for_each_entry(bi, &board_list, list)
+               spi_match_master_to_boardinfo(master, &bi->board_info);
+       mutex_unlock(&board_lock);
+
        status = 0;
 
        /* Register devices from the device tree */
@@ -579,7 +580,12 @@ void spi_unregister_master(struct spi_master *master)
 {
        int dummy;
 
-       dummy = device_for_each_child(&master->dev, NULL, __unregister);
+       mutex_lock(&board_lock);
+       list_del(&master->list);
+       mutex_unlock(&board_lock);
+
+       dummy = device_for_each_child(master->dev.parent, &master->dev,
+                                       __unregister);
        device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -652,7 +658,7 @@ int spi_setup(struct spi_device *spi)
         */
        bad_bits = spi->mode & ~spi->master->mode_bits;
        if (bad_bits) {
-               dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+               dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
                        bad_bits);
                return -EINVAL;
        }
index ab483a0ec6d05b7738ab85aed075ee0a293598bf..3f223511127b56c9a6800743991aea342e5e2e92 100644 (file)
@@ -504,6 +504,15 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
                "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
                dmastat, spistat);
 
+       if (drv_data->rx != NULL) {
+               u16 cr = read_CTRL(drv_data);
+               /* discard old RX data and clear RXS */
+               bfin_spi_dummy_read(drv_data);
+               write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+               write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */
+               write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */
+       }
+
        clear_dma_irqstat(drv_data->dma_channel);
 
        /*
@@ -1099,12 +1108,15 @@ static int bfin_spi_setup(struct spi_device *spi)
        }
 
        if (chip->chip_select_num >= MAX_CTRL_CS) {
-               ret = gpio_request(chip->cs_gpio, spi->modalias);
-               if (ret) {
-                       dev_err(&spi->dev, "gpio_request() error\n");
-                       goto pin_error;
+               /* Only request on first setup */
+               if (spi_get_ctldata(spi) == NULL) {
+                       ret = gpio_request(chip->cs_gpio, spi->modalias);
+                       if (ret) {
+                               dev_err(&spi->dev, "gpio_request() error\n");
+                               goto pin_error;
+                       }
+                       gpio_direction_output(chip->cs_gpio, 1);
                }
-               gpio_direction_output(chip->cs_gpio, 1);
        }
 
        dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
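bfin_spi_setup() can run more than once for the same device (a protocol driver re-calling spi_setup() to change mode or word size), and gpio_request() fails on a GPIO that is already claimed; keying the request off spi_get_ctldata() being NULL limits it to the first call. The pairing assumed here is that setup() later publishes the per-chip state with spi_set_ctldata(), which is what makes the guard trip on subsequent calls; a sketch of that assumption:

        if (spi_get_ctldata(spi) == NULL) {
                /* first setup for this device: claim its chip-select GPIO */
                ret = gpio_request(chip->cs_gpio, spi->modalias);
                if (ret)
                        return ret;
                gpio_direction_output(chip->cs_gpio, 1);
        }
        /* remembering the chip state is what skips the request next time */
        spi_set_ctldata(spi, chip);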
index 22c6c6659f5b5f9a004ed25ae4543e01c200380b..ee8b47746a155ece0fbc559803a40ecb71e6f21b 100644 (file)
@@ -285,9 +285,9 @@ A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_I
     do {
         
             /* check if host supports scatter requests and it meets our requirements */
-        if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+        if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
             AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
-                    device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ));
+                    device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
             status = A_ENOTSUP;
             break;    
         }
index c196098f0859c2aa1c60df2d2c9dcc1bd5957110..6b8eeea475cf4e9e5a59f9b0d821121229d4cea5 100644 (file)
@@ -198,8 +198,8 @@ int ar6000_htc_raw_open(AR_SOFTC_T *ar)
         
     for (streamID = HTC_RAW_STREAM_0; streamID < HTC_RAW_STREAM_NUM_MAX; streamID++) {
         /* Initialize the data structures */
-        init_MUTEX(&arRaw->raw_htc_read_sem[streamID]);
-        init_MUTEX(&arRaw->raw_htc_write_sem[streamID]);
+       sema_init(&arRaw->raw_htc_read_sem[streamID], 1);
+       sema_init(&arRaw->raw_htc_write_sem[streamID], 1);
         init_waitqueue_head(&arRaw->raw_htc_read_queue[streamID]);
         init_waitqueue_head(&arRaw->raw_htc_write_queue[streamID]);
 
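Several staging and filesystem drivers in this merge (ath6kl, brcm80211 dhd and wl, comedi usbdux/usbduxfast, msm_fb, rtl8712, cyasblkdev, smbfs) receive the same mechanical conversion: the deprecated init_MUTEX() and DECLARE_MUTEX() helpers are replaced by explicit binary-semaphore setup. The two replacement forms, with my_sem and obj as placeholders:

        #include <linux/semaphore.h>

        /* compile-time definition: was DECLARE_MUTEX(my_sem); */
        static DEFINE_SEMAPHORE(my_sem);                /* count starts at 1 */

        /* runtime initialisation: was init_MUTEX(&obj->sem); */
        sema_init(&obj->sem, 1);                        /* binary semaphore */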
diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
deleted file mode 100644 (file)
index e69de29..0000000
index e5357875661f5227a4cdd81dbd3f6b2e53e20fb4..bbbe7c5f7492573508d5027b03cd1df9a187b561 100644 (file)
@@ -1929,7 +1929,7 @@ dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
                goto fail;
 
        net->netdev_ops = NULL;
-       init_MUTEX(&dhd->proto_sem);
+       sema_init(&dhd->proto_sem, 1);
        /* Initialize other structure content */
        init_waitqueue_head(&dhd->ioctl_resp_wait);
        init_waitqueue_head(&dhd->ctrl_wait);
@@ -1977,7 +1977,7 @@ dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
        dhd->timer.function = dhd_watchdog;
 
        /* Initialize thread based operation and lock */
-       init_MUTEX(&dhd->sdsem);
+       sema_init(&dhd->sdsem, 1);
        if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0))
                dhd->threads_only = true;
        else
index ad635ee7758e2aca45e3d6cb5d60d67c19c48618..d060377629ac2fc69320a17600a1371b201dec40 100644 (file)
@@ -866,7 +866,7 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs,
        spin_lock_init(&wl->rpcq_lock);
        spin_lock_init(&wl->txq_lock);
 
-       init_MUTEX(&wl->sem);
+       sema_init(&wl->sem, 1);
 #else
        spin_lock_init(&wl->lock);
        spin_lock_init(&wl->isr_lock);
index 0560a74515125c6668e225ab433987b91dd4e365..06059850dae2e3d2b7a4d06e16a9337c2cfefab4 100644 (file)
@@ -262,7 +262,7 @@ struct dt9812_usb_cmd {
 
 #define DT9812_NUM_SLOTS       16
 
-static DECLARE_MUTEX(dt9812_mutex);
+static DEFINE_SEMAPHORE(dt9812_mutex);
 
 static const struct usb_device_id dt9812_table[] = {
        {USB_DEVICE(0x0867, 0x9812)},
index 6131e2dd0591ba60b38c6b7cc755e326e5382123..1f177a67ff114f9098f3bcdb282068978c6567e3 100644 (file)
@@ -315,7 +315,7 @@ struct usbduxsub {
  */
 static struct usbduxsub usbduxsub[NUMUSBDUX];
 
-static DECLARE_MUTEX(start_stop_sem);
+static DEFINE_SEMAPHORE(start_stop_sem);
 
 /*
  * Stops the data acquisition
@@ -2367,7 +2367,7 @@ static int usbduxsub_probe(struct usb_interface *uinterf,
        dev_dbg(dev, "comedi_: usbdux: "
                "usbduxsub[%d] is ready to connect to comedi.\n", index);
 
-       init_MUTEX(&(usbduxsub[index].sem));
+       sema_init(&(usbduxsub[index].sem), 1);
        /* save a pointer to the usb device */
        usbduxsub[index].usbdev = udev;
 
index 0a164a9a66c3b0024890b61a76e0178abf576023..5b15e6df54e653cb4f7e815979c0c7f645722a23 100644 (file)
@@ -199,7 +199,7 @@ struct usbduxfastsub_s {
  */
 static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST];
 
-static DECLARE_MUTEX(start_stop_sem);
+static DEFINE_SEMAPHORE(start_stop_sem);
 
 /*
  * bulk transfers to usbduxfast
@@ -1504,7 +1504,7 @@ static int usbduxfastsub_probe(struct usb_interface *uinterf,
               "connect to comedi.\n", index);
 #endif
 
-       init_MUTEX(&(usbduxfastsub[index].sem));
+       sema_init(&(usbduxfastsub[index].sem), 1);
        /* save a pointer to the usb device */
        usbduxfastsub[index].usbdev = udev;
 
index ea268edbf43b0916be95f4e0d5f20b38665a05fb..23fa049b51f22d941ebd8415f375c93ad3462898 100644 (file)
@@ -1158,7 +1158,7 @@ static int msm_fb_release(struct fb_info *info, int user)
        return ret;
 }
 
-DECLARE_MUTEX(msm_fb_pan_sem);
+DEFINE_SEMAPHORE(msm_fb_pan_sem);
 
 static int msm_fb_pan_display(struct fb_var_screeninfo *var,
                              struct fb_info *info)
@@ -1962,7 +1962,7 @@ static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
 
 #endif
 
-DECLARE_MUTEX(msm_fb_ioctl_ppp_sem);
+DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
 DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
 DEFINE_MUTEX(msm_fb_ioctl_hist_sem);
 
index 7fca42c7c0d40f9b0d103f8a3f93f789828d0b68..d1674cd282dcd7deb370cca6b75c8f0f69b3ed73 100644 (file)
@@ -161,7 +161,7 @@ static inline u32 _down_sema(struct semaphore *sema)
 
 static inline void _rtl_rwlock_init(struct semaphore *prwlock)
 {
-       init_MUTEX(prwlock);
+       sema_init(prwlock, 1);
 }
 
 static inline void _init_listhead(struct list_head *list)
index f9c493591ce75f151ead6fbbeed379cb5a8a1290..540a984bb5160d5942902575803c6c79b087d507 100644 (file)
@@ -537,7 +537,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
        server->mnt = NULL;
        server->sock_file = NULL;
        init_waitqueue_head(&server->conn_wq);
-       init_MUTEX(&server->sem);
+       sema_init(&server->sem, 1);
        INIT_LIST_HEAD(&server->entry);
        INIT_LIST_HEAD(&server->xmitq);
        INIT_LIST_HEAD(&server->recvq);
index bbf3d9c4abb03aa6dc4b17f9e3f0d123cc7306d7..097e82bc7a6315d92bd0c158bca8a10340f6c22c 100644 (file)
@@ -766,7 +766,7 @@ static int solo_enc_open(struct file *file)
                                    &solo_enc->lock,
                                    V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                    V4L2_FIELD_INTERLACED,
-                                   sizeof(struct videobuf_buffer), fh);
+                                   sizeof(struct videobuf_buffer), fh, NULL);
 
        spin_unlock(&solo_enc->lock);
 
index 9731fa02b5e80702c380e42dfd57c996dad03a3e..6ffd21de837d8bf5161d6654206ce918439cc78f 100644 (file)
@@ -437,7 +437,7 @@ static int solo_v4l2_open(struct file *file)
                                    &solo_dev->pdev->dev, &fh->slock,
                                    V4L2_BUF_TYPE_VIDEO_CAPTURE,
                                    SOLO_DISP_PIX_FIELD,
-                                   sizeof(struct videobuf_buffer), fh);
+                                   sizeof(struct videobuf_buffer), fh, NULL);
 
        return 0;
 }
index 3e46866dd2791dc3b66724bd64e96836a1159dfd..93f625fc852b20d991012ffa6d61b05b693690a6 100644 (file)
@@ -320,7 +320,6 @@ static struct i2c_algorithm tm6000_algo = {
 
 static struct i2c_adapter tm6000_adap_template = {
        .owner = THIS_MODULE,
-       .class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
        .name = "tm6000",
        .algo = &tm6000_algo,
 };
index f428a7af357a6812fe1f0c7ca8f73a7f88aecf24..e1851f00be568f07e17fbd6c45070bdc9d01de93 100644 (file)
@@ -157,7 +157,7 @@ struct cyasblkdev_blk_data {
 /* pointer to west bridge block data device superstructure */
 static struct cyasblkdev_blk_data *gl_bd;
 
-static DECLARE_MUTEX(open_lock);
+static DEFINE_SEMAPHORE(open_lock);
 
 /* local forwardd declarationss  */
 static cy_as_device_handle *cyas_dev_handle;
index 24e959eca4127a0db1078a60466723d569304be5..0bbb8a3e191df18963e98bafeaef480da125a3a2 100644 (file)
@@ -334,7 +334,7 @@ int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
 
        init_completion(&bq->thread_complete);
        init_waitqueue_head(&bq->thread_wq);
-       init_MUTEX(&bq->thread_sem);
+       sema_init(&bq->thread_sem, 1);
 
        ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
        if (ret >= 0) {
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644 (file)
index 0000000..c43ef48
--- /dev/null
@@ -0,0 +1,11 @@
+obj-y                          += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
+                                  tty_buffer.o tty_port.o tty_mutex.o
+obj-$(CONFIG_LEGACY_PTYS)      += pty.o
+obj-$(CONFIG_UNIX98_PTYS)      += pty.o
+obj-$(CONFIG_AUDIT)            += tty_audit.o
+obj-$(CONFIG_MAGIC_SYSRQ)      += sysrq.o
+obj-$(CONFIG_N_HDLC)           += n_hdlc.o
+obj-$(CONFIG_N_GSM)            += n_gsm.o
+obj-$(CONFIG_R3964)            += n_r3964.o
+
+obj-y                          += vt/
similarity index 100%
rename from drivers/char/n_gsm.c
rename to drivers/tty/n_gsm.c
similarity index 100%
rename from drivers/char/n_hdlc.c
rename to drivers/tty/n_hdlc.c
similarity index 100%
rename from drivers/char/n_r3964.c
rename to drivers/tty/n_r3964.c
similarity index 100%
rename from drivers/char/n_tty.c
rename to drivers/tty/n_tty.c
similarity index 100%
rename from drivers/char/pty.c
rename to drivers/tty/pty.c
similarity index 100%
rename from drivers/char/sysrq.c
rename to drivers/tty/sysrq.c
similarity index 100%
rename from drivers/char/tty_io.c
rename to drivers/tty/tty_io.c
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
new file mode 100644 (file)
index 0000000..14a51c9
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# This file contains the font map for the default (hardware) font
+#
+FONTMAPFILE = cp437.uni
+
+obj-$(CONFIG_VT)                       += vt_ioctl.o vc_screen.o \
+                                          selection.o keyboard.o
+obj-$(CONFIG_CONSOLE_TRANSLATIONS)     += consolemap.o consolemap_deftbl.o
+obj-$(CONFIG_HW_CONSOLE)               += vt.o defkeymap.o
+
+# Files generated that shall be removed upon make clean
+clean-files := consolemap_deftbl.c defkeymap.c
+
+quiet_cmd_conmk = CONMK   $@
+      cmd_conmk = scripts/conmakehash $< > $@
+
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+       $(call cmd,conmk)
+
+$(obj)/defkeymap.o:  $(obj)/defkeymap.c
+
+# Uncomment if you're changing the keymap and have an appropriate
+# loadkeys version for the map. By default, we'll use the shipped
+# versions.
+# GENERATE_KEYMAP := 1
+
+ifdef GENERATE_KEYMAP
+
+$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
+       loadkeys --mktable $< > $@.tmp
+       sed -e 's/^static *//' $@.tmp > $@
+       rm $@.tmp
+
+endif
similarity index 100%
rename from drivers/char/vt.c
rename to drivers/tty/vt/vt.c
index cb23355f52d3d8b5335dbdf1e5b07a32c77b281a..fbe86ca95802b93d68ace2718280a4aeb8b533c2 100644 (file)
@@ -811,7 +811,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
                INFO(dev, "MAC %pM\n", net->dev_addr);
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
-               netif_stop_queue(net);
                the_dev = dev;
        }
 
index 3ec24609151e8ec322c48afcebae8dc9ce578b5d..734c650a47c433be7524e27b35b5e3c5b5eea0ad 100644 (file)
@@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct adp8860_bl *data = dev_get_drvdata(dev);
+       int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+       if (ret)
+               return ret;
 
-       strict_strtoul(buf, 10, &data->cached_daylight_max);
        return adp8860_store(dev, buf, count, ADP8860_BLMX1);
 }
 static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
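
This adp8860 hunk stops ignoring the return value of strict_strtoul(), so a malformed sysfs write is rejected instead of silently falling through to the register update. A sketch of the corrected store pattern (device and attribute names hypothetical; strict_strtoul() was the string parser of this era, before kstrtoul()):

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            unsigned long val;
            int ret = strict_strtoul(buf, 10, &val);

            if (ret)
                    return ret;             /* typically -EINVAL on bad input */

            /* ... apply val to the device ... */
            return count;
    }
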
@@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
        if (val == 0) {
                /* Enable automatic ambient light sensing */
                adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
-       } else if ((val > 0) && (val < 6)) {
+       } else if ((val > 0) && (val <= 3)) {
                /* Disable automatic ambient light sensing */
                adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
 
@@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
                mutex_lock(&data->lock);
                adp8860_read(data->client, ADP8860_CFGR, &reg_val);
                reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
-               reg_val |= val << CFGR_BLV_SHIFT;
+               reg_val |= (val - 1) << CFGR_BLV_SHIFT;
                adp8860_write(data->client, ADP8860_CFGR, reg_val);
                mutex_unlock(&data->lock);
        }
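
Taken together, the two hunks above restrict the writable ambient-light zone to 1..3 (the comparator zones the driver exposes) and store it in the two-bit CFGR field as zone - 1, so writing 3 leaves 2 in the register. With the mask/shift names from the hunk, the update is effectively:

            /* val is 1..3 after validation; the field holds 0..2 */
            reg_val = (reg_val & ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT)) |
                      ((val - 1) << CFGR_BLV_SHIFT);
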
index 9093ef0fa8696f2da8b086984042001aa94d8eec..c67801e57aafb24f546250bcbdb45d59bcb5b9d4 100644 (file)
@@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
        const u16 slpin = 0x10;
        const u16 disoff = 0x28;
 
-       if (power) {
+       if (power <= FB_BLANK_NORMAL) {
                if (priv->lcd_on)
                        return 0;
 
index abc43a0eb97dd054f7c526b47fa014e61e458dc0..5d3cf33953ac299cd759fb19bfd5a1d53205cc0b 100644 (file)
@@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
        struct spi_device *spi = st->spi;
        struct lms283gf05_pdata *pdata = spi->dev.platform_data;
 
-       if (power) {
+       if (power <= FB_BLANK_NORMAL) {
                if (pdata)
                        lms283gf05_reset(pdata->reset_gpio,
                                        pdata->reset_inverted);
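
The two LCD fixes above (l4f00242t03 and lms283gf05) correct the sense of the power argument: in the fb/lcd blanking scheme lower values mean more awake (FB_BLANK_UNBLANK is 0, FB_BLANK_NORMAL is 1, FB_BLANK_POWERDOWN is 4), so the panel-on path must fire for power <= FB_BLANK_NORMAL rather than for any non-zero value. A small sketch with a hypothetical helper:

    #include <linux/fb.h>

    /* Hypothetical helper: true when the panel should be driven. */
    static bool lcd_should_be_on(int power)
    {
            /* FB_BLANK_UNBLANK == 0, FB_BLANK_NORMAL == 1 */
            return power <= FB_BLANK_NORMAL;
    }
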
index 9fb533f6373e6ab958646ccfdcba047c43b99d9d..1485f7345f49ee32586d5d665d1bd624cfcd3e1a 100644 (file)
@@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
                },
                .driver_data    = (void *)&nvidia_chipset_data,
        },
+       {
+               .callback       = mbp_dmi_match,
+               .ident          = "MacBookAir 3,1",
+               .matches        = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
+               },
+               .driver_data    = (void *)&nvidia_chipset_data,
+       },
+       {
+               .callback       = mbp_dmi_match,
+               .ident          = "MacBookAir 3,2",
+               .matches        = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
+               },
+               .driver_data    = (void *)&nvidia_chipset_data,
+       },
        { }
 };
 
index 550443518891d41345234f4aa72929798b1f68a8..21866ec69656d6be90e69af18a56316895f9d69f 100644 (file)
@@ -25,6 +25,7 @@ struct pwm_bl_data {
        struct pwm_device       *pwm;
        struct device           *dev;
        unsigned int            period;
+       unsigned int            lth_brightness;
        int                     (*notify)(struct device *,
                                          int brightness);
 };
@@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
                pwm_config(pb->pwm, 0, pb->period);
                pwm_disable(pb->pwm);
        } else {
-               pwm_config(pb->pwm, brightness * pb->period / max, pb->period);
+               brightness = pb->lth_brightness +
+                       (brightness * (pb->period - pb->lth_brightness) / max);
+               pwm_config(pb->pwm, brightness, pb->period);
                pwm_enable(pb->pwm);
        }
        return 0;
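
The pwm_bl change adds a lower-threshold brightness so the PWM duty cycle is scaled into [lth, period] instead of [0, period]; panels that cut out below a minimum duty no longer go dark at low brightness levels. For example, with period = 1,000,000 ns, max = 255 and a stored lth of about 392,000 ns (a platform lth_brightness of 100), brightness 10 now yields roughly 416,000 ns of duty rather than ~39,000 ns. A hypothetical standalone helper mirroring the new computation:

    static unsigned int pwm_duty_ns(unsigned int brightness, unsigned int max,
                                    unsigned int period, unsigned int lth)
    {
            return lth + brightness * (period - lth) / max;
    }
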
@@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
        pb->period = data->pwm_period_ns;
        pb->notify = data->notify;
+       pb->lth_brightness = data->lth_brightness *
+               (data->pwm_period_ns / data->max_brightness);
        pb->dev = &pdev->dev;
 
        pb->pwm = pwm_request(data->pwm_id, "backlight");
index a3128c9cb7ad6b64999eb6e7bffe6a2898c996e0..5927db0da9998f87f273d9430d782c26723fb16e 100644 (file)
@@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
 
        return strlen(buf);
 }
-static DEVICE_ATTR(gamma_table, 0644,
+static DEVICE_ATTR(gamma_table, 0444,
                s6e63m0_sysfs_show_gamma_table, NULL);
 
-static int __init s6e63m0_probe(struct spi_device *spi)
+static int __devinit s6e63m0_probe(struct spi_device *spi)
 {
        int ret = 0;
        struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
        struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
 
        s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+       device_remove_file(&spi->dev, &dev_attr_gamma_table);
+       device_remove_file(&spi->dev, &dev_attr_gamma_mode);
+       backlight_device_unregister(lcd->bd);
        lcd_device_unregister(lcd->ld);
        kfree(lcd);
 
index 5aff46c61e521cbaef027b92d1a4bdd7bb86c547..355abcdcda980acfcd70dcb54d4bb798edf6fcae 100644 (file)
@@ -81,7 +81,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
 
 v) mount check for unmatched uids
 
-w) Add support for new vfs entry points for setlease and fallocate 
+w) Add support for new vfs entry point for fallocate
 
 x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of 
 processes can proceed better in parallel (on the server)
index 525ba59a41058a6d13f4311fb841eb2604a00140..e9a393c9c2ca4ddb8ec3d75cfcf0766234ec1789 100644 (file)
@@ -15,7 +15,7 @@
  *   the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-       struct radix_tree_root tlink_tree;
-#define CIFS_TLINK_MASTER_TAG          0       /* is "master" (mount) tcon */
+       struct rb_root tlink_tree;
        spinlock_t tlink_tree_lock;
+       struct tcon_link *master_tlink;
        struct nls_table *local_nls;
        unsigned int rsize;
        unsigned int wsize;
index 54745b6c3db9c50af75b393c8b80d96460439c80..9c3789762ab7c4c92a7d67859c34b32cb196f1ab 100644 (file)
@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
                return -ENOMEM;
 
        spin_lock_init(&cifs_sb->tlink_tree_lock);
-       INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+       cifs_sb->tlink_tree = RB_ROOT;
 
        rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
        if (rc) {
@@ -321,8 +321,7 @@ cifs_alloc_inode(struct super_block *sb)
        /* Until the file is open and we have gotten oplock
        info back from the server, can not assume caching of
        file data or metadata */
-       cifs_inode->clientCanCacheRead = false;
-       cifs_inode->clientCanCacheAll = false;
+       cifs_set_oplock_level(cifs_inode, 0);
        cifs_inode->delete_pending = false;
        cifs_inode->invalid_mapping = false;
        cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
@@ -625,11 +624,8 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
                   knows that the file won't be changed on the server
                   by anyone else */
                return generic_setlease(file, arg, lease);
-       else {
-               if (arg != F_UNLCK)
-                       locks_free_lock(*lease);
+       else
                return -EAGAIN;
-       }
 }
 
 struct file_system_type cifs_fs_type = {
index f259e4d7612d5f2a50f70f6b65802b4fff01621c..b577bf0a1bb3622491ed0a885ca30b9e7684d24e 100644 (file)
@@ -336,7 +336,8 @@ struct cifsTconInfo {
  * "get" on the container.
  */
 struct tcon_link {
-       unsigned long           tl_index;
+       struct rb_node          tl_rbnode;
+       uid_t                   tl_uid;
        unsigned long           tl_flags;
 #define TCON_LINK_MASTER       0
 #define TCON_LINK_PENDING      1
index edb6d90efdf27ac2272c961c7269fbbfab2c0aa7..7ed69b6b5fe6719f66ed5af0a0b38d2cda1aa352 100644 (file)
@@ -104,6 +104,7 @@ extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
                                      int offset);
+extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
 
 extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
                                struct file *file, struct tcon_link *tlink,
index 9eb327defa1d39fc16d1c8f0837de36a65e43c92..251a17c03545dfc35e8ab37e1ea5a612e94f7536 100644 (file)
@@ -116,6 +116,7 @@ struct smb_vol {
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2900,24 +2901,16 @@ remote_path_check:
                goto mount_fail_check;
        }
 
-       tlink->tl_index = pSesInfo->linux_uid;
+       tlink->tl_uid = pSesInfo->linux_uid;
        tlink->tl_tcon = tcon;
        tlink->tl_time = jiffies;
        set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
        set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-       rc = radix_tree_preload(GFP_KERNEL);
-       if (rc == -ENOMEM) {
-               kfree(tlink);
-               goto mount_fail_check;
-       }
-
+       cifs_sb->master_tlink = tlink;
        spin_lock(&cifs_sb->tlink_tree_lock);
-       radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
-       radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
-                          CIFS_TLINK_MASTER_TAG);
+       tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
        spin_unlock(&cifs_sb->tlink_tree_lock);
-       radix_tree_preload_end();
 
        queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
                                TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-       int i, ret;
+       struct rb_root *root = &cifs_sb->tlink_tree;
+       struct rb_node *node;
+       struct tcon_link *tlink;
        char *tmp;
-       struct tcon_link *tlink[8];
-       unsigned long index = 0;
 
        cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-       do {
-               spin_lock(&cifs_sb->tlink_tree_lock);
-               ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-                                            (void **)tlink, index,
-                                            ARRAY_SIZE(tlink));
-               /* increment index for next pass */
-               if (ret > 0)
-                       index = tlink[ret - 1]->tl_index + 1;
-               for (i = 0; i < ret; i++) {
-                       cifs_get_tlink(tlink[i]);
-                       clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-                       radix_tree_delete(&cifs_sb->tlink_tree,
-                                                       tlink[i]->tl_index);
-               }
-               spin_unlock(&cifs_sb->tlink_tree_lock);
+       spin_lock(&cifs_sb->tlink_tree_lock);
+       while ((node = rb_first(root))) {
+               tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+               cifs_get_tlink(tlink);
+               clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+               rb_erase(node, root);
 
-               for (i = 0; i < ret; i++)
-                       cifs_put_tlink(tlink[i]);
-       } while (ret != 0);
+               spin_unlock(&cifs_sb->tlink_tree_lock);
+               cifs_put_tlink(tlink);
+               spin_lock(&cifs_sb->tlink_tree_lock);
+       }
+       spin_unlock(&cifs_sb->tlink_tree_lock);
 
        tmp = cifs_sb->prepath;
        cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@ out:
        return tcon;
 }
 
-static struct tcon_link *
+static inline struct tcon_link *
 cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
 {
-       struct tcon_link *tlink;
-       unsigned int ret;
-
-       spin_lock(&cifs_sb->tlink_tree_lock);
-       ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
-                                       0, 1, CIFS_TLINK_MASTER_TAG);
-       spin_unlock(&cifs_sb->tlink_tree_lock);
-
-       /* the master tcon should always be present */
-       if (ret == 0)
-               BUG();
-
-       return tlink;
+       return cifs_sb->master_tlink;
 }
 
 struct cifsTconInfo *
@@ -3302,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
        return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+       struct rb_node *node = root->rb_node;
+       struct tcon_link *tlink;
+
+       while (node) {
+               tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+               if (tlink->tl_uid > uid)
+                       node = node->rb_left;
+               else if (tlink->tl_uid < uid)
+                       node = node->rb_right;
+               else
+                       return tlink;
+       }
+       return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+       struct tcon_link *tlink;
+
+       while (*new) {
+               tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+               parent = *new;
+
+               if (tlink->tl_uid > new_tlink->tl_uid)
+                       new = &((*new)->rb_left);
+               else
+                       new = &((*new)->rb_right);
+       }
+
+       rb_link_node(&new_tlink->tl_rbnode, parent, new);
+       rb_insert_color(&new_tlink->tl_rbnode, root);
+}
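
Both helpers are the stock <linux/rbtree.h> idiom, keyed on tl_uid; duplicates are avoided because every walk happens under tlink_tree_lock and callers re-search before inserting. Paraphrasing the cifs_sb_tlink() hunk further down, the expected usage is roughly:

            spin_lock(&cifs_sb->tlink_tree_lock);
            tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
            if (!tlink)
                    tlink_rb_insert(&cifs_sb->tlink_tree, newtlink);
            spin_unlock(&cifs_sb->tlink_tree_lock);
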
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3309,7 +3324,7 @@ cifs_sb_tcon_pending_wait(void *unused)
  * If the superblock doesn't refer to a multiuser mount, then just return
  * the master tcon for the mount.
  *
- * First, search the radix tree for an existing tcon for this fsuid. If one
+ * First, search the rbtree for an existing tcon for this fsuid. If one
  * exists, then check to see if it's pending construction. If it is then wait
  * for construction to complete. Once it's no longer pending, check to see if
  * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@ struct tcon_link *
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
        int ret;
-       unsigned long fsuid = (unsigned long) current_fsuid();
+       uid_t fsuid = current_fsuid();
        struct tcon_link *tlink, *newtlink;
 
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
        spin_lock(&cifs_sb->tlink_tree_lock);
-       tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+       tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
        if (tlink)
                cifs_get_tlink(tlink);
        spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
                newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
                if (newtlink == NULL)
                        return ERR_PTR(-ENOMEM);
-               newtlink->tl_index = fsuid;
+               newtlink->tl_uid = fsuid;
                newtlink->tl_tcon = ERR_PTR(-EACCES);
                set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
                set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
                cifs_get_tlink(newtlink);
 
-               ret = radix_tree_preload(GFP_KERNEL);
-               if (ret != 0) {
-                       kfree(newtlink);
-                       return ERR_PTR(ret);
-               }
-
                spin_lock(&cifs_sb->tlink_tree_lock);
                /* was one inserted after previous search? */
-               tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+               tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
                if (tlink) {
                        cifs_get_tlink(tlink);
                        spin_unlock(&cifs_sb->tlink_tree_lock);
-                       radix_tree_preload_end();
                        kfree(newtlink);
                        goto wait_for_construction;
                }
-               ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-               spin_unlock(&cifs_sb->tlink_tree_lock);
-               radix_tree_preload_end();
-               if (ret) {
-                       kfree(newtlink);
-                       return ERR_PTR(ret);
-               }
                tlink = newtlink;
+               tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+               spin_unlock(&cifs_sb->tlink_tree_lock);
        } else {
 wait_for_construction:
                ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
 {
        struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
                                                    prune_tlinks.work);
-       struct tcon_link *tlink[8];
-       unsigned long now = jiffies;
-       unsigned long index = 0;
-       int i, ret;
+       struct rb_root *root = &cifs_sb->tlink_tree;
+       struct rb_node *node = rb_first(root);
+       struct rb_node *tmp;
+       struct tcon_link *tlink;
 
-       do {
-               spin_lock(&cifs_sb->tlink_tree_lock);
-               ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-                                            (void **)tlink, index,
-                                            ARRAY_SIZE(tlink));
-               /* increment index for next pass */
-               if (ret > 0)
-                       index = tlink[ret - 1]->tl_index + 1;
-               for (i = 0; i < ret; i++) {
-                       if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-                           atomic_read(&tlink[i]->tl_count) != 0 ||
-                           time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-                                      now)) {
-                               tlink[i] = NULL;
-                               continue;
-                       }
-                       cifs_get_tlink(tlink[i]);
-                       clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-                       radix_tree_delete(&cifs_sb->tlink_tree,
-                                         tlink[i]->tl_index);
-               }
-               spin_unlock(&cifs_sb->tlink_tree_lock);
+       /*
+        * Because we drop the spinlock in the loop in order to put the tlink
+        * it's not guarded against removal of links from the tree. The only
+        * places that remove entries from the tree are this function and
+        * umounts. Because this function is non-reentrant and is canceled
+        * before umount can proceed, this is safe.
+        */
+       spin_lock(&cifs_sb->tlink_tree_lock);
+       node = rb_first(root);
+       while (node != NULL) {
+               tmp = node;
+               node = rb_next(tmp);
+               tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+               if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+                   atomic_read(&tlink->tl_count) != 0 ||
+                   time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+                       continue;
 
-               for (i = 0; i < ret; i++) {
-                       if (tlink[i] != NULL)
-                               cifs_put_tlink(tlink[i]);
-               }
-       } while (ret != 0);
+               cifs_get_tlink(tlink);
+               clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+               rb_erase(tmp, root);
+
+               spin_unlock(&cifs_sb->tlink_tree_lock);
+               cifs_put_tlink(tlink);
+               spin_lock(&cifs_sb->tlink_tree_lock);
+       }
+       spin_unlock(&cifs_sb->tlink_tree_lock);
 
        queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
                                TLINK_IDLE_EXPIRE);
index ae82159cf7fa6921bb3a27a1fc5e56fbb1045a5c..06c3e83fa387fecf63b93e33a0455dac6da45f6d 100644 (file)
@@ -146,12 +146,7 @@ client_can_cache:
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, NULL);
 
-       if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-               pCifsInode->clientCanCacheAll = true;
-               pCifsInode->clientCanCacheRead = true;
-               cFYI(1, "Exclusive Oplock granted on inode %p", inode);
-       } else if ((oplock & 0xF) == OPLOCK_READ)
-               pCifsInode->clientCanCacheRead = true;
+       cifs_set_oplock_level(pCifsInode, oplock);
 
        return rc;
 }
@@ -253,12 +248,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
                list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
        spin_unlock(&cifs_file_list_lock);
 
-       if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-               pCifsInode->clientCanCacheAll = true;
-               pCifsInode->clientCanCacheRead = true;
-               cFYI(1, "Exclusive Oplock inode %p", inode);
-       } else if ((oplock & 0xF) == OPLOCK_READ)
-               pCifsInode->clientCanCacheRead = true;
+       cifs_set_oplock_level(pCifsInode, oplock);
 
        file->private_data = pCifsFile;
        return pCifsFile;
@@ -271,8 +261,9 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+       struct inode *inode = cifs_file->dentry->d_inode;
        struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
-       struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifsLockInfo *li, *tmp;
 
        spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                        cifs_file->dentry->d_inode);
-               cifsi->clientCanCacheRead = false;
-               cifsi->clientCanCacheAll  = false;
+               cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);
 
@@ -607,8 +597,6 @@ reopen_success:
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);
 
-               pCifsInode->clientCanCacheAll = false;
-               pCifsInode->clientCanCacheRead = false;
                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode,
                                full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@ reopen_success:
             invalidate the current end of file on the server
             we can not go to the server to get the new inod
             info */
-       if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-               pCifsInode->clientCanCacheAll = true;
-               pCifsInode->clientCanCacheRead = true;
-               cFYI(1, "Exclusive Oplock granted on inode %p",
-                        pCifsFile->dentry->d_inode);
-       } else if ((oplock & 0xF) == OPLOCK_READ) {
-               pCifsInode->clientCanCacheRead = true;
-               pCifsInode->clientCanCacheAll = false;
-       } else {
-               pCifsInode->clientCanCacheRead = false;
-               pCifsInode->clientCanCacheAll = false;
-       }
+
+       cifs_set_oplock_level(pCifsInode, oplock);
+
        cifs_relock_file(pCifsFile);
 
 reopen_error_exit:
@@ -775,12 +754,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
-
-       if (file->private_data == NULL) {
-               rc = -EBADF;
-               FreeXid(xid);
-               return rc;
-       }
        netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
        if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
        size_t write_size, loff_t *poffset)
 {
+       struct inode *inode = file->f_path.dentry->d_inode;
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
@@ -963,7 +937,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
        struct cifsTconInfo *pTcon;
        int xid, long_op;
        struct cifsFileInfo *open_file;
-       struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
@@ -1029,21 +1003,17 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 
        cifs_stats_bytes_written(pTcon, total_written);
 
-       /* since the write may have blocked check these pointers again */
-       if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-               struct inode *inode = file->f_path.dentry->d_inode;
 /* Do not update local mtime - server will set its actual value on write
- *             inode->i_ctime = inode->i_mtime =
- *                     current_fs_time(inode->i_sb);*/
-               if (total_written > 0) {
-                       spin_lock(&inode->i_lock);
-                       if (*poffset > file->f_path.dentry->d_inode->i_size)
-                               i_size_write(file->f_path.dentry->d_inode,
-                                       *poffset);
-                       spin_unlock(&inode->i_lock);
-               }
-               mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ *     inode->i_ctime = inode->i_mtime =
+ *             current_fs_time(inode->i_sb);*/
+       if (total_written > 0) {
+               spin_lock(&inode->i_lock);
+               if (*poffset > inode->i_size)
+                       i_size_write(inode, *poffset);
+               spin_unlock(&inode->i_lock);
        }
+       mark_inode_dirty_sync(inode);
+
        FreeXid(xid);
        return total_written;
 }
@@ -1178,7 +1148,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                                        bool fsuid_only)
 {
        struct cifsFileInfo *open_file;
-       struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+       struct cifs_sb_info *cifs_sb;
        bool any_available = false;
        int rc;
 
@@ -1192,6 +1162,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
                return NULL;
        }
 
+       cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
index 39869c3c3efbc2a08b87ba6661e48ace7e9789eb..ef3a55bf86b6d3700c521e074954548c6f53223f 100644 (file)
@@ -2177,7 +2177,6 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
 
        setattr_copy(inode, attrs);
        mark_inode_dirty(inode);
-       return 0;
 
 cifs_setattr_exit:
        kfree(full_path);
index 077bf756f34231300c6b0d7fdae7cedf6023bd08..0c98672d01225ccadb67bb09705e7c21922332c7 100644 (file)
@@ -38,10 +38,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        struct cifs_sb_info *cifs_sb;
 #ifdef CONFIG_CIFS_POSIX
        struct cifsFileInfo *pSMBFile = filep->private_data;
-       struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
+       struct cifsTconInfo *tcon;
        __u64   ExtAttrBits = 0;
        __u64   ExtAttrMask = 0;
-       __u64   caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+       __u64   caps;
 #endif /* CONFIG_CIFS_POSIX */
 
        xid = GetXid();
@@ -62,9 +62,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        break;
 #ifdef CONFIG_CIFS_POSIX
                case FS_IOC_GETFLAGS:
+                       if (pSMBFile == NULL)
+                               break;
+                       tcon = tlink_tcon(pSMBFile->tlink);
+                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
                        if (CIFS_UNIX_EXTATTR_CAP & caps) {
-                               if (pSMBFile == NULL)
-                                       break;
                                rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
                                        &ExtAttrBits, &ExtAttrMask);
                                if (rc == 0)
@@ -75,13 +77,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        break;
 
                case FS_IOC_SETFLAGS:
+                       if (pSMBFile == NULL)
+                               break;
+                       tcon = tlink_tcon(pSMBFile->tlink);
+                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
                        if (CIFS_UNIX_EXTATTR_CAP & caps) {
                                if (get_user(ExtAttrBits, (int __user *)arg)) {
                                        rc = -EFAULT;
                                        break;
                                }
-                               if (pSMBFile == NULL)
-                                       break;
                                /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
                                        extAttrBits, &ExtAttrMask);*/
                        }
index c4e296fe351876252a48210f81f6cbae165562f8..43f10281bc19e80be48303e1bd9c974b4882cb37 100644 (file)
@@ -569,10 +569,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 
                                cFYI(1, "file id match, oplock break");
                                pCifsInode = CIFS_I(netfile->dentry->d_inode);
-                               pCifsInode->clientCanCacheAll = false;
-                               if (pSMB->OplockLevel == 0)
-                                       pCifsInode->clientCanCacheRead = false;
 
+                               cifs_set_oplock_level(pCifsInode,
+                                                     pSMB->OplockLevel);
                                /*
                                 * cifs_oplock_break_put() can't be called
                                 * from here.  Get reference after queueing
@@ -722,3 +721,23 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
                           cifs_sb_master_tcon(cifs_sb)->treeName);
        }
 }
+
+void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+{
+       oplock &= 0xF;
+
+       if (oplock == OPLOCK_EXCLUSIVE) {
+               cinode->clientCanCacheAll = true;
+               cinode->clientCanCacheRead = true;
+               cFYI(1, "Exclusive Oplock granted on inode %p",
+                    &cinode->vfs_inode);
+       } else if (oplock == OPLOCK_READ) {
+               cinode->clientCanCacheAll = false;
+               cinode->clientCanCacheRead = true;
+               cFYI(1, "Level II Oplock granted on inode %p",
+                   &cinode->vfs_inode);
+       } else {
+               cinode->clientCanCacheAll = false;
+               cinode->clientCanCacheRead = false;
+       }
+}
index 8b5dd6369f82c19d5ba28dea07018e4fb13a025f..6a5edea2d70b3ac7c56e8b272686b5a799eabbcf 100644 (file)
@@ -177,7 +177,7 @@ struct mpage_da_data {
 
 struct ext4_io_page {
        struct page     *p_page;
-       int             p_count;
+       atomic_t        p_count;
 };
 
 #define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@ struct ext4_inode_info {
        spinlock_t i_completed_io_lock;
        /* current io_end structure for async DIO write*/
        ext4_io_end_t *cur_aio_dio;
+       atomic_t i_ioend_count; /* Number of outstanding io_end structs */
 
        /*
         * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
 extern void ext4_exit_pageio(void);
+extern void ext4_ioend_wait(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern int ext4_end_io_nolock(ext4_io_end_t *io);
index 1916164704667dc3c190ab327781933fa7c5f4b8..bdbe69902207c151df025d98690c4c016cf0b0e9 100644 (file)
@@ -53,6 +53,7 @@
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
 {
+       trace_ext4_begin_ordered_truncate(inode, new_size);
        return jbd2_journal_begin_ordered_truncate(
                                        EXT4_SB(inode->i_sb)->s_journal,
                                        &EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@ void ext4_evict_inode(struct inode *inode)
        handle_t *handle;
        int err;
 
+       trace_ext4_evict_inode(inode);
        if (inode->i_nlink) {
                truncate_inode_pages(&inode->i_data, 0);
                goto no_delete;
@@ -5410,9 +5412,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * will return the blocks that include the delayed allocation
         * blocks for this file.
         */
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
        stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
        return 0;
@@ -5649,6 +5649,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
        int err, ret;
 
        might_sleep();
+       trace_ext4_mark_inode_dirty(inode, _RET_IP_);
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (ext4_handle_valid(handle) &&
            EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
index c58eba34724a4281f1cb8bb405ab607ef09b49ff..5b4d4e3a4d58e3506c15e891e7b8df394bc0838f 100644 (file)
@@ -4640,8 +4640,6 @@ do_more:
                 * with group lock held. generate_buddy look at
                 * them with group lock_held
                 */
-               if (test_opt(sb, DISCARD))
-                       ext4_issue_discard(sb, block_group, bit, count);
                ext4_lock_group(sb, block_group);
                mb_clear_bits(bitmap_bh->b_data, bit, count);
                mb_free_blocks(inode, &e4b, bit, count);
index 46a7d6a9d9764b82075071c94f661cc65c3675fc..7f5451cd1d38bff2399471750618fc7429ce5c5e 100644 (file)
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
+#define WQ_HASH_SZ             37
+#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
+static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
+
 int __init ext4_init_pageio(void)
 {
+       int i;
+
        io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
        if (io_page_cachep == NULL)
                return -ENOMEM;
@@ -42,6 +48,8 @@ int __init ext4_init_pageio(void)
                kmem_cache_destroy(io_page_cachep);
                return -ENOMEM;
        }
+       for (i = 0; i < WQ_HASH_SZ; i++)
+               init_waitqueue_head(&ioend_wq[i]);
 
        return 0;
 }
@@ -52,24 +60,37 @@ void ext4_exit_pageio(void)
        kmem_cache_destroy(io_page_cachep);
 }
 
+void ext4_ioend_wait(struct inode *inode)
+{
+       wait_queue_head_t *wq = to_ioend_wq(inode);
+
+       wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+}
+
+static void put_io_page(struct ext4_io_page *io_page)
+{
+       if (atomic_dec_and_test(&io_page->p_count)) {
+               end_page_writeback(io_page->p_page);
+               put_page(io_page->p_page);
+               kmem_cache_free(io_page_cachep, io_page);
+       }
+}
+
 void ext4_free_io_end(ext4_io_end_t *io)
 {
        int i;
+       wait_queue_head_t *wq;
 
        BUG_ON(!io);
        if (io->page)
                put_page(io->page);
-       for (i = 0; i < io->num_io_pages; i++) {
-               if (--io->pages[i]->p_count == 0) {
-                       struct page *page = io->pages[i]->p_page;
-
-                       end_page_writeback(page);
-                       put_page(page);
-                       kmem_cache_free(io_page_cachep, io->pages[i]);
-               }
-       }
+       for (i = 0; i < io->num_io_pages; i++)
+               put_io_page(io->pages[i]);
        io->num_io_pages = 0;
-       iput(io->inode);
+       wq = to_ioend_wq(io->inode);
+       if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
+           waitqueue_active(wq))
+               wake_up_all(wq);
        kmem_cache_free(io_end_cachep, io);
 }
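
This hunk is half of a new io_end lifetime rule: ext4_init_io_end() (below) raises a per-inode i_ioend_count instead of pinning the inode with igrab(), and the final decrement here wakes ext4_ioend_wait(), which ext4_destroy_inode() now uses to drain outstanding writeback before the inode is freed. Because the wait queue is picked by hashing the inode pointer, unrelated inodes can share a queue -- hence wake_up_all() plus the counter re-check inside wait_event(). A condensed sketch of the dec-and-wake idiom (names hypothetical):

    static atomic_t pending = ATOMIC_INIT(0);       /* stands in for i_ioend_count */
    static DECLARE_WAIT_QUEUE_HEAD(pending_wq);     /* stands in for ioend_wq[h]   */

    static void example_put(void)
    {
            if (atomic_dec_and_test(&pending) && waitqueue_active(&pending_wq))
                    wake_up_all(&pending_wq);
    }

    static void example_wait(void)
    {
            wait_event(pending_wq, atomic_read(&pending) == 0);
    }
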
 
@@ -142,8 +163,8 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
        io = kmem_cache_alloc(io_end_cachep, flags);
        if (io) {
                memset(io, 0, sizeof(*io));
-               io->inode = igrab(inode);
-               BUG_ON(!io->inode);
+               atomic_inc(&EXT4_I(inode)->i_ioend_count);
+               io->inode = inode;
                INIT_WORK(&io->work, ext4_end_io_work);
                INIT_LIST_HEAD(&io->list);
        }
@@ -171,35 +192,15 @@ static void ext4_end_bio(struct bio *bio, int error)
        struct workqueue_struct *wq;
        struct inode *inode;
        unsigned long flags;
-       ext4_fsblk_t err_block;
        int i;
 
        BUG_ON(!io_end);
-       inode = io_end->inode;
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = 0;
-       err_block = bio->bi_sector >> (inode->i_blkbits - 9);
        bio_put(bio);
 
-       if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
-               pr_err("sb umounted, discard end_io request for inode %lu\n",
-                       io_end->inode->i_ino);
-               ext4_free_io_end(io_end);
-               return;
-       }
-
-       if (error) {
-               io_end->flag |= EXT4_IO_END_ERROR;
-               ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
-                            "(offset %llu size %ld starting block %llu)",
-                            inode->i_ino,
-                            (unsigned long long) io_end->offset,
-                            (long) io_end->size,
-                            (unsigned long long) err_block);
-       }
-
        for (i = 0; i < io_end->num_io_pages; i++) {
                struct page *page = io_end->pages[i]->p_page;
                struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
                        } while (bh != head);
                }
 
-               if (--io_end->pages[i]->p_count == 0) {
-                       struct page *page = io_end->pages[i]->p_page;
-
-                       end_page_writeback(page);
-                       put_page(page);
-                       kmem_cache_free(io_page_cachep, io_end->pages[i]);
-               }
+               put_io_page(io_end->pages[i]);
 
                /*
                 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@ static void ext4_end_bio(struct bio *bio, int error)
                if (!partial_write)
                        SetPageUptodate(page);
        }
-
        io_end->num_io_pages = 0;
+       inode = io_end->inode;
+
+       if (error) {
+               io_end->flag |= EXT4_IO_END_ERROR;
+               ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+                            "(offset %llu size %ld starting block %llu)",
+                            inode->i_ino,
+                            (unsigned long long) io_end->offset,
+                            (long) io_end->size,
+                            (unsigned long long)
+                            bio->bi_sector >> (inode->i_blkbits - 9));
+       }
 
        /* Add the io_end to per-inode completed io list*/
        spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@ static int io_submit_init(struct ext4_io_submit *io,
        bio->bi_private = io->io_end = io_end;
        bio->bi_end_io = ext4_end_bio;
 
-       io_end->inode = inode;
        io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
        io->io_bio = bio;
@@ -360,7 +365,7 @@ submit_and_retry:
        if ((io_end->num_io_pages == 0) ||
            (io_end->pages[io_end->num_io_pages-1] != io_page)) {
                io_end->pages[io_end->num_io_pages++] = io_page;
-               io_page->p_count++;
+               atomic_inc(&io_page->p_count);
        }
        return 0;
 }
@@ -389,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
                return -ENOMEM;
        }
        io_page->p_page = page;
-       io_page->p_count = 0;
+       atomic_set(&io_page->p_count, 1);
        get_page(page);
 
        for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
         * PageWriteback bit from the page to prevent the system from
         * wedging later on.
         */
-       if (io_page->p_count == 0) {
-               put_page(page);
-               end_page_writeback(page);
-               kmem_cache_free(io_page_cachep, io_page);
-       }
+       put_io_page(io_page);
        return ret;
 }
index 40131b777af66c3112bb1df16265faadac435d0f..61182fe6254e94ad606a6c67fed1b30ef289ad38 100644 (file)
@@ -828,12 +828,22 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->cur_aio_dio = NULL;
        ei->i_sync_tid = 0;
        ei->i_datasync_tid = 0;
+       atomic_set(&ei->i_ioend_count, 0);
 
        return &ei->vfs_inode;
 }
 
+static int ext4_drop_inode(struct inode *inode)
+{
+       int drop = generic_drop_inode(inode);
+
+       trace_ext4_drop_inode(inode, drop);
+       return drop;
+}
+
 static void ext4_destroy_inode(struct inode *inode)
 {
+       ext4_ioend_wait(inode);
        if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
                ext4_msg(inode->i_sb, KERN_ERR,
                         "Inode %lu (%p): orphan list check failed!",
@@ -1173,6 +1183,7 @@ static const struct super_operations ext4_sops = {
        .destroy_inode  = ext4_destroy_inode,
        .write_inode    = ext4_write_inode,
        .dirty_inode    = ext4_dirty_inode,
+       .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
        .put_super      = ext4_put_super,
        .sync_fs        = ext4_sync_fs,
@@ -1194,6 +1205,7 @@ static const struct super_operations ext4_nojournal_sops = {
        .destroy_inode  = ext4_destroy_inode,
        .write_inode    = ext4_write_inode,
        .dirty_inode    = ext4_dirty_inode,
+       .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
        .write_super    = ext4_write_super,
        .put_super      = ext4_put_super,
@@ -2699,7 +2711,6 @@ static int ext4_lazyinit_thread(void *arg)
        struct ext4_li_request *elr;
        unsigned long next_wakeup;
        DEFINE_WAIT(wait);
-       int ret;
 
        BUG_ON(NULL == eli);
 
@@ -2723,13 +2734,12 @@ cont_thread:
                        elr = list_entry(pos, struct ext4_li_request,
                                         lr_request);
 
-                       if (time_after_eq(jiffies, elr->lr_next_sched))
-                               ret = ext4_run_li_request(elr);
-
-                       if (ret) {
-                               ret = 0;
-                               ext4_remove_li_request(elr);
-                               continue;
+                       if (time_after_eq(jiffies, elr->lr_next_sched)) {
+                               if (ext4_run_li_request(elr) != 0) {
+                                       /* error, remove the lazy_init job */
+                                       ext4_remove_li_request(elr);
+                                       continue;
+                               }
                        }
 
                        if (time_before(elr->lr_next_sched, next_wakeup))
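
The rewrite of this loop body also removes a latent bug: previously "ret" was assigned only when the time check passed but was tested on every iteration, so a stale (or, on the first pass, uninitialized) value could remove a lazy-init request that had never run. The shape of the defect, condensed:

    /*
     *      int ret;                                    -- never initialized
     *      ...
     *      if (time_after_eq(jiffies, elr->lr_next_sched))
     *              ret = ext4_run_li_request(elr);     -- only sometimes set
     *      if (ret) { ... ext4_remove_li_request(elr); ... }
     */

Folding the call into the condition, as the hunk does, removes the stray variable entirely (the now-unused declaration is deleted in the earlier hunk).
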
@@ -2740,7 +2750,8 @@ cont_thread:
                if (freezing(current))
                        refrigerator();
 
-               if (time_after_eq(jiffies, next_wakeup)) {
+               if ((time_after_eq(jiffies, next_wakeup)) ||
+                   (MAX_JIFFY_OFFSET == next_wakeup)) {
                        cond_resched();
                        continue;
                }
@@ -3348,6 +3359,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
+       err = percpu_counter_init(&sbi->s_freeblocks_counter,
+                       ext4_count_free_blocks(sb));
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_freeinodes_counter,
+                               ext4_count_free_inodes(sb));
+       }
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_dirs_counter,
+                               ext4_count_dirs(sb));
+       }
+       if (!err) {
+               err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
+       }
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "insufficient memory");
+               goto failed_mount3;
+       }
+
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_max_writeback_mb_bump = 128;
 
@@ -3446,22 +3475,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
-no_journal:
-       err = percpu_counter_init(&sbi->s_freeblocks_counter,
-                                 ext4_count_free_blocks(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_freeinodes_counter,
-                                         ext4_count_free_inodes(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_dirs_counter,
-                                         ext4_count_dirs(sb));
-       if (!err)
-               err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "insufficient memory");
-               goto failed_mount_wq;
-       }
+       /*
+        * The journal may have updated the bg summary counts, so we
+        * need to update the global counters.
+        */
+       percpu_counter_set(&sbi->s_freeblocks_counter,
+                          ext4_count_free_blocks(sb));
+       percpu_counter_set(&sbi->s_freeinodes_counter,
+                          ext4_count_free_inodes(sb));
+       percpu_counter_set(&sbi->s_dirs_counter,
+                          ext4_count_dirs(sb));
+       percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
 
+no_journal:
        EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
        if (!EXT4_SB(sb)->dio_unwritten_wq) {
                printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
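
Reordering the counter setup serves two purposes visible in these hunks: allocating the percpu counters before the journal is loaded lets an allocation failure unwind through failed_mount3 (the matching percpu_counter_destroy() calls move onto that path in the next hunk), and the percpu_counter_set() calls refresh the values afterwards because journal replay may have changed the block-group summary counts. Condensed, the new order is roughly:

            /* early in ext4_fill_super(): allocate, fail to failed_mount3 */
            err = percpu_counter_init(&sbi->s_freeblocks_counter,
                                      ext4_count_free_blocks(sb));
            ...
            /* after the journal is loaded: re-sync with the replayed state */
            percpu_counter_set(&sbi->s_freeblocks_counter,
                               ext4_count_free_blocks(sb));
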
@@ -3611,10 +3637,6 @@ failed_mount_wq:
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
        }
-       percpu_counter_destroy(&sbi->s_freeblocks_counter);
-       percpu_counter_destroy(&sbi->s_freeinodes_counter);
-       percpu_counter_destroy(&sbi->s_dirs_counter);
-       percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount3:
        if (sbi->s_flex_groups) {
                if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3622,6 +3644,10 @@ failed_mount3:
                else
                        kfree(sbi->s_flex_groups);
        }
+       percpu_counter_destroy(&sbi->s_freeblocks_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+       percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
@@ -3949,13 +3975,11 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        else
                es->s_kbytes_written =
                        cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
-               ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
-                                       &EXT4_SB(sb)->s_freeblocks_counter));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
-               es->s_free_inodes_count =
-                       cpu_to_le32(percpu_counter_sum_positive(
-                                       &EXT4_SB(sb)->s_freeinodes_counter));
+       ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+                                          &EXT4_SB(sb)->s_freeblocks_counter));
+       es->s_free_inodes_count =
+               cpu_to_le32(percpu_counter_sum_positive(
+                               &EXT4_SB(sb)->s_freeinodes_counter));
        sb->s_dirt = 0;
        BUFFER_TRACE(sbh, "marking dirty");
        mark_buffer_dirty(sbh);
@@ -4556,12 +4580,10 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
-       /* Force all delayed allocation blocks to be allocated */
-       if (test_opt(sb, DELALLOC)) {
-               down_read(&sb->s_umount);
+       /* Force all delayed allocation blocks to be allocated.
+        * Caller already holds s_umount sem */
+       if (test_opt(sb, DELALLOC))
                sync_filesystem(sb);
-               up_read(&sb->s_umount);
-       }
 
        return dquot_quota_off(sb, type);
 }
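
The ext4 hunks above are one logical change: the percpu counters are now set up before the journal is loaded (so the error paths and ext4_commit_super() can rely on them unconditionally) and then re-synced with percpu_counter_set() once replay may have rewritten the block-group summaries. A minimal sketch of that lifecycle, using the same percpu_counter calls but with hypothetical my_* helpers standing in for the ext4 mount path:

#include <linux/fs.h>
#include <linux/percpu_counter.h>

/* Sketch only: my_replay_journal() and my_count_free_blocks() are made-up
 * stand-ins for the ext4-specific steps, not functions from this patch. */
static int my_replay_journal(struct super_block *sb);
static s64 my_count_free_blocks(struct super_block *sb);

static int my_setup_counters(struct super_block *sb, struct percpu_counter *free)
{
	int err;

	/* 1. Initialize before journal recovery, so the counter always
	 *    exists by the time any error path or commit_super runs. */
	err = percpu_counter_init(free, my_count_free_blocks(sb));
	if (err)
		return err;			/* -> failed_mount3 above */

	/* 2. Replay the journal; this may rewrite the bg summary counts. */
	err = my_replay_journal(sb);
	if (err) {
		percpu_counter_destroy(free);	/* as the failed_mount3 hunk
						 * now does after flex_groups */
		return err;
	}

	/* 3. Re-sync the global counter with the post-replay on-disk state. */
	percpu_counter_set(free, my_count_free_blocks(sb));
	return 0;
}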
index ac943c1307b5e33272f2d91d07f95c6a332f3e7a..aa996471ec5c961d7d94c62c10a20d29ac8e1bbb 100644 (file)
@@ -629,8 +629,6 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 
 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
 {
-       if (arg != F_UNLCK)
-               locks_free_lock(*fl);
        return -EINVAL;
 }
 
index eac5f96323e3d9640a197ee5eb10acdbf6046591..793cb9d943d21a70584958c27fba6b2f97b37f93 100644 (file)
@@ -14,7 +14,7 @@ void hpfs_lock_creation(struct super_block *s)
 #ifdef DEBUG_LOCKS
        printk("lock creation\n");
 #endif
-       down(&hpfs_sb(s)->hpfs_creation_de);
+       mutex_lock(&hpfs_sb(s)->hpfs_creation_de);
 }
 
 void hpfs_unlock_creation(struct super_block *s)
@@ -22,7 +22,7 @@ void hpfs_unlock_creation(struct super_block *s)
 #ifdef DEBUG_LOCKS
        printk("unlock creation\n");
 #endif
-       up(&hpfs_sb(s)->hpfs_creation_de);
+       mutex_unlock(&hpfs_sb(s)->hpfs_creation_de);
 }
 
 /* Map a sector into a buffer and return pointers to it and to the buffer. */
index b59eac0232a0f057063fac03d747a8b627560056..2fee17d0d9ab37ac67602007b0ab556f9e9eb22d 100644 (file)
@@ -87,7 +87,7 @@ struct hpfs_sb_info {
        unsigned *sb_bmp_dir;           /* main bitmap directory */
        unsigned sb_c_bitmap;           /* current bitmap */
        unsigned sb_max_fwd_alloc;      /* max forwad allocation */
-       struct semaphore hpfs_creation_de; /* when creating dirents, nobody else
+       struct mutex hpfs_creation_de;  /* when creating dirents, nobody else
                                           can alloc blocks */
        /*unsigned sb_mounting : 1;*/
        int sb_timeshift;
index bb69389972eb33148dfdc4176ea87b7003cb39f0..6c5f01597c3af07a21ec162b996437545c6e13af 100644 (file)
@@ -491,7 +491,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        sbi->sb_bmp_dir = NULL;
        sbi->sb_cp_table = NULL;
 
-       init_MUTEX(&sbi->hpfs_creation_de);
+       mutex_init(&sbi->hpfs_creation_de);
 
        uid = current_uid();
        gid = current_gid();
index d6cfac1f0a40cc20357961bd7895f6985440dee7..a5fe68189eed00d76e0d3027fead3edc648c9aff 100644 (file)
@@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
-                       WARN_ONCE(1,
-                         "Using mlock ulimits for SHM_HUGETLB deprecated\n");
+                       printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
index 538417c1fdbbcbb630cbcdb885c74ebe5f433844..c590d155c0959bfbe1f8ce2f6be9702afd2904f4 100644 (file)
@@ -1838,7 +1838,6 @@ size_t journal_tag_bytes(journal_t *journal)
  */
 #define JBD2_MAX_SLABS 8
 static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
-static DECLARE_MUTEX(jbd2_slab_create_sem);
 
 static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
        "jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
@@ -1859,6 +1858,7 @@ static void jbd2_journal_destroy_slabs(void)
 
 static int jbd2_journal_create_slab(size_t size)
 {
+       static DEFINE_MUTEX(jbd2_slab_create_mutex);
        int i = order_base_2(size) - 10;
        size_t slab_size;
 
@@ -1870,16 +1870,16 @@ static int jbd2_journal_create_slab(size_t size)
 
        if (unlikely(i < 0))
                i = 0;
-       down(&jbd2_slab_create_sem);
+       mutex_lock(&jbd2_slab_create_mutex);
        if (jbd2_slab[i]) {
-               up(&jbd2_slab_create_sem);
+               mutex_unlock(&jbd2_slab_create_mutex);
                return 0;       /* Already created */
        }
 
        slab_size = 1 << (i+10);
        jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
                                         slab_size, 0, NULL);
-       up(&jbd2_slab_create_sem);
+       mutex_unlock(&jbd2_slab_create_mutex);
        if (!jbd2_slab[i]) {
                printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
                return -ENOMEM;
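
The hpfs and jbd2 hunks above (together with the include/linux/semaphore.h hunk later in this diff that deletes DECLARE_MUTEX()/init_MUTEX()) all apply the same recipe: a semaphore that is only ever used as a binary lock becomes a struct mutex, which is cheaper and visible to lockdep. A small hedged sketch of the pattern with a made-up lock name:

#include <linux/mutex.h>

/*
 * Before (the form being removed):
 *	static DECLARE_MUTEX(my_lock);
 *	down(&my_lock);   ...   up(&my_lock);
 * After:
 */
static DEFINE_MUTEX(my_lock);		/* "my_lock" is an example name */

static void my_critical_section(void)
{
	mutex_lock(&my_lock);
	/* ... touch whatever my_lock protects ... */
	mutex_unlock(&my_lock);
}

For a lock embedded in a dynamically allocated object (the hpfs case), the equivalent conversion is mutex_init() at setup time in place of init_MUTEX().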
index 5b526a9778828977871d01e610fdfd797da0527a..0e62dd35d088486ee2c04c62f312ffb2b0ca5149 100644 (file)
@@ -235,11 +235,8 @@ static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
                        fl->fl_ops->fl_copy_lock(new, fl);
                new->fl_ops = fl->fl_ops;
        }
-       if (fl->fl_lmops) {
-               if (fl->fl_lmops->fl_copy_lock)
-                       fl->fl_lmops->fl_copy_lock(new, fl);
+       if (fl->fl_lmops)
                new->fl_lmops = fl->fl_lmops;
-       }
 }
 
 /*
@@ -1428,8 +1425,9 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                goto out;
 
        if (my_before != NULL) {
-               *flp = *my_before;
                error = lease->fl_lmops->fl_change(my_before, arg);
+               if (!error)
+                       *flp = *my_before;
                goto out;
        }
 
@@ -1444,8 +1442,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
        return 0;
 
 out:
-       if (arg != F_UNLCK)
-               locks_free_lock(lease);
        return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1508,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp)
 
 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
 {
-       struct file_lock *fl;
+       struct file_lock *fl, *ret;
        struct fasync_struct *new;
-       struct inode *inode = filp->f_path.dentry->d_inode;
        int error;
 
        fl = lease_alloc(filp, arg);
@@ -1522,10 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
                locks_free_lock(fl);
                return -ENOMEM;
        }
+       ret = fl;
        lock_flocks();
-       error = __vfs_setlease(filp, arg, &fl);
-       if (error)
-               goto out_unlock;
+       error = __vfs_setlease(filp, arg, &ret);
+       if (error) {
+               unlock_flocks();
+               locks_free_lock(fl);
+               goto out_free_fasync;
+       }
+       if (ret != fl)
+               locks_free_lock(fl);
 
        /*
         * fasync_insert_entry() returns the old entry if any.
@@ -1533,20 +1534,13 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
         * inserted it into the fasync list. Clear new so that
         * we don't release it here.
         */
-       if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+       if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
                new = NULL;
 
-       if (error < 0) {
-               /* remove lease just inserted by setlease */
-               fl->fl_type = F_UNLCK | F_INPROGRESS;
-               fl->fl_break_time = jiffies - 10;
-               time_out_leases(inode);
-               goto out_unlock;
-       }
-
        error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-out_unlock:
        unlock_flocks();
+
+out_free_fasync:
        if (new)
                fasync_free(new);
        return error;
index cd51a36b37f0956bf7eafde8ced983d5ae437b05..57afd4a6fabbb992b0b08ea9bb5399071ea576c2 100644 (file)
@@ -486,7 +486,7 @@ static inline int logfs_get_sb_bdev(struct logfs_super *s,
 
 /* dev_mtd.c */
 #ifdef CONFIG_MTD
-int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
+int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
 #else
 static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
 {
index 1e524fb73ba56200be86a3675181c78d7bd02fde..60677f9f13110d1a98774595de819fddf2c0ccdf 100644 (file)
@@ -884,7 +884,5 @@ static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
        dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
                        file->f_path.dentry->d_parent->d_name.name,
                        file->f_path.dentry->d_name.name, arg);
-       if (arg != F_UNLCK)
-               locks_free_lock(*fl);
        return -EINVAL;
 }
index b7f818b0580c2667698b4cd2c0c14e8376cfa789..ad2bfa68d534d28531c3788b955734b5d9a76985 100644 (file)
@@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
        spin_unlock(&clp->cl_lock);
 }
 
-static void nfsd4_register_conn(struct nfsd4_conn *conn)
+static int nfsd4_register_conn(struct nfsd4_conn *conn)
 {
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
-       register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
+       return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
 {
        struct nfsd4_conn *conn;
        u32 flags = NFS4_CDFC4_FORE;
+       int ret;
 
        if (ses->se_flags & SESSION4_BACK_CHAN)
                flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
        if (!conn)
                return nfserr_jukebox;
        nfsd4_hash_conn(conn, ses);
-       nfsd4_register_conn(conn);
+       ret = nfsd4_register_conn(conn);
+       if (ret)
+               /* oops; xprt is already down: */
+               nfsd4_conn_lost(&conn->cn_xpt_user);
        return nfs_ok;
 }
 
@@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
 {
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_conn *c;
+       int ret;
 
        spin_lock(&clp->cl_lock);
        c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
        }
        __nfsd4_hash_conn(new, ses);
        spin_unlock(&clp->cl_lock);
-       nfsd4_register_conn(new);
+       ret = nfsd4_register_conn(new);
+       if (ret)
+               /* oops; xprt is already down: */
+               nfsd4_conn_lost(&new->cn_xpt_user);
        return;
 }
 
@@ -2652,6 +2660,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
        if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
                dprintk("NFSD: setlease failed [%d], no delegation\n", status);
                dp->dl_flock = NULL;
+               locks_free_lock(fl);
                unhash_delegation(dp);
                flag = NFS4_OPEN_DELEGATE_NONE;
                goto out;
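
The locks.c hunks above, together with the gfs2, nfs and nfsd hunks in this diff, change who owns a struct file_lock around setlease: the setlease path no longer frees the caller's lock on failure, and on success it may hand back a different, pre-existing lock through the pointer. A hedged sketch of the caller-side convention, mirroring do_fcntl_add_lease() above (my_add_lease() is not a real kernel function, and lease_alloc() is internal to fs/locks.c):

#include <linux/fs.h>
#include <linux/err.h>

static int my_add_lease(struct file *filp, long arg)
{
	struct file_lock *fl, *ret;
	int error;

	fl = lease_alloc(filp, arg);		/* caller allocates and owns fl */
	if (IS_ERR(fl))
		return PTR_ERR(fl);

	ret = fl;
	error = vfs_setlease(filp, arg, &ret);
	if (error) {
		locks_free_lock(fl);		/* failure: fl is still ours */
		return error;
	}
	if (ret != fl)
		locks_free_lock(fl);		/* an existing lease was reused */
	return 0;
}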
index ddb1f41376e532060da94bdbcf5f12502012ed95..911e61f348fc0fecb5dd748e96153c0c21974c10 100644 (file)
@@ -418,7 +418,7 @@ out_no_root:
 static struct dentry *openprom_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
-       return mount_single(fs_type, flags, data, openprom_fill_super)
+       return mount_single(fs_type, flags, data, openprom_fill_super);
 }
 
 static struct file_system_type openprom_fs_type = {
index c9af48fffcd77fc559f73d5d97317e22ec4d3413..7d287afccde58ac16c854ee5dc2eb31f2f7b1637 100644 (file)
@@ -1111,11 +1111,12 @@ xfs_vm_writepage(
                        uptodate = 0;
 
                /*
-                * A hole may still be marked uptodate because discard_buffer
-                * leaves the flag set.
+                * set_page_dirty dirties all buffers in a page, independent
+                * of their state.  The dirty state however is entirely
+                * meaningless for holes (!mapped && uptodate), so skip
+                * buffers covering holes here.
                 */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
-                       ASSERT(!buffer_dirty(bh));
                        imap_valid = 0;
                        continue;
                }
index 63fd2c07cb57d861837bf463d9ad7b3aa5392088..aa1d353def29c64903cf02db9af159d612259b87 100644 (file)
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split(
        INIT_LIST_HEAD(list);
        spin_lock(dwlk);
        list_for_each_entry_safe(bp, n, dwq, b_list) {
-               trace_xfs_buf_delwri_split(bp, _RET_IP_);
                ASSERT(bp->b_flags & XBF_DELWRI);
 
                if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split(
                                         _XBF_RUN_QUEUES);
                        bp->b_flags |= XBF_WRITE;
                        list_move_tail(&bp->b_list, list);
+                       trace_xfs_buf_delwri_split(bp, _RET_IP_);
                } else
                        skipped++;
        }
index 2ea238f6d38eb0504187d674a70c57d9acc5b382..ad442d9e392e480ab242f5d13922ee3ec0feab2e 100644 (file)
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle(
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
 
-       kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
+       kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
        if (!kbuf)
                goto out_dput;
 
index 96107efc0c61908c3f1a94a646604836fc2131e1..94d5fd6a2973042239bd22f19047947494f57581 100644 (file)
@@ -762,7 +762,8 @@ xfs_setup_inode(
        inode->i_state = I_NEW;
 
        inode_sb_list_add(inode);
-       insert_inode_hash(inode);
+       /* make the inode look hashed for the writeback code */
+       hlist_add_fake(&inode->i_hash);
 
        inode->i_mode   = ip->i_d.di_mode;
        inode->i_nlink  = ip->i_d.di_nlink;
index 9f3a78fe6ae4e1f8463129d0d8ef5967bdde2408..064f964d4f3c201ce52bf6bb42e7dbbd3c0526bd 100644 (file)
@@ -353,9 +353,6 @@ xfs_parseargs(
                        mp->m_qflags &= ~XFS_OQUOTA_ENFD;
                } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
                        mp->m_flags |= XFS_MOUNT_DELAYLOG;
-                       cmn_err(CE_WARN,
-                               "Enabling EXPERIMENTAL delayed logging feature "
-                               "- use at your own risk.\n");
                } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
                        mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
                } else if (!strcmp(this_char, "ihashsize")) {
index 37d33254981da7026cffb7e586f072c1a93eae9d..afb0d7cfad1cceafef852098d955cd8dfb42bee3 100644 (file)
@@ -853,6 +853,7 @@ restart:
                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
+                               xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
index 9b715dce5699df0833ea92058683a5b9f30f8868..9124425b7f2fe9a07c3479047e7c3ea7c3a74693 100644 (file)
@@ -744,9 +744,15 @@ xfs_filestream_new_ag(
         * If the file's parent directory is known, take its iolock in exclusive
         * mode to prevent two sibling files from racing each other to migrate
         * themselves and their parent to different AGs.
+        *
+        * Note that we lock the parent directory iolock inside the child
+        * iolock here.  That's fine as we never hold both parent and child
+        * iolock in any other place.  This is different from the ilock,
+        * which requires locking of the child after the parent for namespace
+        * operations.
         */
        if (pip)
-               xfs_ilock(pip, XFS_IOLOCK_EXCL);
+               xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 
        /*
         * A new AG needs to be found for the file.  If the file's parent
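
The comment block above documents a deliberate lock-ordering exception: for the iolock the child is locked before its parent directory (the reverse of the ilock rule), and the XFS_IOLOCK_PARENT subclass is what tells lockdep that this nesting is intentional. A hedged sketch of the nesting (the function name is made up; inside fs/xfs the xfs_inode definitions are assumed available):

#include "xfs_inode.h"

static void my_lock_child_then_parent(struct xfs_inode *ip, struct xfs_inode *pip)
{
	/* child's iolock is taken first ... */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* ... and the parent directory is nested inside it, with the
	 * PARENT subclass so lockdep accepts the nesting */
	if (pip)
		xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);

	/* ... pick a new AG for the file, as above ... */

	if (pip)
		xfs_iunlock(pip, XFS_IOLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}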
index b1498ab5a399b627b3f7b643e2bca6256ae5a49f..19e9dfa1c2543e3a47c05ea1a25cb954b47d9736 100644 (file)
@@ -275,6 +275,7 @@ xfs_free_perag(
                pag = radix_tree_delete(&mp->m_perag_tree, agno);
                spin_unlock(&mp->m_perag_lock);
                ASSERT(pag);
+               ASSERT(atomic_read(&pag->pag_ref) == 0);
                call_rcu(&pag->rcu_head, __xfs_free_perag);
        }
 }
index e0e64b113bd66e91b22933ab6eaf8192e67e936a..9bb6eda4cd215903077612233bf3b6c0d1577327 100644 (file)
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
 #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
 #define xfs_trans_apply_dquot_deltas(tp)
 #define xfs_trans_unreserve_and_mod_dquots(tp)
-#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags)     (0)
-#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl)     (0)
+static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+               struct xfs_inode *ip, long nblks, long ninos, uint flags)
+{
+       return 0;
+}
+static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
+               struct xfs_mount *mp, struct xfs_dquot *udqp,
+               struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
+{
+       return 0;
+}
 #define xfs_qm_vop_create_dqattach(tp, ip, u, g)
 #define xfs_qm_vop_rename_dqattach(it)                                 (0)
 #define xfs_qm_vop_chown(tp, ip, old, new)                             (NULL)
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-#define xfs_qm_sync(mp, fl)                                            (0)
+static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
+{
+       return 0;
+}
 #define xfs_qm_newmount(mp, a, b)                                      (0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
-#define xfs_qm_unmount_quotas(mp)                                      (0)
+#define xfs_qm_unmount_quotas(mp)
 #endif /* CONFIG_XFS_QUOTA */
 
 #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
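
The quota.h hunk above swaps '(0)'-returning macro stubs for static inline stubs in the !CONFIG_XFS_QUOTA build. The likely motivation is the usual one for this conversion: a macro stub makes its arguments vanish, so callers get no type checking and newer gcc warns that the variables passed in are "set but not used"; a static inline still references and type-checks every argument while compiling away completely. A small made-up illustration:

/* Illustration only; my_account() and CONFIG_MY_FEATURE are invented. */
struct my_ctx;

#ifdef CONFIG_MY_FEATURE
int my_account(struct my_ctx *ctx, long nblks);
#else
/*
 * A macro stub such as
 *	#define my_account(ctx, nblks)	(0)
 * silently discards ctx and nblks. The inline stub below keeps both
 * arguments type-checked and "used", yet generates no code.
 */
static inline int my_account(struct my_ctx *ctx, long nblks)
{
	return 0;
}
#endif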
index 47e64170305d8f862e2f67d73420eb3e0a8ed9e4..bd8cad21998e0e41ac88435b55ae9f3d275c0283 100644 (file)
@@ -33,18 +33,18 @@ struct stat {
        int             st_blksize;     /* Optimal block size for I/O.  */
        int             __pad2;
        long            st_blocks;      /* Number 512-byte blocks allocated. */
-       int             st_atime;       /* Time of last access.  */
-       unsigned int    st_atime_nsec;
-       int             st_mtime;       /* Time of last modification.  */
-       unsigned int    st_mtime_nsec;
-       int             st_ctime;       /* Time of last status change.  */
-       unsigned int    st_ctime_nsec;
+       long            st_atime;       /* Time of last access.  */
+       unsigned long   st_atime_nsec;
+       long            st_mtime;       /* Time of last modification.  */
+       unsigned long   st_mtime_nsec;
+       long            st_ctime;       /* Time of last status change.  */
+       unsigned long   st_ctime_nsec;
        unsigned int    __unused4;
        unsigned int    __unused5;
 };
 
-#if __BITS_PER_LONG != 64
 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
 struct stat64 {
        unsigned long long st_dev;      /* Device.  */
        unsigned long long st_ino;      /* File serial number.  */
index 5afa5b52063e6d385926de88ec49faf1324a185a..beafc156a5353259d6b077ca57b2dd13af39f48d 100644 (file)
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
  * together with the @destroy function,
  * enables driver-specific objects derived from a ttm_buffer_object.
  * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
  * Returns
  * -ENOMEM: Out of memory.
  * -EINVAL: Invalid placement flags.
index d01b4ddbdc56acb88fd1064b0e4580f82aa03b76..8e0c848326b6df5ccc95fc4ae16b965d81bfa0c8 100644 (file)
@@ -206,14 +206,84 @@ struct ttm_tt {
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
+       /**
+        * struct ttm_mem_type_manager member init
+        *
+        * @man: Pointer to a memory type manager.
+        * @p_size: Implementation dependent, but typically the size of the
+        * range to be managed in pages.
+        *
+        * Called to initialize a private range manager. The function is
+        * expected to initialize the man::priv member.
+        * Returns 0 on success, negative error code on failure.
+        */
        int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+       /**
+        * struct ttm_mem_type_manager member takedown
+        *
+        * @man: Pointer to a memory type manager.
+        *
+        * Called to undo the setup done in init. All allocated resources
+        * should be freed.
+        */
        int  (*takedown)(struct ttm_mem_type_manager *man);
+
+       /**
+        * struct ttm_mem_type_manager member get_node
+        *
+        * @man: Pointer to a memory type manager.
+        * @bo: Pointer to the buffer object we're allocating space for.
+        * @placement: Placement details.
+        * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+        *
+        * This function should allocate space in the memory type managed
+        * by @man. Placement details if
+        * applicable are given by @placement. If successful,
+        * @mem::mm_node should be set to a non-null value, and
+        * @mem::start should be set to a value identifying the beginning
+        * of the range allocated, and the function should return zero.
+        * If the memory region cannot accommodate the buffer object,
+        * @mem::mm_node should be set to NULL, and the function should
+        * return 0.
+        * If a system error occurred, preventing the request from being
+        * fulfilled, the function should return a negative error code.
+        *
+        * Note that @mem::mm_node will only be dereferenced by
+        * struct ttm_mem_type_manager functions and optionally by the driver,
+        * which has knowledge of the underlying type.
+        *
+        * This function may not be called from within atomic context, so
+        * an implementation can and must use either a mutex or a spinlock to
+        * protect any data structures managing the space.
+        */
        int  (*get_node)(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem);
+
+       /**
+        * struct ttm_mem_type_manager member put_node
+        *
+        * @man: Pointer to a memory type manager.
+        * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+        *
+        * This function frees memory type resources previously allocated
+        * and that are identified by @mem::mm_node and @mem::start. May not
+        * be called from within atomic context.
+        */
        void (*put_node)(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem);
+
+       /**
+        * struct ttm_mem_type_manager member debug
+        *
+        * @man: Pointer to a memory type manager.
+        * @prefix: Prefix to be used in printout to identify the caller.
+        *
+        * This function is called to print out the state of the memory
+        * type manager to aid debugging of out-of-memory conditions.
+        * It may not be called from within atomic context.
+        */
        void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager {
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
+       const struct ttm_mem_type_manager_func *func;
+       void *priv;
 
        /*
-        * Protected by the bdev->lru_lock.
-        * TODO: Consider one lru_lock per ttm_mem_type_manager.
-        * Plays ill with list removal, though.
+        * Protected by the global->lru_lock.
         */
-       const struct ttm_mem_type_manager_func *func;
-       void *priv;
+
        struct list_head lru;
 };
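
Taken together, the kerneldoc added above is a contract for driver-private range managers: init must set man::priv, get_node fills in mem::mm_node and mem::start on success, leaves mm_node NULL (and returns 0) when the request simply does not fit, returns a negative errno only for real failures, and must do its own locking since none of these hooks run in atomic context. A hedged skeleton that satisfies that contract (the trivial bump-allocator policy and every my_* name are invented; a real driver would normally wrap a drm_mm or similar allocator):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ttm/ttm_bo_driver.h"

struct my_mgr {
	spinlock_t	lock;
	unsigned long	next;	/* next free page offset */
	unsigned long	size;	/* managed size in pages */
};

static int my_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
	struct my_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return -ENOMEM;
	spin_lock_init(&mgr->lock);
	mgr->size = p_size;
	man->priv = mgr;			/* required by the init contract */
	return 0;
}

static int my_mgr_takedown(struct ttm_mem_type_manager *man)
{
	kfree(man->priv);
	man->priv = NULL;
	return 0;
}

static int my_mgr_get_node(struct ttm_mem_type_manager *man,
			   struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   struct ttm_mem_reg *mem)
{
	struct my_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	if (mgr->next + mem->num_pages > mgr->size) {
		spin_unlock(&mgr->lock);
		mem->mm_node = NULL;		/* "does not fit" is not an error */
		return 0;
	}
	mem->start = mgr->next;
	mem->mm_node = mgr;			/* any non-NULL cookie the driver understands */
	mgr->next += mem->num_pages;
	spin_unlock(&mgr->lock);
	return 0;
}

static void my_mgr_put_node(struct ttm_mem_type_manager *man,
			    struct ttm_mem_reg *mem)
{
	mem->mm_node = NULL;			/* a bump allocator never reclaims */
}

static void my_mgr_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct my_mgr *mgr = man->priv;

	printk(KERN_DEBUG "%s: used %lu of %lu pages\n",
	       prefix, mgr->next, mgr->size);
}

static const struct ttm_mem_type_manager_func my_mgr_func = {
	.init		= my_mgr_init,
	.takedown	= my_mgr_takedown,
	.get_node	= my_mgr_get_node,
	.put_node	= my_mgr_put_node,
	.debug		= my_mgr_debug,
};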
 
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644 (file)
index 0000000..96c038e
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+#include <asm/atomic.h>
+
+/**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of the probable
+ * value of the atomic. This helps the processor avoid reading the memory
+ * before doing the atomic read/modify/write cycle, lowering the
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#ifndef atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+       int val, c = hint;
+
+       /* sanity test, should be removed by compiler if hint is a constant */
+       if (!hint)
+               return atomic_inc_not_zero(v);
+
+       do {
+               val = atomic_cmpxchg(v, c, c + 1);
+               if (val == c)
+                       return 1;
+               c = val;
+       } while (c);
+
+       return 0;
+}
+#endif
+
+#endif /* _LINUX_ATOMIC_H */
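
Because the helper above only pays off when the caller genuinely knows the likely counter value, a short hedged usage sketch (struct my_entry is made up; the assumption is a refcount that almost always sits at 1 while the object is live):

#include <linux/atomic.h>

struct my_entry {
	atomic_t refcnt;	/* drops to 0 once the entry starts dying */
	/* ... payload ... */
};

/* Try to take a reference; returns NULL if the entry is already dying. */
static struct my_entry *my_entry_get(struct my_entry *e)
{
	/* hint of 1: skip the initial load when the guess is right */
	if (!atomic_inc_not_zero_hint(&e->refcnt, 1))
		return NULL;
	return e;
}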
index 1eb29399a4ff4b82f00257e3c03090015c00060a..334d68a171081472a25f98baa510bc7eb4cfeb8e 100644 (file)
@@ -1056,7 +1056,6 @@ struct lock_manager_operations {
        int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
        void (*fl_notify)(struct file_lock *);  /* unblock callback */
        int (*fl_grant)(struct file_lock *, struct file_lock *, int);
-       void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);
        void (*fl_break)(struct file_lock *);
        int (*fl_mylease)(struct file_lock *, struct file_lock *);
index 8a389b608ce3b568e85d14dcbfb71f8f98cf0c13..41cb31f14ee3068cd3c232fb07e385676f2f9ba0 100644 (file)
  */
 #define in_nmi()       (preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
-# define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_INATOMIC_BASE 0
+#endif
+
+#if defined(CONFIG_PREEMPT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
 # define PREEMPT_CHECK_OFFSET 0
 #endif
 
index e9138198e8239d878ba04e069112e45eef0d519a..b676c585574e1723a50fbb7bddf0b96d43706dbe 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 
 #include <asm/cacheflush.h>
 
index 1f66fa06a97cae66e39d3ad6f6cef8fc221ea955..889b35abaedac162061d01872237de109d62dbbb 100644 (file)
@@ -407,8 +407,6 @@ void i2c_unlock_adapter(struct i2c_adapter *);
 
 /* i2c adapter classes (bitmask) */
 #define I2C_CLASS_HWMON                (1<<0)  /* lm_sensors, ... */
-#define I2C_CLASS_TV_ANALOG    (1<<1)  /* bttv + friends */
-#define I2C_CLASS_TV_DIGITAL   (1<<2)  /* dvb cards */
 #define I2C_CLASS_DDC          (1<<3)  /* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD          (1<<7)  /* SPD EEPROMs and similar */
 
index e9639115dff1b920095b79b92202767cd0ed0537..abde2527c699684f78cf029b82c740af751c26a2 100644 (file)
@@ -412,6 +412,11 @@ static inline void irq_free_desc(unsigned int irq)
        irq_free_descs(irq, 1);
 }
 
+static inline int irq_reserve_irq(unsigned int irq)
+{
+       return irq_reserve_irqs(irq, 1);
+}
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
index 05aa8c23483ff502bdf94e76365236fa44ff9721..3bc4dcab6e828a16b6113c1aa1f3de08bb062c46 100644 (file)
@@ -43,7 +43,7 @@ unsigned int irq_get_next_irq(unsigned int offset);
                else
 
 #ifdef CONFIG_SMP
-#define irq_node(irq)  (irq_to_desc(irq)->node)
+#define irq_node(irq)  (irq_get_irq_data(irq)->node)
 #else
 #define irq_node(irq)  0
 #endif
index 450092c1e35f459f6db7dba29dbb94ec1a2c683c..fc3da9e4da199d2d89b76d0e0bb7388e069c820e 100644 (file)
@@ -60,7 +60,7 @@ extern const char linux_proc_banner[];
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) (                                        \
 {                                                      \
-       typeof(y) __y = y;                              \
+       const typeof(y) __y = y;                        \
        (((x) + (__y - 1)) / __y) * __y;                \
 }                                                      \
 )
@@ -293,6 +293,7 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
                                   unsigned int interval_msec);
 
 extern int printk_delay_msec;
+extern int dmesg_restrict;
 
 /*
  * Print a one-time message (analogous to WARN_ONCE() et al):
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644 (file)
index 0000000..38368d7
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5521_H
+#define __LINUX_LP5521_H
+
+/* See Documentation/leds/leds-lp5521.txt */
+
+struct lp5521_led_config {
+       u8              chan_nr;
+       u8              led_current; /* mA x10, 0 if led is not connected */
+       u8              max_current;
+};
+
+#define LP5521_CLOCK_AUTO      0
+#define LP5521_CLOCK_INT       1
+#define LP5521_CLOCK_EXT       2
+
+struct lp5521_platform_data {
+       struct lp5521_led_config *led_config;
+       u8      num_channels;
+       u8      clock_mode;
+       int     (*setup_resources)(void);
+       void    (*release_resources)(void);
+       void    (*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644 (file)
index 0000000..7967476
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5523_H
+#define __LINUX_LP5523_H
+
+/* See Documentation/leds/leds-lp5523.txt */
+
+struct lp5523_led_config {
+       u8              chan_nr;
+       u8              led_current; /* mA x10, 0 if led is not connected */
+       u8              max_current;
+};
+
+#define LP5523_CLOCK_AUTO      0
+#define LP5523_CLOCK_INT       1
+#define LP5523_CLOCK_EXT       2
+
+struct lp5523_platform_data {
+       struct lp5523_led_config *led_config;
+       u8      num_channels;
+       u8      clock_mode;
+       int     (*setup_resources)(void);
+       void    (*release_resources)(void);
+       void    (*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5523_H */
index ba6986a11663a417625a259a612019df46f0ad97..0f19df9e37b0fec0394d0350abf86a3aa63cc518 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
+#include <linux/timer.h>
 
 struct device;
 /*
@@ -45,10 +46,14 @@ struct led_classdev {
        /* Get LED brightness level */
        enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
 
-       /* Activate hardware accelerated blink, delays are in
-        * miliseconds and if none is provided then a sensible default
-        * should be chosen. The call can adjust the timings if it can't
-        * match the values specified exactly. */
+       /*
+        * Activate hardware accelerated blink, delays are in milliseconds
+        * and if both are zero then a sensible default should be chosen.
+        * The call should adjust the timings in that case and if it can't
+        * match the values specified exactly.
+        * Deactivate blinking again when the brightness is set to a fixed
+        * value via the brightness_set() callback.
+        */
        int             (*blink_set)(struct led_classdev *led_cdev,
                                     unsigned long *delay_on,
                                     unsigned long *delay_off);
@@ -57,6 +62,10 @@ struct led_classdev {
        struct list_head         node;                  /* LED Device list */
        const char              *default_trigger;       /* Trigger to use */
 
+       unsigned long            blink_delay_on, blink_delay_off;
+       struct timer_list        blink_timer;
+       int                      blink_brightness;
+
 #ifdef CONFIG_LEDS_TRIGGERS
        /* Protects the trigger data below */
        struct rw_semaphore      trigger_lock;
@@ -73,6 +82,36 @@ extern void led_classdev_unregister(struct led_classdev *led_cdev);
 extern void led_classdev_suspend(struct led_classdev *led_cdev);
 extern void led_classdev_resume(struct led_classdev *led_cdev);
 
+/**
+ * led_blink_set - set blinking with software fallback
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink, attempting to use the
+ * hardware acceleration if possible, but falling back to
+ * software blinking if there is no hardware blinking or if
+ * the LED refuses the passed values.
+ *
+ * Note that if software blinking is active, simply calling
+ * led_cdev->brightness_set() will not stop the blinking,
+ * use led_brightness_set() instead.
+ */
+extern void led_blink_set(struct led_classdev *led_cdev,
+                         unsigned long *delay_on,
+                         unsigned long *delay_off);
+/**
+ * led_brightness_set - set LED brightness
+ * @led_cdev: the LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set an LED's brightness, and, if necessary, cancel the
+ * software blink timer that implements blinking when the
+ * hardware doesn't.
+ */
+extern void led_brightness_set(struct led_classdev *led_cdev,
+                              enum led_brightness brightness);
+
 /*
  * LED Triggers
  */
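
A hedged sketch of how a caller (say a trigger) might drive the two helpers declared above; the trigger function and the 500 ms timings are invented:

#include <linux/leds.h>

static void my_trigger_update(struct led_classdev *led_cdev, bool busy)
{
	unsigned long on = 500, off = 500;	/* ms; the LED may adjust these */

	if (busy)
		led_blink_set(led_cdev, &on, &off);
	else
		/* also cancels a software blink timer, per the kerneldoc above */
		led_brightness_set(led_cdev, LED_OFF);
}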
index d19e2114fd867782dc8a71b7b12367a9eff561ad..5c99da1078aa2e2906e0cc8f06428c231f76970a 100644 (file)
@@ -59,19 +59,19 @@ struct sh_mmcif_plat_data {
 #define MMCIF_CE_HOST_STS2     0x0000004C
 #define MMCIF_CE_VERSION       0x0000007C
 
-extern inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
+static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
        return readl(addr + reg);
 }
 
-extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
+static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
 {
        writel(val, addr + reg);
 }
 
 #define SH_MMCIF_BBS 512 /* boot block size */
 
-extern inline void sh_mmcif_boot_cmd_send(void __iomem *base,
+static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
                                          unsigned long cmd, unsigned long arg)
 {
        sh_mmcif_writel(base, MMCIF_CE_INT, 0);
@@ -79,7 +79,7 @@ extern inline void sh_mmcif_boot_cmd_send(void __iomem *base,
        sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
 }
 
-extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
+static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
 {
        unsigned long tmp;
        int cnt;
@@ -95,14 +95,14 @@ extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
        return -1;
 }
 
-extern inline int sh_mmcif_boot_cmd(void __iomem *base,
+static inline int sh_mmcif_boot_cmd(void __iomem *base,
                                    unsigned long cmd, unsigned long arg)
 {
        sh_mmcif_boot_cmd_send(base, cmd, arg);
        return sh_mmcif_boot_cmd_poll(base, 0x00010000);
 }
 
-extern inline int sh_mmcif_boot_do_read_single(void __iomem *base,
+static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
                                               unsigned int block_nr,
                                               unsigned long *buf)
 {
@@ -125,7 +125,7 @@ extern inline int sh_mmcif_boot_do_read_single(void __iomem *base,
        return 0;
 }
 
-extern inline int sh_mmcif_boot_do_read(void __iomem *base,
+static inline int sh_mmcif_boot_do_read(void __iomem *base,
                                        unsigned long first_block,
                                        unsigned long nr_blocks,
                                        void *buf)
@@ -143,7 +143,7 @@ extern inline int sh_mmcif_boot_do_read(void __iomem *base,
        return ret;
 }
 
-extern inline void sh_mmcif_boot_init(void __iomem *base)
+static inline void sh_mmcif_boot_init(void __iomem *base)
 {
        unsigned long tmp;
 
@@ -177,7 +177,7 @@ extern inline void sh_mmcif_boot_init(void __iomem *base)
        sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
 }
 
-extern inline void sh_mmcif_boot_slurp(void __iomem *base,
+static inline void sh_mmcif_boot_slurp(void __iomem *base,
                                       unsigned char *buf,
                                       unsigned long no_bytes)
 {
index 87e2c2e7aed3757748ca69c8b22a96c5ee1d5752..c6bcfe93b9cab9deeab76844ce8b76e2a2adf5bb 100644 (file)
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS  0x1c22
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN        0x1c41
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX        0x1c5f
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS     0x1d22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC       0x1d40
 #define PCI_DEVICE_ID_INTEL_82801AA_0  0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1  0x2411
index 057bf22a8323463a6bd9279d6928d3fea7ed8eb8..40150f345982cd4fd40e8b80470505e569f05b03 100644 (file)
@@ -747,6 +747,16 @@ struct perf_event {
        u64                             tstamp_running;
        u64                             tstamp_stopped;
 
+       /*
+        * timestamp shadows the actual context timing but it can
+        * be safely used in NMI interrupt context. It reflects the
+        * context time as it was when the event was last scheduled in.
+        *
+        * ctx_time already accounts for ctx->timestamp. Therefore to
+        * compute ctx_time for a sample, simply add perf_clock().
+        */
+       u64                             shadow_ctx_time;
+
        struct perf_event_attr          attr;
        struct hw_perf_event            hw;
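
The comment above reduces to a single relation for NMI-context readers inside kernel/perf_event.c; a hedged one-liner of what "simply add perf_clock()" means in code (perf_clock() is file-local there):

	u64 now = event->shadow_ctx_time + perf_clock();	/* context time at this sample */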
 
index 01b3d759f1fccec7f88e1b276973aa6f70e5bae5..e031e1a486d9fc8e7d7d6539c75d39df1235d2c2 100644 (file)
@@ -8,6 +8,7 @@ struct platform_pwm_backlight_data {
        int pwm_id;
        unsigned int max_brightness;
        unsigned int dft_brightness;
+       unsigned int lth_brightness;
        unsigned int pwm_period_ns;
        int (*init)(struct device *dev);
        int (*notify)(struct device *dev, int brightness);
index a39cbed9ee17a5d771f7c3e7ca129e3a05171220..ab2baa5c488453cba50bf564d6220038d0224867 100644 (file)
  * needed for RCU lookups (because root->height is unreliable). The only
  * time callers need worry about this is when doing a lookup_slot under
  * RCU.
+ *
+ * The indirect pointer is in fact also used to tag the last pointer of a
+ * node when it is shrunk, before we RCU-free the node. See the shrink
+ * code for details.
  */
 #define RADIX_TREE_INDIRECT_PTR        1
-#define RADIX_TREE_RETRY ((void *)-1UL)
-
-static inline void *radix_tree_ptr_to_indirect(void *ptr)
-{
-       return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
-}
 
-static inline void *radix_tree_indirect_to_ptr(void *ptr)
-{
-       return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
-}
 #define radix_tree_indirect_to_ptr(ptr) \
        radix_tree_indirect_to_ptr((void __force *)(ptr))
 
@@ -140,16 +134,29 @@ do {                                                                      \
  *             removed.
  *
  * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
- * locked across slot lookup and dereference.  More likely, will be used with
- * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ * locked across slot lookup and dereference. Not required if write lock is
+ * held (ie. items cannot be concurrently inserted).
+ *
+ * radix_tree_deref_retry must be used to confirm validity of the pointer if
+ * only the read lock is held.
  */
 static inline void *radix_tree_deref_slot(void **pslot)
 {
-       void *ret = rcu_dereference(*pslot);
-       if (unlikely(radix_tree_is_indirect_ptr(ret)))
-               ret = RADIX_TREE_RETRY;
-       return ret;
+       return rcu_dereference(*pslot);
 }
+
+/**
+ * radix_tree_deref_retry      - check radix_tree_deref_slot
+ * @arg:       pointer returned by radix_tree_deref_slot
+ * Returns:    0 if retry is not required, otherwise retry is required
+ *
+ * radix_tree_deref_retry must be used with radix_tree_deref_slot.
+ */
+static inline int radix_tree_deref_retry(void *arg)
+{
+       return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+}
+
 /**
  * radix_tree_replace_slot     - replace item in a slot
  * @pslot:     pointer to slot, returned by radix_tree_lookup_slot
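
The deref/retry pair above implies a specific lookup idiom under RCU: retry the slot dereference whenever the indirect-pointer tag shows the tree was shrunk underneath the reader. A hedged sketch of such a lookup loop (the tree, index and lifetime rule are placeholders; the real users are pagecache-style lookups):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void *my_rcu_lookup(struct radix_tree_root *tree, unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(tree, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto repeat;	/* tree was shrunk; the slot is stale */
	}
	rcu_read_unlock();
	return item;	/* caller still needs its own refcount/lifetime rule */
}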
index 88d36f9145bacfa34ee6c32823413b09715e6ef2..d01c96c1966e2724460eeaaef3bdd6c522fc7bda 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_RESOURCE_H
 
 #include <linux/time.h>
+#include <linux/types.h>
 
 /*
  * Resource control/accounting header file for linux
index 5310d27abd2a503ad523059ea4832f34ecfbb194..39fa04966aa892f220dded471b54e4b8930cd3e2 100644 (file)
@@ -29,9 +29,6 @@ struct semaphore {
 #define DEFINE_SEMAPHORE(name) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
-#define DECLARE_MUTEX(name)    \
-       struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
-
 static inline void sema_init(struct semaphore *sem, int val)
 {
        static struct lock_class_key __key;
@@ -39,9 +36,6 @@ static inline void sema_init(struct semaphore *sem, int val)
        lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
 }
 
-#define init_MUTEX(sem)                sema_init(sem, 1)
-#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
-
 extern void down(struct semaphore *sem);
 extern int __must_check down_interruptible(struct semaphore *sem);
 extern int __must_check down_killable(struct semaphore *sem);
index 4dca992f3093771993ac9d98d1233df5e2f33cfa..cea0c38e7a63dfad2544043c49fca25aea9a7563 100644 (file)
@@ -122,6 +122,10 @@ int clk_rate_table_find(struct clk *clk,
 long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
                              unsigned int div_max, unsigned long rate);
 
+long clk_round_parent(struct clk *clk, unsigned long target,
+                     unsigned long *best_freq, unsigned long *parent_freq,
+                     unsigned int div_min, unsigned int div_max);
+
 #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags)       \
 {                                                                      \
        .parent         = _parent,                                      \
index 864bd56bd3b0ba08a84958ff5d06e647d643e208..4d9dcd1383150088f598f0d0d58f7d51e7400046 100644 (file)
@@ -5,7 +5,6 @@ struct sh_timer_config {
        char *name;
        long channel_offset;
        int timer_bit;
-       char *clk;
        unsigned long clockevent_rating;
        unsigned long clocksource_rating;
 };
index 92e52a1e6af3fd8478bb451f04d34a3c63b1625f..b4d7710bc38d2b26effd6025243150490e0a26a9 100644 (file)
@@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 /**
  * struct spi_master - interface to SPI master controller
  * @dev: device interface to this driver
+ * @list: link with the global spi_master list
  * @bus_num: board-specific (and often SOC-specific) identifier for a
  *     given SPI controller.
  * @num_chipselect: chipselects are used to distinguish individual
@@ -238,6 +239,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 struct spi_master {
        struct device   dev;
 
+       struct list_head list;
+
        /* other than negative (== assign one dynamically), bus_num is fully
         * board-specific.  usually that simplifies to being SOC-specific.
         * example:  one SOC has three SPI controllers, numbered 0..2,
index bbdb680ffbe9d46f31de3f88d82d48869543920f..aea0d438e3c716b3a0f62612f15e43b9abe1e2d8 100644 (file)
@@ -82,18 +82,28 @@ struct svc_xprt {
        struct net              *xpt_net;
 };
 
-static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
 {
        spin_lock(&xpt->xpt_lock);
-       list_add(&u->list, &xpt->xpt_users);
+       list_del_init(&u->list);
        spin_unlock(&xpt->xpt_lock);
 }
 
-static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
 {
        spin_lock(&xpt->xpt_lock);
-       list_del_init(&u->list);
+       if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
+               /*
+                * The connection is about to be deleted soon (or,
+                * worse, may already be deleted--in which case we've
+                * already notified the xpt_users).
+                */
+               spin_unlock(&xpt->xpt_lock);
+               return -ENOTCONN;
+       }
+       list_add(&u->list, &xpt->xpt_users);
        spin_unlock(&xpt->xpt_lock);
+       return 0;
 }
 
 int    svc_reg_xprt_class(struct svc_xprt_class *);
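
register_xpt_user() can now fail precisely so that callers such as the nfsd4 hunks earlier in this diff can treat a failed registration as an immediately lost connection rather than racing with transport teardown. A hedged sketch of that caller-side pattern (struct my_conn and its callback are stand-ins for the nfsd4 code above):

#include <linux/sunrpc/svc_xprt.h>

struct my_conn {
	struct svc_xpt_user user;
	/* ... */
};

static void my_conn_lost(struct svc_xpt_user *u)
{
	/* tear down the my_conn embedding u */
}

static void my_conn_register(struct svc_xprt *xprt, struct my_conn *conn)
{
	conn->user.callback = my_conn_lost;
	if (register_xpt_user(xprt, &conn->user))
		/*
		 * The transport is already closing and may already have
		 * notified its users, so run the teardown ourselves.
		 */
		my_conn_lost(&conn->user);
}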
index 6da573c75d54cfdde09f449f6cd3dd617c49fa20..8eff83b95366049b6f55d9efd268952453329227 100644 (file)
@@ -28,7 +28,7 @@ struct caif_param {
  * @sockaddr:          Socket address to connect.
  * @priority:          Priority of the connection.
  * @link_selector:     Link selector (high bandwidth or low latency)
- * @link_name:         Name of the CAIF Link Layer to use.
+ * @ifindex:           kernel index of the interface.
  * @param:             Connect Request parameters (CAIF_SO_REQ_PARAM).
  *
  * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@ struct caif_connect_request {
        struct sockaddr_caif sockaddr;
        enum caif_channel_priority priority;
        enum caif_link_selector link_selector;
-       char link_name[16];
+       int ifindex;
        struct caif_param param;
 };
 
index ce4570dff020720c2ca6d5e22a0202a78c5c42c5..87c3d11b8e555ff2ebd9a07f504cd8ab2b5fcebd 100644 (file)
@@ -121,6 +121,8 @@ struct cfspi {
        wait_queue_head_t wait;
        spinlock_t lock;
        bool flow_stop;
+       bool slave;
+       bool slave_talked;
 #ifdef CONFIG_DEBUG_FS
        enum cfspi_state dbg_state;
        u16 pcmd;
index bd646faffa47bd8cd46ed4f15e1988c7bfac8247..f688478bfb84a4afc62039e57e212978ab24929b 100644 (file)
@@ -139,10 +139,10 @@ struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
                     enum cfcnfg_phy_preference phy_pref);
 
 /**
- * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * cfcnfg_get_id_from_ifi() - Get the CAIF Physical Identifier for an ifindex,
+ *                     i.e. map a kernel interface index to its CAIF physical id.
  * @cnfg:      Configuration object
- * @name:      Name of the Physical Layer (Caif Link Layer)
+ * @ifi:       ifindex obtained from socket.c bindtodevice.
  */
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
-
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
 #endif                         /* CFCNFG_H_ */
index f3b201d335b3532858c846a8aadb1672de80dce3..9801c55de5d64e5357006f9cfb129bce139572dd 100644 (file)
@@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
  *
  * Returns the first attribute which matches the specified type.
  */
-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
                                             int hdrlen, int attrtype)
 {
        return nla_find(nlmsg_attrdata(nlh, hdrlen),
index 14be49b44e841ff22aa66e5b291a6ed5b00978c3..f986ab7ffe6f040e9e00ac23d404bd8ad31e1f4e 100644 (file)
@@ -721,7 +721,7 @@ struct libfc_function_template {
  * struct fc_disc - Discovery context
  * @retry_count:   Number of retries
  * @pending:       1 if discovery is pending, 0 if not
- * @requesting:    1 if discovery has been requested, 0 if not
+ * @requested:     1 if discovery has been requested, 0 if not
  * @seq_count:     Number of sequences used for discovery
  * @buf_len:       Length of the discovery buffer
  * @disc_id:       Discovery ID
index a8f37012663209020b30baeecabde62512465993..53a9e886612b8c30fd4b4fc3d2814a9433bb551b 100644 (file)
@@ -137,7 +137,7 @@ struct osd_request {
                void *buff;
                unsigned alloc_size; /* 0 here means: don't call kfree */
                unsigned total_bytes;
-       } set_attr, enc_get_attr, get_attr;
+       } cdb_cont, set_attr, enc_get_attr, get_attr;
 
        struct _osd_io_info {
                struct bio *bio;
@@ -448,6 +448,20 @@ void osd_req_read(struct osd_request *or,
 int osd_req_read_kern(struct osd_request *or,
        const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
 
+/* Scatter/Gather write/read commands */
+int osd_req_write_sg(struct osd_request *or,
+       const struct osd_obj_id *obj, struct bio *bio,
+       const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_read_sg(struct osd_request *or,
+       const struct osd_obj_id *obj, struct bio *bio,
+       const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_write_sg_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, void **buff,
+       const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_read_sg_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, void **buff,
+       const struct osd_sg_entry *sglist, unsigned numentries);
+
 /*
  * Root/Partition/Collection/Object Attributes commands
  */
index 685661283540f64beb081b291dc6689c0a255fc0..a6026da25f3e0bf5787d479e1511da85c1175299 100644 (file)
@@ -631,4 +631,46 @@ static inline void osd_sec_set_caps(struct osd_capability_head *cap,
        put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
 }
 
+/* osd2r05a sec 5.3: CDB continuation segment formats */
+enum osd_continuation_segment_format {
+       CDB_CONTINUATION_FORMAT_V2 = 0x01,
+};
+
+struct osd_continuation_segment_header {
+       u8      format;
+       u8      reserved1;
+       __be16  service_action;
+       __be32  reserved2;
+       u8      integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
+} __packed;
+
+/* osd2r05a sec 5.4.1: CDB continuation descriptors */
+enum osd_continuation_descriptor_type {
+       NO_MORE_DESCRIPTORS = 0x0000,
+       SCATTER_GATHER_LIST = 0x0001,
+       QUERY_LIST = 0x0002,
+       USER_OBJECT = 0x0003,
+       COPY_USER_OBJECT_SOURCE = 0x0101,
+       EXTENSION_CAPABILITIES = 0xFFEE
+};
+
+struct osd_continuation_descriptor_header {
+       __be16  type;
+       u8      reserved;
+       u8      pad_length;
+       __be32  length;
+} __packed;
+
+
+/* osd2r05a sec 5.4.2: Scatter/gather list */
+struct osd_sg_list_entry {
+       __be64 offset;
+       __be64 len;
+};
+
+struct osd_sg_continuation_descriptor {
+       struct osd_continuation_descriptor_header hdr;
+       struct osd_sg_list_entry entries[];
+};
+
 #endif /* ndef __OSD_PROTOCOL_H__ */
index 3f5e88cc75c0b8a1d54b25902e82017f5bfd51f5..bd0be7ed4bcf619ca4aea8cfa3abc6d86d9c7aec 100644 (file)
@@ -37,4 +37,9 @@ struct osd_attr {
        void *val_ptr;          /* in network order */
 };
 
+struct osd_sg_entry {
+       u64 offset;
+       u64 len;
+};
+
 #endif /* ndef __OSD_TYPES_H__ */
index 289010d3270bfa99191cfbdfa95a02564b6638bf..e5e345fb2a5c37db45de06bcdf02f3796aec64c4 100644 (file)
@@ -98,6 +98,103 @@ TRACE_EVENT(ext4_allocate_inode,
                  (unsigned long) __entry->dir, __entry->mode)
 );
 
+TRACE_EVENT(ext4_evict_inode,
+       TP_PROTO(struct inode *inode),
+
+       TP_ARGS(inode),
+
+       TP_STRUCT__entry(
+               __field(        int,   dev_major                )
+               __field(        int,   dev_minor                )
+               __field(        ino_t,  ino                     )
+               __field(        int,    nlink                   )
+       ),
+
+       TP_fast_assign(
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
+               __entry->ino    = inode->i_ino;
+               __entry->nlink  = inode->i_nlink;
+       ),
+
+       TP_printk("dev %d,%d ino %lu nlink %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->nlink)
+);
+
+TRACE_EVENT(ext4_drop_inode,
+       TP_PROTO(struct inode *inode, int drop),
+
+       TP_ARGS(inode, drop),
+
+       TP_STRUCT__entry(
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
+               __field(        ino_t,  ino                     )
+               __field(        int,    drop                    )
+       ),
+
+       TP_fast_assign(
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
+               __entry->ino    = inode->i_ino;
+               __entry->drop   = drop;
+       ),
+
+       TP_printk("dev %d,%d ino %lu drop %d",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, __entry->drop)
+);
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+       TP_PROTO(struct inode *inode, unsigned long IP),
+
+       TP_ARGS(inode, IP),
+
+       TP_STRUCT__entry(
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
+               __field(        ino_t,  ino                     )
+               __field(unsigned long,  ip                      )
+       ),
+
+       TP_fast_assign(
+               __entry->dev_major = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor = MINOR(inode->i_sb->s_dev);
+               __entry->ino    = inode->i_ino;
+               __entry->ip     = IP;
+       ),
+
+       TP_printk("dev %d,%d ino %lu caller %pF",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+       TP_PROTO(struct inode *inode, loff_t new_size),
+
+       TP_ARGS(inode, new_size),
+
+       TP_STRUCT__entry(
+               __field(        int,    dev_major               )
+               __field(        int,    dev_minor               )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, new_size                )
+       ),
+
+       TP_fast_assign(
+               __entry->dev_major      = MAJOR(inode->i_sb->s_dev);
+               __entry->dev_minor      = MINOR(inode->i_sb->s_dev);
+               __entry->ino            = inode->i_ino;
+               __entry->new_size       = new_size;
+       ),
+
+       TP_printk("dev %d,%d ino %lu new_size %lld",
+                 __entry->dev_major, __entry->dev_minor,
+                 (unsigned long) __entry->ino,
+                 (long long) __entry->new_size)
+);
+
 DECLARE_EVENT_CLASS(ext4__write_begin,
 
        TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
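
[Editor's note] For context, not part of this patch: each TRACE_EVENT(name, ...) added above generates a trace_<name>() helper whose arguments match TP_PROTO, and ext4 then calls it at the instrumented site. A minimal sketch of how the ext4_drop_inode tracepoint would be fired (illustrative; the real call site lives in fs/ext4):

    /* sketch only -- generic_drop_inode() is the VFS default drop decision */
    static int ext4_drop_inode(struct inode *inode)
    {
            int drop = generic_drop_inode(inode);

            trace_ext4_drop_inode(inode, drop);   /* generated from TRACE_EVENT above */
            return drop;
    }
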
index b194febf5799bab766e5aef261e566902a086b28..21aa7b3001fb49edce43446dd85898f62ab33d98 100644 (file)
@@ -95,6 +95,14 @@ static void __exit_signal(struct task_struct *tsk)
                tty = sig->tty;
                sig->tty = NULL;
        } else {
+               /*
+                * This can only happen if the caller is de_thread().
+                * FIXME: this is a temporary hack; we should teach
+                * posix-cpu-timers to handle this case correctly.
+                */
+               if (unlikely(has_group_leader_pid(tsk)))
+                       posix_cpu_timers_exit_group(tsk);
+
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
index 644e8d5fa367e74c3cc06a2114f2e283c701b0e6..5f92acc5f952e0afb0489017c265a943a4a7d464 100644 (file)
@@ -324,6 +324,10 @@ void enable_irq(unsigned int irq)
        if (!desc)
                return;
 
+       if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
+           KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+               return;
+
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
index 877fb306d4154465e184e19b73ca1984a179aece..17110a4a4fc28c68bf7993b9248432c0dd957d4f 100644 (file)
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 
        account_global_scheduler_latency(tsk, &lat);
 
-       /*
-        * short term hack; if we're > 32 we stop; future we recycle:
-        */
-       tsk->latency_record_count++;
-       if (tsk->latency_record_count >= LT_SAVECOUNT)
-               goto out_unlock;
-
-       for (i = 0; i < LT_SAVECOUNT; i++) {
+       for (i = 0; i < tsk->latency_record_count; i++) {
                struct latency_record *mylat;
                int same = 1;
 
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
                }
        }
 
+       /*
+        * short term hack; if we're > 32 we stop; future we recycle:
+        */
+       if (tsk->latency_record_count >= LT_SAVECOUNT)
+               goto out_unlock;
+
        /* Allocated a new one: */
-       i = tsk->latency_record_count;
+       i = tsk->latency_record_count++;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
index 517d827f498281da50210aa76a02b4693116154a..cb6c0d2af68f64b16bd0557f3c7fb7001151b71d 100644 (file)
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event,
 
        event->tstamp_running += ctx->time - event->tstamp_stopped;
 
+       event->shadow_ctx_time = ctx->time - ctx->timestamp;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        ctx->nr_active++;
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 }
 
 static void perf_output_read_one(struct perf_output_handle *handle,
-                                struct perf_event *event)
+                                struct perf_event *event,
+                                u64 enabled, u64 running)
 {
        u64 read_format = event->attr.read_format;
        u64 values[4];
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 
        values[n++] = perf_event_count(event);
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-               values[n++] = event->total_time_enabled +
+               values[n++] = enabled +
                        atomic64_read(&event->child_total_time_enabled);
        }
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-               values[n++] = event->total_time_running +
+               values[n++] = running +
                        atomic64_read(&event->child_total_time_running);
        }
        if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-                           struct perf_event *event)
+                           struct perf_event *event,
+                           u64 enabled, u64 running)
 {
        struct perf_event *leader = event->group_leader, *sub;
        u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        values[n++] = 1 + leader->nr_siblings;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-               values[n++] = leader->total_time_enabled;
+               values[n++] = enabled;
 
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-               values[n++] = leader->total_time_running;
+               values[n++] = running;
 
        if (leader != event)
                leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
        }
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+                                PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
 {
+       u64 enabled = 0, running = 0, now, ctx_time;
+       u64 read_format = event->attr.read_format;
+
+       /*
+        * compute total_time_enabled, total_time_running
+        * based on snapshot values taken when the event
+        * was last scheduled in.
+        *
+        * we cannot simply call update_context_time()
+        * because of locking issues, as we are called in
+        * NMI context
+        */
+       if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+               now = perf_clock();
+               ctx_time = event->shadow_ctx_time + now;
+               enabled = ctx_time - event->tstamp_enabled;
+               running = ctx_time - event->tstamp_running;
+       }
+
        if (event->attr.read_format & PERF_FORMAT_GROUP)
-               perf_output_read_group(handle, event);
+               perf_output_read_group(handle, event, enabled, running);
        else
-               perf_output_read_one(handle, event);
+               perf_output_read_one(handle, event, enabled, running);
 }
 
 void perf_output_sample(struct perf_output_handle *handle,
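
[Editor's note] The shadow_ctx_time bookkeeping above boils down to a small piece of arithmetic; an illustrative derivation (assuming the context has stayed scheduled in since the snapshot was taken, not the kernel's literal code):

    /*
    *   ctx_time(now) = ctx->time + (perf_clock() - ctx->timestamp)
    *                 = (ctx->time - ctx->timestamp) + perf_clock()
    *                 = shadow_ctx_time             + perf_clock()
    *
    * which is why perf_output_read() can rebuild the enabled/running
    * times in NMI context without taking the context lock:
    */
    u64 ctx_time = event->shadow_ctx_time + perf_clock();
    u64 enabled  = ctx_time - event->tstamp_enabled;
    u64 running  = ctx_time - event->tstamp_running;
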
index b2ebaee8c377d2aa92c073e772748242b5ce5c90..38e7d5868d60ccdfc8f13176f6021a4a712cdee1 100644 (file)
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void)
 }
 #endif
 
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
        unsigned i, j, limit, count;
index 471b66acabb57e770dd552bfc5a9edae356acfd7..37fa9b99ad588356534b1457b7241d97573705b8 100644 (file)
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2)
 
 int clean_sort_range(struct range *range, int az)
 {
-       int i, j, k = az - 1, nr_range = 0;
+       int i, j, k = az - 1, nr_range = az;
 
        for (i = 0; i < k; i++) {
                if (range[i].end)
index c7cf397fb92958bbcbd2772e13a8d4a4d9c00b6d..859ea5a9605fa40fe47aedcb865ab08c66a43797 100644 (file)
@@ -70,17 +70,10 @@ static const struct vm_operations_struct relay_file_mmap_ops = {
  */
 static struct page **relay_alloc_page_array(unsigned int n_pages)
 {
-       struct page **array;
-       size_t pa_size = n_pages * sizeof(struct page *);
-
-       if (pa_size > PAGE_SIZE) {
-               array = vmalloc(pa_size);
-               if (array)
-                       memset(array, 0, pa_size);
-       } else {
-               array = kzalloc(pa_size, GFP_KERNEL);
-       }
-       return array;
+       const size_t pa_size = n_pages * sizeof(struct page *);
+       if (pa_size > PAGE_SIZE)
+               return vzalloc(pa_size);
+       return kzalloc(pa_size, GFP_KERNEL);
 }
 
 /*
index c33a1edb799fda6db2e16fdf9d1d0401a9f78278..b65bf634035ed0684693b9c7c02b3229c8d68e0d 100644 (file)
@@ -703,6 +703,15 @@ static struct ctl_table kern_table[] = {
                .extra2         = &ten_thousand,
        },
 #endif
+       {
+               .procname       = "dmesg_restrict",
+               .data           = &dmesg_restrict,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
        {
                .procname       = "ngroups_max",
                .data           = &ngroups_max,
index bafba687a6d849a11f0201acd3d7ae5e63e9301e..6e3c41a4024c1cc66be01218e2c37498498f2469 100644 (file)
@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __initdata no_watchdog;
+static int no_watchdog;
 
 
 /* boot commands */
index 6f412ab4c24f812fc8b7290c4a1e06cc0250ea62..5086bb962b4dd9ca6471afb76f0698facb914816 100644 (file)
@@ -82,6 +82,16 @@ struct radix_tree_preload {
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline void *ptr_to_indirect(void *ptr)
+{
+       return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+static inline void *indirect_to_ptr(void *ptr)
+{
+       return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
        return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
                        return -ENOMEM;
 
                /* Increase the height.  */
-               node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+               node->slots[0] = indirect_to_ptr(root->rnode);
 
                /* Propagate the aggregated tag info into the new root */
                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
                newheight = root->height+1;
                node->height = newheight;
                node->count = 1;
-               node = radix_tree_ptr_to_indirect(node);
+               node = ptr_to_indirect(node);
                rcu_assign_pointer(root->rnode, node);
                root->height = newheight;
        } while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
                        return error;
        }
 
-       slot = radix_tree_indirect_to_ptr(root->rnode);
+       slot = indirect_to_ptr(root->rnode);
 
        height = root->height;
        shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
                                rcu_assign_pointer(node->slots[offset], slot);
                                node->count++;
                        } else
-                               rcu_assign_pointer(root->rnode,
-                                       radix_tree_ptr_to_indirect(slot));
+                               rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
                }
 
                /* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
                        return NULL;
                return is_slot ? (void *)&root->rnode : node;
        }
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        height = node->height;
        if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
                height--;
        } while (height > 0);
 
-       return is_slot ? (void *)slot:node;
+       return is_slot ? (void *)slot : indirect_to_ptr(node);
 }
 
 /**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
        height = root->height;
        BUG_ON(index > radix_tree_maxindex(height));
 
-       slot = radix_tree_indirect_to_ptr(root->rnode);
+       slot = indirect_to_ptr(root->rnode);
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 
        while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
        pathp->node = NULL;
-       slot = radix_tree_indirect_to_ptr(root->rnode);
+       slot = indirect_to_ptr(root->rnode);
 
        while (height > 0) {
                int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 
        if (!radix_tree_is_indirect_ptr(node))
                return (index == 0);
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        height = node->height;
        if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
        }
 
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-       slot = radix_tree_indirect_to_ptr(root->rnode);
+       slot = indirect_to_ptr(root->rnode);
 
        /*
         * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                results[0] = node;
                return 1;
        }
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        max_index = radix_tree_maxindex(node->height);
 
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        slot = *(((void ***)results)[ret + i]);
                        if (!slot)
                                continue;
-                       results[ret + nr_found] = rcu_dereference_raw(slot);
+                       results[ret + nr_found] =
+                               indirect_to_ptr(rcu_dereference_raw(slot));
                        nr_found++;
                }
                ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
                results[0] = (void **)&root->rnode;
                return 1;
        }
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        max_index = radix_tree_maxindex(node->height);
 
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                results[0] = node;
                return 1;
        }
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        max_index = radix_tree_maxindex(node->height);
 
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                        slot = *(((void ***)results)[ret + i]);
                        if (!slot)
                                continue;
-                       results[ret + nr_found] = rcu_dereference_raw(slot);
+                       results[ret + nr_found] =
+                               indirect_to_ptr(rcu_dereference_raw(slot));
                        nr_found++;
                }
                ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
                results[0] = (void **)&root->rnode;
                return 1;
        }
-       node = radix_tree_indirect_to_ptr(node);
+       node = indirect_to_ptr(node);
 
        max_index = radix_tree_maxindex(node->height);
 
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
                void *newptr;
 
                BUG_ON(!radix_tree_is_indirect_ptr(to_free));
-               to_free = radix_tree_indirect_to_ptr(to_free);
+               to_free = indirect_to_ptr(to_free);
 
                /*
                 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 
                /*
                 * We don't need rcu_assign_pointer(), since we are simply
-                * moving the node from one part of the tree to another. If
-                * it was safe to dereference the old pointer to it
+                * moving the node from one part of the tree to another: if it
+                * was safe to dereference the old pointer to it
                 * (to_free->slots[0]), it will be safe to dereference the new
-                * one (root->rnode).
+                * one (root->rnode) as far as dependent read barriers go.
                 */
                newptr = to_free->slots[0];
                if (root->height > 1)
-                       newptr = radix_tree_ptr_to_indirect(newptr);
+                       newptr = ptr_to_indirect(newptr);
                root->rnode = newptr;
                root->height--;
+
+               /*
+                * We have a dilemma here. The node's slot[0] must not be
+                * NULLed in case there are concurrent lookups expecting to
+                * find the item. However if this was a bottom-level node,
+                * then it may be subject to the slot pointer being visible
+                * to callers dereferencing it. If the item corresponding to
+                * slot[0] is subsequently deleted, these callers would expect
+                * their slot to become empty sooner or later.
+                *
+                * For example, lockless pagecache will look up a slot, deref
+                * the page pointer, and if the page is 0 refcount it means it
+                * was concurrently deleted from pagecache so try the deref
+                * again. Fortunately there is already a requirement for logic
+                * to retry the entire slot lookup -- the indirect pointer
+                * problem (replacing direct root node with an indirect pointer
+                * also results in a stale slot). So tag the slot as indirect
+                * to force callers to retry.
+                */
+               if (root->height == 0)
+                       *((unsigned long *)&to_free->slots[0]) |=
+                                               RADIX_TREE_INDIRECT_PTR;
+
                radix_tree_node_free(to_free);
        }
 }
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
                root->rnode = NULL;
                goto out;
        }
-       slot = radix_tree_indirect_to_ptr(slot);
+       slot = indirect_to_ptr(slot);
 
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
        pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
                        radix_tree_node_free(to_free);
 
                if (pathp->node->count) {
-                       if (pathp->node ==
-                                       radix_tree_indirect_to_ptr(root->rnode))
+                       if (pathp->node == indirect_to_ptr(root->rnode))
                                radix_tree_shrink(root);
                        goto out;
                }
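
[Editor's note] The "tag the slot as indirect to force callers to retry" comment above is easier to follow next to the consumer side. A minimal sketch of the lockless lookup loop that this tagging relies on (variable names follow the mm/filemap.c hunks further down in this diff):

    struct page *page;
    void **slot;

    rcu_read_lock();
repeat:
    slot = radix_tree_lookup_slot(&mapping->page_tree, offset);
    page = slot ? radix_tree_deref_slot(slot) : NULL;
    if (radix_tree_deref_retry(page))
            goto repeat;            /* slot went stale because the tree shrank/grew */
    if (page && !page_cache_get_speculative(page))
            goto repeat;            /* raced with the page being freed */
    rcu_read_unlock();
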
index 75572b5f23746a4b47a42920980a22e6a5251fe7..ea89840fc65fa2b499ce3c19c657e9e8a680b240 100644 (file)
@@ -644,7 +644,9 @@ repeat:
        pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
        if (pagep) {
                page = radix_tree_deref_slot(pagep);
-               if (unlikely(!page || page == RADIX_TREE_RETRY))
+               if (unlikely(!page))
+                       goto out;
+               if (radix_tree_deref_retry(page))
                        goto repeat;
 
                if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@ repeat:
                        goto repeat;
                }
        }
+out:
        rcu_read_unlock();
 
        return page;
@@ -777,12 +780,11 @@ repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;
-               /*
-                * this can only trigger if nr_found == 1, making livelock
-                * a non issue.
-                */
-               if (unlikely(page == RADIX_TREE_RETRY))
+               if (radix_tree_deref_retry(page)) {
+                       if (ret)
+                               start = pages[ret-1]->index;
                        goto restart;
+               }
 
                if (!page_cache_get_speculative(page))
                        goto repeat;
@@ -830,11 +832,7 @@ repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;
-               /*
-                * this can only trigger if nr_found == 1, making livelock
-                * a non issue.
-                */
-               if (unlikely(page == RADIX_TREE_RETRY))
+               if (radix_tree_deref_retry(page))
                        goto restart;
 
                if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@ repeat:
                page = radix_tree_deref_slot((void **)pages[i]);
                if (unlikely(!page))
                        continue;
-               /*
-                * this can only trigger if nr_found == 1, making livelock
-                * a non issue.
-                */
-               if (unlikely(page == RADIX_TREE_RETRY))
+               if (radix_tree_deref_retry(page))
                        goto restart;
 
                if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@ find_page:
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
                                goto page_not_up_to_date;
+                       /* Did it get truncated before we got the lock? */
+                       if (!page->mapping)
+                               goto page_not_up_to_date_locked;
                        if (!mapping->a_ops->is_partially_uptodate(page,
                                                                desc, offset))
                                goto page_not_up_to_date_locked;
@@ -1563,8 +1560,10 @@ retry_find:
                        goto no_cached_page;
        }
 
-       if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+       if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+               page_cache_release(page);
                return ret | VM_FAULT_RETRY;
+       }
 
        /* Did it get truncated? */
        if (unlikely(page->mapping != mapping)) {
index 9a99cfaf0a19025f6020c106d56e1ce28d6499b4..2efa8ea07ff7d931577da964c190f5524088da3d 100644 (file)
@@ -4208,15 +4208,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 
        memset(mem, 0, size);
        mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-       if (!mem->stat) {
-               if (size < PAGE_SIZE)
-                       kfree(mem);
-               else
-                       vfree(mem);
-               mem = NULL;
-       }
+       if (!mem->stat)
+               goto out_free;
        spin_lock_init(&mem->pcp_counter_lock);
        return mem;
+
+out_free:
+       if (size < PAGE_SIZE)
+               kfree(mem);
+       else
+               vfree(mem);
+       return NULL;
 }
 
 /*
index 2d1bf7cf885179af5144f41998d95dc2d2b72981..4c51338730977604266bab4e67ef0251aba69638 100644 (file)
@@ -211,6 +211,7 @@ success:
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+       perf_event_mmap(vma);
        return 0;
 
 fail:
@@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
-               perf_event_mmap(vma);
                nstart = tmp;
 
                if (nstart < prev->vm_end)
index b8a6fdc2131258db15321021375117cae27cec0d..d31d7ce52c0ea2dbbf85b26019c9fdd70fab624a 100644 (file)
@@ -913,7 +913,7 @@ keep_lumpy:
         * back off and wait for congestion to clear because further reclaim
         * will encounter the same problem
         */
-       if (nr_dirty == nr_congested)
+       if (nr_dirty == nr_congested && nr_dirty != 0)
                zone_set_flag(zone, ZONE_CONGESTED);
 
        free_page_list(&free_pages);
index cd2e42be7b68f73dc60f40631f2b9f87708d3b47..42eac4d33216b81c307a87016e821051bc86146e 100644 (file)
@@ -949,7 +949,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
        v[PGPGIN] /= 2;         /* sectors -> kbytes */
        v[PGPGOUT] /= 2;
 #endif
-       return m->private + *pos;
+       return (unsigned long *)m->private + *pos;
 }
 
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
index 76ae68303d3a3675265e61eccc213a0c4526d4e1..d522d8c1703e761eb27b4e2375eff9851b06690e 100644 (file)
@@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
 {
        struct dev_info *dev_info;
        enum cfcnfg_phy_preference pref;
+       int res;
+
        memset(l, 0, sizeof(*l));
-       l->priority = s->priority;
+       /* In caif protocol low value is high priority */
+       l->priority = CAIF_PRIO_MAX - s->priority + 1;
 
-       if (s->link_name[0] != '\0')
-               l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+       if (s->ifindex != 0) {
+               res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+               if (res < 0)
+                       return res;
+               l->phyid = res;
+       }
        else {
                switch (s->link_selector) {
                case CAIF_LINK_HIGH_BANDW:
index b99369a055d13df6414421a28cfaee5d000c6c72..a42a408306e4a1c038aa6ad0d8fb3d0e2f923aa9 100644 (file)
@@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
        case NETDEV_UNREGISTER:
                caifd = caif_get(dev);
+               if (caifd == NULL)
+                       break;
                netdev_info(dev, "unregister\n");
                atomic_set(&caifd->state, what);
                caif_device_destroy(dev);
index 2eca2dd0000fd7dce7fc614ed5e476f0ca886874..1bf0cf503796f27668d8dc8c1e9a733fd6e76bdd 100644 (file)
@@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock,
 {
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       int prio, linksel;
-       struct ifreq ifreq;
+       int linksel;
 
        if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
                return -ENOPROTOOPT;
@@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock,
                release_sock(&cf_sk->sk);
                return 0;
 
-       case SO_PRIORITY:
-               if (lvl != SOL_SOCKET)
-                       goto bad_sol;
-               if (ol < sizeof(int))
-                       return -EINVAL;
-               if (copy_from_user(&prio, ov, sizeof(int)))
-                       return -EINVAL;
-               lock_sock(&(cf_sk->sk));
-               cf_sk->conn_req.priority = prio;
-               release_sock(&cf_sk->sk);
-               return 0;
-
-       case SO_BINDTODEVICE:
-               if (lvl != SOL_SOCKET)
-                       goto bad_sol;
-               if (ol < sizeof(struct ifreq))
-                       return -EINVAL;
-               if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-                       return -EFAULT;
-               lock_sock(&(cf_sk->sk));
-               strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-                       sizeof(cf_sk->conn_req.link_name));
-               cf_sk->conn_req.link_name
-                       [sizeof(cf_sk->conn_req.link_name)-1] = 0;
-               release_sock(&cf_sk->sk);
-               return 0;
-
        case CAIFSO_REQ_PARAM:
                if (lvl != SOL_CAIF)
                        goto bad_sol;
@@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
        sock->state = SS_CONNECTING;
        sk->sk_state = CAIF_CONNECTING;
 
+       /* Check priority value coming from socket */
+       /* if priority value is out of range it will be adjusted */
+       if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+               cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+       else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+               cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+       else
+               cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+       /* ifindex = id of the interface. */
+       cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
        dbfs_atomic_inc(&cnt.num_connect_req);
        cf_sk->layer.receive = caif_sktrecv_cb;
        err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
        cf_sk->maxframe = mtu - (headroom + tailroom);
        if (cf_sk->maxframe < 1) {
                pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+               err = -ENODEV;
                goto out;
        }
 
@@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
        set_rx_flow_on(cf_sk);
 
        /* Set default options on configuration */
-       cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+       cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
        cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
        cf_sk->conn_req.protocol = protocol;
        /* Increase the number of sockets created. */
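
[Editor's note] Putting the two halves of this change together (the clamping of sk_priority here, and the inversion in connect_req_to_link_param earlier in this diff), the effective mapping from the socket priority to the CAIF link priority is roughly the following sketch:

    /* illustrative only: clamp the socket priority, then invert it,
     * since in the CAIF protocol a low value means high priority */
    u32 prio = cf_sk->sk.sk_priority;

    if (prio > CAIF_PRIO_MAX)
            prio = CAIF_PRIO_MAX;
    else if (prio < CAIF_PRIO_MIN)
            prio = CAIF_PRIO_MIN;

    l->priority = CAIF_PRIO_MAX - prio + 1;    /* e.g. CAIF_PRIO_MAX maps to 1 */
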
index 41adafd1891422ab1aa32a95a46aee7d9ae77b82..21ede141018abf698a58bcf946e0058d6d06c499 100644 (file)
@@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
        return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
        int i;
-
-       /* Try to match with specified name */
-       for (i = 0; i < MAX_PHY_LAYERS; i++) {
-               if (cnfg->phy_layers[i].frm_layer != NULL
-                   && strcmp(cnfg->phy_layers[i].phy_layer->name,
-                             name) == 0)
-                       return cnfg->phy_layers[i].frm_layer->id;
-       }
-       return 0;
+       for (i = 0; i < MAX_PHY_LAYERS; i++)
+               if (cnfg->phy_layers[i].frm_layer != NULL &&
+                               cnfg->phy_layers[i].ifindex == ifi)
+                       return i;
+       return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
index 08f267a109aa3c677efc4a6795ccfea31fc8d581..3cd8f978e3098288d9d1e114347e1ba2c0e4936c 100644 (file)
@@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
        struct cfctrl_request_info *p, *tmp;
        struct cfctrl *ctrl = container_obj(layr);
        spin_lock(&ctrl->info_list_lock);
-       pr_warn("enter\n");
 
        list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
                if (p->client_layer == adap_layer) {
-                       pr_warn("cancel req :%d\n", p->sequence_no);
+                       pr_debug("cancel req :%d\n", p->sequence_no);
                        list_del(&p->list);
                        kfree(p);
                }
index 496fda9ac66f56bee42568e8c0386566c3208be4..11a2af4c162ab6a8324ebaf6b8063ced01b1aea6 100644 (file)
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+       struct cfsrvl *service = container_obj(layr);
+       struct caif_payload_info *info;
+       int ret;
+
+       if (!cfsrvl_ready(service, &ret))
+               return ret;
+
+       /* Add info for MUX-layer to route the packet out */
+       info = cfpkt_info(pkt);
+       info->channel_id = service->layer.id;
+       info->dev_info = &service->dev_info;
+
        return layr->dn->transmit(layr->dn, pkt);
 }
index bde8481e8d2574dd939f84595f8e4354370aa5a7..e2fb5fa757951a46e8d6be9326ffc1659f943232 100644 (file)
@@ -193,7 +193,7 @@ out:
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-       caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+       caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
index 35dfb83184833302e616dca6b0985faeb533897d..0dd54a69dace255fcdf54732d982e8c521c574a5 100644 (file)
@@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
        } else {
                struct sock *sk = skb->sk;
                queue_index = sk_tx_queue_get(sk);
-               if (queue_index < 0) {
+               if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
                        queue_index = 0;
                        if (dev->real_num_tx_queues > 1)
index a29edf2219c8437b811bcce27fdd2cd9fca0a9fc..c079cc0ec6515fe212aecd27303c2b5671a5d60d 100644 (file)
@@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order,
 static inline void fib_result_assign(struct fib_result *res,
                                     struct fib_info *fi)
 {
-       if (res->fi != NULL)
-               fib_info_put(res->fi);
+       /* we used to play games with refcounts, but we now use RCU */
        res->fi = fi;
-       if (fi != NULL)
-               atomic_inc(&fi->fib_clntref);
 }
 
 #endif /* _FIB_LOOKUP_H */
index ba80426658498ba4082c54a19c5af2d2c00cdb10..2ada17129fce6ac9a7285f6033e30d0a9bc4b98a 100644 (file)
@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk,
 {
        struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-       if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
                struct inet_diag_entry entry;
-               struct rtattr *bc = (struct rtattr *)(r + 1);
+               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+                                                         sizeof(*r),
+                                                         INET_DIAG_REQ_BYTECODE);
                struct inet_sock *inet = inet_sk(sk);
 
                entry.family = sk->sk_family;
@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk,
                entry.dport = ntohs(inet->inet_dport);
                entry.userlocks = sk->sk_userlocks;
 
-               if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
                        return 0;
        }
 
@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
 {
        struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-       if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
                struct inet_diag_entry entry;
-               struct rtattr *bc = (struct rtattr *)(r + 1);
+               const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+                                                         sizeof(*r),
+                                                         INET_DIAG_REQ_BYTECODE);
 
                entry.family = tw->tw_family;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
                entry.dport = ntohs(tw->tw_dport);
                entry.userlocks = 0;
 
-               if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+               if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
                        return 0;
        }
 
@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt;
-       struct rtattr *bc = NULL;
+       const struct nlattr *bc = NULL;
        struct inet_sock *inet = inet_sk(sk);
        int j, s_j;
        int reqnum, s_reqnum;
@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        if (!lopt || !lopt->qlen)
                goto out;
 
-       if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
-               bc = (struct rtattr *)(r + 1);
+       if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+               bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
+                                    INET_DIAG_REQ_BYTECODE);
                entry.sport = inet->inet_num;
                entry.userlocks = sk->sk_userlocks;
        }
@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
                                        &ireq->rmt_addr;
                                entry.dport = ntohs(ireq->rmt_port);
 
-                               if (!inet_diag_bc_run(RTA_DATA(bc),
-                                                   RTA_PAYLOAD(bc), &entry))
+                               if (!inet_diag_bc_run(nla_data(bc),
+                                                     nla_len(bc), &entry))
                                        continue;
                        }
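
[Editor's note] For readers unfamiliar with the netlink helpers adopted above: nlmsg_attrlen() reports how many attribute bytes follow the family header, and nlmsg_find_attr() walks them for a given type, replacing the old open-coded (struct rtattr *)(r + 1) pointer arithmetic. A rough sketch (hypothetical variable names):

    const struct nlattr *bc;

    if (nlmsg_attrlen(nlh, sizeof(struct inet_diag_req))) {
            bc = nlmsg_find_attr(nlh, sizeof(struct inet_diag_req),
                                 INET_DIAG_REQ_BYTECODE);
            if (bc && !inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
                    return 0;       /* filter rejected this socket */
    }
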
 
index 3cad2591ace0c15fdad446ab528b0f40c2624d09..3fac340a28d5394bd95bd649d5bcee3dcb3d24df 100644 (file)
@@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user,
                        private = &tmp;
                }
 #endif
+               memset(&info, 0, sizeof(info));
                info.valid_hooks = t->valid_hooks;
                memcpy(info.hook_entry, private->hook_entry,
                       sizeof(info.hook_entry));
index d31b007a6d80dcda45f7f7913ec72af7556b79bf..a846d633b3b6f04a3ed72be1d884e484d3efa030 100644 (file)
@@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user,
                        private = &tmp;
                }
 #endif
+               memset(&info, 0, sizeof(info));
                info.valid_hooks = t->valid_hooks;
                memcpy(info.hook_entry, private->hook_entry,
                       sizeof(info.hook_entry));
index 295c97431e4358408dc45fc7c493536bb41434fc..c04787ce1a71203e1346830450b0a130e358defc 100644 (file)
@@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum)
        return rcu_dereference(nf_nat_protos[protonum]);
 }
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-       const struct nf_nat_protocol *p;
-
-       rcu_read_lock();
-       p = __nf_nat_proto_find(protonum);
-       if (!try_module_get(p->me))
-               p = &nf_nat_unknown_protocol;
-       rcu_read_unlock();
-
-       return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-       module_put(p->me);
-}
-
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
 hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
+static const struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+       const struct nf_nat_protocol *p;
+
+       rcu_read_lock();
+       p = __nf_nat_proto_find(protonum);
+       if (!try_module_get(p->me))
+               p = &nf_nat_unknown_protocol;
+       rcu_read_unlock();
+
+       return p;
+}
+
+static void
+nf_nat_proto_put(const struct nf_nat_protocol *p)
+{
+       module_put(p->me);
+}
+
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
        [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
        [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
index 51df035897e77dfa5b89e6328817cfcb638702cb..455582384eced0b9242a40ea523ac31c8d6e08f4 100644 (file)
@@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user,
                        private = &tmp;
                }
 #endif
+               memset(&info, 0, sizeof(info));
                info.valid_hooks = t->valid_hooks;
                memcpy(info.hook_entry, private->hook_entry,
                       sizeof(info.hook_entry));
index 25661f968f3fb2c2976575f4233fd03c42edb863..fc328339be99f5c0c1fe5ffcb484e72b51b63e90 100644 (file)
@@ -2741,6 +2741,7 @@ static void __net_exit ip6_route_net_exit(struct net *net)
        kfree(net->ipv6.ip6_prohibit_entry);
        kfree(net->ipv6.ip6_blk_hole_entry);
 #endif
+       dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
 static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2833,6 @@ void ip6_route_cleanup(void)
        xfrm6_fini();
        fib6_gc_cleanup();
        unregister_pernet_subsys(&ip6_route_net_ops);
+       dst_entries_destroy(&ip6_dst_blackhole_ops);
        kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
 }
index 104ec3b283d4159f610ef61548beb512d053dcd3..b8dbae82fab8612974d603c42b4cbac3b0509165 100644 (file)
@@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
        struct seq_file *seq;
        int rc = -ENOMEM;
 
-       pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (pd == NULL)
                goto out;
 
index 1eacf8d9966aa292f7f051964f822a00c0ab6605..27a5ea6b6a0ff1a644205ca705f717c78b4dcc84 100644 (file)
@@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
        if (!hash) {
                *vmalloced = 1;
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-               hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+               hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                                PAGE_KERNEL);
        }
 
        if (hash && nulls)
index ed6d929580236c1b4aa77a42db959c9e522f2fc5..dc7bb74110df22818b42222450f0141068b79b6d 100644 (file)
@@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
 
                for (i = 0; i < MAX_NF_CT_PROTO; i++)
                        proto_array[i] = &nf_conntrack_l4proto_generic;
+
+               /* Before making proto_array visible to lockless readers,
+                * we must make sure its content is committed to memory.
+                */
+               smp_wmb();
+
                nf_ct_protos[l4proto->l3proto] = proto_array;
        } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
                                        &nf_conntrack_l4proto_generic) {
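
[Editor's note] The barrier added above is the usual initialise-then-publish pattern; a generic sketch (not the conntrack code verbatim), where the lockless reader pairs with the smp_wmb() via the data dependency on the published pointer:

    /* writer: fill in the array before making the pointer visible */
    for (i = 0; i < MAX_NF_CT_PROTO; i++)
            proto_array[i] = &nf_conntrack_l4proto_generic;
    smp_wmb();                              /* order: contents before pointer */
    nf_ct_protos[l3proto] = proto_array;

    /* reader (lockless): dependent loads see the initialised contents */
    struct nf_conntrack_l4proto **arr = rcu_dereference(nf_ct_protos[l3proto]);
    if (arr)
            p = arr[l4proto];
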
index c390156b426fc936c6b05fd607f4c32ddb1e3a81..aeec1d483b17e6f65c858e1510c7752965e35260 100644 (file)
@@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 static void rds_loop_conn_free(void *arg)
 {
        struct rds_loop_connection *lc = arg;
+       unsigned long flags;
+
        rdsdebug("lc %p\n", lc);
+       spin_lock_irqsave(&loop_conns_lock, flags);
        list_del(&lc->loop_node);
+       spin_unlock_irqrestore(&loop_conns_lock, flags);
        kfree(lc);
 }
 
index 08a8c6cf2d100f8f05a6c932d8190326277747e9..8e0a32001c90c531a2ad8361dc4bb0c399b812fe 100644 (file)
@@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 static void rds_tcp_conn_free(void *arg)
 {
        struct rds_tcp_connection *tc = arg;
+       unsigned long flags;
        rdsdebug("freeing tc %p\n", tc);
+
+       spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+       list_del(&tc->t_tcp_node);
+       spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
        kmem_cache_free(rds_tcp_conn_slab, tc);
 }
 
index 37dff78e9cb17c6fcf35e5302d57b84dd5e9f5f9..d49c40fb7e0960daa905353e714e9317dd38088b 100644 (file)
@@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = {
        .populate       = cgrp_populate,
 #ifdef CONFIG_NET_CLS_CGROUP
        .subsys_id      = net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
 #endif
        .module         = THIS_MODULE,
 };
index 763253257411af1db91b3e25a78af8e687523342..ea8f566e720c7bd9ed508647c080c5c554edba2b 100644 (file)
@@ -103,7 +103,8 @@ retry:
 
 static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
 {
-       textsearch_destroy(EM_TEXT_PRIV(m)->config);
+       if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+               textsearch_destroy(EM_TEXT_PRIV(m)->config);
 }
 
 static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
index 771bab00754b0baff1a5c7a95892028857c8b84d..3a8c4c419cd4051ce0ce7a586b3d93417f768cb2 100644 (file)
@@ -134,15 +134,15 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
                case X25_FAC_CLASS_D:
                        switch (*p) {
                        case X25_FAC_CALLING_AE:
-                               if (p[1] > X25_MAX_DTE_FACIL_LEN)
-                                       break;
+                               if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+                                       return 0;
                                dte_facs->calling_len = p[2];
                                memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLING_AE;
                                break;
                        case X25_FAC_CALLED_AE:
-                               if (p[1] > X25_MAX_DTE_FACIL_LEN)
-                                       break;
+                               if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+                                       return 0;
                                dte_facs->called_len = p[2];
                                memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
                                *vc_fac_mask |= X25_MASK_CALLED_AE;
index 63178961efac00488d36e7c8fc3d21f00e036e2e..f729f022be69bc8c32b1008e8943c92cf36c1e28 100644 (file)
@@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                                                &x25->vc_facil_mask);
                        if (len > 0)
                                skb_pull(skb, len);
+                       else
+                               return -1;
                        /*
                         *      Copy any Call User Data.
                         */
index 90b54d4697fd2de9ae1dcecc80fc25cdcab63d21..e3c7fc0dca382492f1714f4879abb37b63cc07f2 100755 (executable)
@@ -2794,12 +2794,8 @@ sub process {
                        WARN("__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
                }
 
-# check for semaphores used as mutexes
-               if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
-                       WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
-               }
-# check for semaphores used as mutexes
-               if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
+# check for semaphores initialized locked
+               if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
                        WARN("consider using a completion\n" . $herecurr);
 
                }
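
[Editor's note] The pattern the new checkpatch rule flags, and the completion-based alternative it suggests, look roughly like this (a sketch, not taken from any driver):

    /* flagged: a semaphore created locked, i.e. used purely as a
     * wait-for-event primitive */
    struct semaphore sem;
    sema_init(&sem, 0);
    /* waiter: down(&sem);                 signaller: up(&sem); */

    /* suggested replacement: a completion expresses the intent directly */
    struct completion done;
    init_completion(&done);
    /* waiter: wait_for_completion(&done); signaller: complete(&done); */
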
index c0efe102d655c40547afe92014cec1c74b70b3c2..af6e9f3de9503b033a69a3eba7c2ecd212fef747 100644 (file)
@@ -875,7 +875,7 @@ const char *sym_expand_string_value(const char *in)
                        symval = sym_get_string_value(sym);
                }
 
-               newlen = strlen(res) + strlen(symval) + strlen(src);
+               newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
                if (newlen > reslen) {
                        reslen = newlen;
                        res = realloc(res, reslen);
index bd72ae6234947e424885f3808dbec54bddad244c..e80da955e6876aeb550bec75d3d99643020034eb 100644 (file)
@@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS
 
          If you are unsure as to whether this is required, answer N.
 
+config SECURITY_DMESG_RESTRICT
+       bool "Restrict unprivileged access to the kernel syslog"
+       default n
+       help
+         This enforces restrictions on unprivileged users reading the kernel
+         syslog via dmesg(8).
+
+         If this option is not selected, no restrictions will be enforced
+         unless the dmesg_restrict sysctl is explicitly set to (1).
+
+         If you are unsure how to answer this question, answer N.
+
 config SECURITY
        bool "Enable different security models"
        depends on SYSFS
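
[Editor's note] How the new knob behaves from user space (a hypothetical check, not part of the patch): with dmesg_restrict set to 1, klogctl()/dmesg(8) fails with EPERM for callers lacking CAP_SYS_ADMIN, per the cap_syslog() change further down in this diff.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/klog.h>   /* klogctl() */

    int main(void)
    {
            char buf[4096];
            /* 3 == SYSLOG_ACTION_READ_ALL: read the kernel ring buffer */
            int n = klogctl(3, buf, sizeof(buf));

            if (n < 0 && errno == EPERM)
                    printf("dmesg restricted (kernel.dmesg_restrict=1, no CAP_SYS_ADMIN)\n");
            else if (n >= 0)
                    printf("read %d bytes of kernel log\n", n);
            return 0;
    }
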
index cf1de4462ccd3fb297f48bf351dd3494804f22c1..b7106f192b75d5d382e806d303a9b2f6bb879747 100644 (file)
@@ -922,7 +922,7 @@ static int __init apparmor_init(void)
        error = register_security(&apparmor_ops);
        if (error) {
                AA_ERROR("Unable to register AppArmor\n");
-               goto register_security_out;
+               goto set_init_cxt_out;
        }
 
        /* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@ static int __init apparmor_init(void)
 
        return error;
 
+set_init_cxt_out:
+       aa_free_task_context(current->real_cred->security);
+
 register_security_out:
        aa_free_root_ns();
 
@@ -944,7 +947,6 @@ alloc_out:
 
        apparmor_enabled = 0;
        return error;
-
 }
 
 security_initcall(apparmor_init);
index 52cc865f1464574e696fd28eca6a6e0eed326d68..4f0eadee78b8d9295f0da93c32a7321a70055392 100644 (file)
@@ -306,7 +306,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
        return ns;
 
 fail_unconfined:
-       kzfree(ns->base.name);
+       kzfree(ns->base.hname);
 fail_ns:
        kzfree(ns);
        return NULL;
index 5e632b4857e443d8031eaa17c0e2bd7e877b3d14..04b80f9912bfc3357f1c4b51dd8877ca345df3cc 100644 (file)
@@ -895,6 +895,8 @@ int cap_syslog(int type, bool from_file)
 {
        if (type != SYSLOG_ACTION_OPEN && from_file)
                return 0;
+       if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
        if ((type != SYSLOG_ACTION_READ_ALL &&
             type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
                return -EPERM;
index f7e374ec44144763feb215890c73714b32a17a22..1b9bf9395cfe820f87b34fa14d57d5a667a67217 100644 (file)
@@ -625,6 +625,8 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
                        control_cache_size, (struct hpi_control_cache_info *)
                        &phw->control_cache[0]
                        );
+               if (!phw->p_cache)
+                       pao->has_control_cache = 0;
        } else
                pao->has_control_cache = 0;
 
index 22c5fc6255335ac94aca5ac297432f16b0950c4e..2672f6591ceb7246b9a0a24ecbfa8f4cf2c46288 100644 (file)
@@ -644,6 +644,8 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
                                interface->control_cache.size_in_bytes,
                                (struct hpi_control_cache_info *)
                                p_control_cache_virtual);
+                       if (!phw->p_cache)
+                               err = HPI_ERROR_MEMORY_ALLOC;
                }
                if (!err) {
                        err = hpios_locked_mem_get_phys_addr(&phw->
index dda4f1c6f65847504a71016b1064d0039c65e8a3..d67f4d3db911dd9685d24e08f29d6b6bc23f16f9 100644 (file)
@@ -571,14 +571,20 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32
 {
        struct hpi_control_cache *p_cache =
                kmalloc(sizeof(*p_cache), GFP_KERNEL);
+       if (!p_cache)
+               return NULL;
+       p_cache->p_info =
+               kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
+                       GFP_KERNEL);
+       if (!p_cache->p_info) {
+               kfree(p_cache);
+               return NULL;
+       }
        p_cache->cache_size_in_bytes = size_in_bytes;
        p_cache->control_count = number_of_controls;
        p_cache->p_cache =
                (struct hpi_control_cache_single *)pDSP_control_buffer;
        p_cache->init = 0;
-       p_cache->p_info =
-               kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
-               GFP_KERNEL);
        return p_cache;
 }
 
index 3e5ca8fb519ff1ae5d2c03cf7f2beb8700b1745d..e377287192aae954aa42d153cb9931f0934f67b4 100644 (file)
@@ -225,39 +225,25 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
 {
        struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL);
 
-       if (ins == NULL) 
+       if (ins == NULL)
                return NULL;
 
        /* better to use vmalloc for this big table */
-       ins->symbol_table.nsymbols = 0;
        ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
                                            DSP_MAX_SYMBOLS);
-       ins->symbol_table.highest_frag_index = 0;
-
-       if (ins->symbol_table.symbols == NULL) {
+       ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
+       ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
+       if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
                cs46xx_dsp_spos_destroy(chip);
                goto error;
        }
-
+       ins->symbol_table.nsymbols = 0;
+       ins->symbol_table.highest_frag_index = 0;
        ins->code.offset = 0;
        ins->code.size = 0;
-       ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
-
-       if (ins->code.data == NULL) {
-               cs46xx_dsp_spos_destroy(chip);
-               goto error;
-       }
-
        ins->nscb = 0;
        ins->ntask = 0;
-
        ins->nmodules = 0;
-       ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
-
-       if (ins->modules == NULL) {
-               cs46xx_dsp_spos_destroy(chip);
-               goto error;
-       }
 
        /* default SPDIF input sample rate
           to 48000 khz */
@@ -271,8 +257,8 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
 
        /* set left and right validity bits and
           default channel status */
-       ins->spdif_csuv_default = 
-               ins->spdif_csuv_stream =  
+       ins->spdif_csuv_default =
+               ins->spdif_csuv_stream =
         /* byte 0 */  ((unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF        & 0xff)) << 24) |
         /* byte 1 */  ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) |
         /* byte 3 */   (unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) |
@@ -281,6 +267,9 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
        return ins;
 
 error:
+       kfree(ins->modules);
+       kfree(ins->code.data);
+       vfree(ins->symbol_table.symbols);
        kfree(ins);
        return NULL;
 }
index 460fb2ef7e394614a4045a51d4306c429e818d6a..18af38ebf7579f5507fca391f66ba9280844ccf4 100644 (file)
@@ -1166,6 +1166,7 @@ static const char *cs420x_models[CS420X_MODELS] = {
 
 static struct snd_pci_quirk cs420x_cfg_tbl[] = {
        SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
+       SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
        SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
        SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
index ef9af3f4ace2f6fda837a89e4ea46ff8821fd86c..1bd7a540fd49dfa64918e4d533a3d80cbb8fb4af 100644 (file)
@@ -425,7 +425,7 @@ exit:
 static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
        struct snd_pcm_substream *substream = lx_stream->stream;
-       const int is_capture = lx_stream->is_capture;
+       const unsigned int is_capture = lx_stream->is_capture;
 
        int err;
 
@@ -473,7 +473,7 @@ static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
 
 static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
-       const int is_capture = lx_stream->is_capture;
+       const unsigned int is_capture = lx_stream->is_capture;
        int err;
 
        snd_printd(LXP "stopping: stopping stream\n");
index 51afc048961d9855c39e321e523f9f526bd8acd3..aea621eafbb56799385e0e2188a5b3e1130f21d2 100644 (file)
@@ -60,7 +60,7 @@ struct lx_stream {
        snd_pcm_uframes_t          frame_pos;
        enum lx_stream_status      status; /* free, open, running, draining
                                            * pause */
-       int                        is_capture:1;
+       unsigned int               is_capture:1;
 };
 
 
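Editor's note: the lx6464es hunks above and below switch is_capture from a signed to an unsigned 1-bit bitfield, and make the local copies unsigned to match. On the usual two's-complement ABIs a signed "int x:1" field can only represent 0 and -1, so assigning 1 and later comparing against 1 goes wrong. A tiny userspace sketch of the pitfall, purely illustrative:

    #include <stdio.h>

    struct flags {
            int          s:1;       /* signed 1-bit field: values are 0 or -1 */
            unsigned int u:1;       /* unsigned 1-bit field: values are 0 or 1 */
    };

    int main(void)
    {
            struct flags f = { .s = 1, .u = 1 };

            /* On a typical ABI this prints "s=-1 u=1", so (f.s == 1) is false. */
            printf("s=%d u=%d\n", f.s, (int)f.u);
            return 0;
    }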
index 3086b751da4a59271b497e1a8605c3664d15a2ff..617f98b0cbae47ce9c76265f50590f88a83c187d 100644 (file)
@@ -1152,7 +1152,7 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
 {
        struct snd_pcm_substream *substream = lx_stream->stream;
-       int is_capture = lx_stream->is_capture;
+       const unsigned int is_capture = lx_stream->is_capture;
        int err;
        unsigned long flags;
 
index 94a9d06b90277797215e299907444a85ca3f379a..3b5690d28b8bb080e1c6da2658f3017abcfd6f57 100644 (file)
@@ -25,8 +25,9 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
        select SND_SOC_CS42L51 if I2C
        select SND_SOC_CS4270 if I2C
+       select SND_SOC_CX20442
        select SND_SOC_DA7210 if I2C
-       select SND_SOC_JZ4740 if SOC_JZ4740
+       select SND_SOC_JZ4740_CODEC if SOC_JZ4740
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX9877 if I2C
        select SND_SOC_PCM3008
index d251ff54a2d36f8b691b4aca2073d74adb010673..c5ab8c8057714871e13ad249805f60877ff32f5d 100644 (file)
@@ -58,7 +58,7 @@
        (1000000000 / ((rate * 1000) / samples))
 
 #define US_TO_SAMPLES(rate, us) \
-       (rate / (1000000 / us))
+       (rate / (1000000 / (us < 1000000 ? us : 1000000)))
 
 #define UTHR_FROM_PERIOD_SIZE(samples, playrate, burstrate) \
        ((samples * 5000) / ((burstrate * 5000) / (burstrate - playrate)))
@@ -200,7 +200,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
                      u8 *value)
 {
        struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-       int val;
+       int val, ret = 0;
 
        *value = reg & 0xff;
 
@@ -210,6 +210,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
                if (val < 0) {
                        dev_err(codec->dev, "Read failed (%d)\n", val);
                        value[0] = dac33_read_reg_cache(codec, reg);
+                       ret = val;
                } else {
                        value[0] = val;
                        dac33_write_reg_cache(codec, reg, val);
@@ -218,7 +219,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
                value[0] = dac33_read_reg_cache(codec, reg);
        }
 
-       return 0;
+       return ret;
 }
 
 static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -329,13 +330,18 @@ static void dac33_init_chip(struct snd_soc_codec *codec)
                    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
 }
 
-static inline void dac33_read_id(struct snd_soc_codec *codec)
+static inline int dac33_read_id(struct snd_soc_codec *codec)
 {
+       int i, ret = 0;
        u8 reg;
 
-       dac33_read(codec, DAC33_DEVICE_ID_MSB, &reg);
-       dac33_read(codec, DAC33_DEVICE_ID_LSB, &reg);
-       dac33_read(codec, DAC33_DEVICE_REV_ID, &reg);
+       for (i = 0; i < 3; i++) {
+               ret = dac33_read(codec, DAC33_DEVICE_ID_MSB + i, &reg);
+               if (ret < 0)
+                       break;
+       }
+
+       return ret;
 }
 
 static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
@@ -1076,6 +1082,9 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
                /* Number of samples under i2c latency */
                dac33->alarm_threshold = US_TO_SAMPLES(rate,
                                                dac33->mode1_latency);
+               nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
+                               dac33->alarm_threshold;
+
                if (dac33->auto_fifo_config) {
                        if (period_size <= dac33->alarm_threshold)
                                /*
@@ -1086,6 +1095,8 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
                                       ((dac33->alarm_threshold / period_size) +
                                       (dac33->alarm_threshold % period_size ?
                                       1 : 0));
+                       else if (period_size > nsample_limit)
+                               dac33->nsample = nsample_limit;
                        else
                                dac33->nsample = period_size;
                } else {
@@ -1097,8 +1108,7 @@ static void dac33_calculate_times(struct snd_pcm_substream *substream)
                         */
                        dac33->nsample_max = substream->runtime->buffer_size -
                                                period_size;
-                       nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-                                       dac33->alarm_threshold;
+
                        if (dac33->nsample_max > nsample_limit)
                                dac33->nsample_max = nsample_limit;
 
@@ -1414,9 +1424,15 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
                dev_err(codec->dev, "Failed to power up codec: %d\n", ret);
                goto err_power;
        }
-       dac33_read_id(codec);
+       ret = dac33_read_id(codec);
        dac33_hard_power(codec, 0);
 
+       if (ret < 0) {
+               dev_err(codec->dev, "Failed to read chip ID: %d\n", ret);
+               ret = -ENODEV;
+               goto err_power;
+       }
+
        /* Check if the IRQ number is valid and request it */
        if (dac33->irq >= 0) {
                ret = request_irq(dac33->irq, dac33_interrupt_handler,
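Editor's note: two of the tlv320dac33 hunks above are robustness fixes. dac33_read() now propagates its I2C error so the probe can refuse a codec whose ID registers cannot be read, and US_TO_SAMPLES() clamps its argument because, with integer arithmetic, a latency above one second makes 1000000 / us collapse to zero and the outer division fault. A stand-alone sketch of the clamped conversion; the function form, the names, and the extra zero guard are illustrative, the driver keeps this as a macro:

    static inline unsigned int us_to_samples(unsigned int rate, unsigned int us)
    {
            if (us > 1000000)               /* clamp to one second */
                    us = 1000000;
            if (us == 0)                    /* assumption: treat 0 us as 0 samples */
                    return 0;
            return rate / (1000000 / us);
    }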
index 329acc1a207457942a560e86d7f990a2fc44ffd8..ee4fb201de60ca9ceb969fd00ce1292aad0a9330 100644 (file)
@@ -119,13 +119,13 @@ static int tpa6130a2_power(int power)
 {
        struct  tpa6130a2_data *data;
        u8      val;
-       int     ret;
+       int     ret = 0;
 
        BUG_ON(tpa6130a2_client == NULL);
        data = i2c_get_clientdata(tpa6130a2_client);
 
        mutex_lock(&data->mutex);
-       if (power) {
+       if (power && !data->power_state) {
                /* Power on */
                if (data->power_gpio >= 0)
                        gpio_set_value(data->power_gpio, 1);
@@ -153,7 +153,7 @@ static int tpa6130a2_power(int power)
                val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
                val &= ~TPA6130A2_SWS;
                tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-       } else {
+       } else if (!power && data->power_state) {
                /* set SWS */
                val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
                val |= TPA6130A2_SWS;
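Editor's note: the tpa6130a2_power() change above makes the helper idempotent. The hardware is only touched when the requested state differs from the cached data->power_state, so repeated power-on or power-off calls from different paths no longer re-run the GPIO and register sequence. A compact illustrative sketch of that guard; the type and names are made up for the example:

    #include <stdbool.h>
    #include <stdio.h>

    struct amp { bool powered; };

    static int amp_set_power(struct amp *a, bool on)
    {
            if (on == a->powered)
                    return 0;               /* already in the requested state */

            /* stand-in for the real GPIO and register sequence */
            printf("amp power %s\n", on ? "on" : "off");
            a->powered = on;
            return 0;
    }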
index b4f11724a63ffc771c1a1d83909e31ae5cccaab7..aca4b1ea10bb710cd733ea730d22171895df620e 100644 (file)
@@ -186,7 +186,6 @@ static int wm8900_volatile_register(unsigned int reg)
 {
        switch (reg) {
        case WM8900_REG_ID:
-       case WM8900_REG_POWER1:
                return 1;
        default:
                return 0;
@@ -1200,11 +1199,6 @@ static int wm8900_probe(struct snd_soc_codec *codec)
                return -ENODEV;
        }
 
-       /* Read back from the chip */
-       reg = snd_soc_read(codec, WM8900_REG_POWER1);
-       reg = (reg >> 12) & 0xf;
-       dev_info(codec->dev, "WM8900 revision %d\n", reg);
-
        wm8900_reset(codec);
 
        /* Turn the chip on */
index 2cb81538cd9194c9cc278515171d67a45ac39ca6..19ca782ac970f1956243862534e5df1df12a3113 100644 (file)
@@ -123,7 +123,7 @@ static void calibrate_dc_servo(struct snd_soc_codec *codec)
                        reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
                        break;
                default:
-                       WARN(1, "Unknown DCS readback method");
+                       WARN(1, "Unknown DCS readback method\n");
                        break;
                }
 
index a3bfb2e8b70fb6946739837126dc49fed237f658..73d0edd8ded9de053db82b27c60433fe1a7b8686 100644 (file)
@@ -79,7 +79,7 @@ static void tosa_ext_control(struct snd_soc_codec *codec)
 static int tosa_startup(struct snd_pcm_substream *substream)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_codec *codec = rtd->card->codec;
+       struct snd_soc_codec *codec = rtd->codec;
 
        /* check the jack status at stream startup */
        tosa_ext_control(codec);
index 1c8f3f507f54e7d1fc469ff03a2fdbf787640e3a..614a8b30d87bdefdb99414f06d1d74438e60cd18 100644 (file)
@@ -165,8 +165,11 @@ static ssize_t pmdown_time_set(struct device *dev,
 {
        struct snd_soc_pcm_runtime *rtd =
                        container_of(dev, struct snd_soc_pcm_runtime, dev);
+       int ret;
 
-       strict_strtol(buf, 10, &rtd->pmdown_time);
+       ret = strict_strtol(buf, 10, &rtd->pmdown_time);
+       if (ret)
+               return ret;
 
        return count;
 }
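Editor's note: the pmdown_time_set() fix above stops ignoring strict_strtol()'s return value, so a string that does not parse now makes the sysfs write fail instead of silently leaving pmdown_time unchanged or garbled. A userspace sketch of the same parse-then-reject idea, with strtol standing in for the kernel helper:

    #include <errno.h>
    #include <stdlib.h>

    /* Returns 0 and stores the value on success, -EINVAL on a malformed string. */
    static int parse_long(const char *buf, long *out)
    {
            char *end;

            errno = 0;
            *out = strtol(buf, &end, 10);
            if (errno || end == buf)
                    return -EINVAL;
            return 0;
    }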
index 7dae05d8783e4274266bb5c867e534b342e18614..782f741cd00a4e3bb97317d05827f8c3134299bc 100644 (file)
@@ -60,7 +60,7 @@ static const struct rc_config {
        { USB_ID(0x041e, 0x3000), 0, 1, 2, 1,  18, 0x0013 }, /* Extigy       */
        { USB_ID(0x041e, 0x3020), 2, 1, 6, 6,  18, 0x0013 }, /* Audigy 2 NX  */
        { USB_ID(0x041e, 0x3040), 2, 2, 6, 6,  2,  0x6e91 }, /* Live! 24-bit */
-       { USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi */
+       { USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
        { USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
 };
 
@@ -183,7 +183,13 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
        if (value > 1)
                return -EINVAL;
        changed = value != mixer->audigy2nx_leds[index];
-       err = snd_usb_ctl_msg(mixer->chip->dev,
+       if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
+               err = snd_usb_ctl_msg(mixer->chip->dev,
+                             usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+                             !value, 0, NULL, 0, 100);
+       else
+               err = snd_usb_ctl_msg(mixer->chip->dev,
                              usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                              value, index + 2, NULL, 0, 100);
@@ -225,8 +231,12 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
        int i, err;
 
        for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
+               /* USB X-Fi S51 doesn't have a CMSS LED */
+               if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
+                       continue;
                if (i > 1 && /* Live24ext has 2 LEDs only */
                        (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+                        mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
                         mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
                        break; 
                err = snd_ctl_add(mixer->chip->card,
@@ -365,6 +375,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
 
        if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
            mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+           mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
            mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
                if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
                        return err;
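Editor's note: the mixer_quirks hunks above teach the Audigy 2 NX LED code about the USB X-Fi S51 (041e:3042): that device inverts the LED value, has no CMSS LED, and exposes only two controls, so its ID is added to each of the existing checks. If the list keeps growing, a small helper could replace the repeated comparisons; a hedged sketch, assuming USB_ID() simply packs vendor and product into one 32-bit word:

    #include <stdbool.h>
    #include <stddef.h>

    #define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))

    static bool is_audigy2nx_family(unsigned int usb_id)
    {
            static const unsigned int ids[] = {
                    USB_ID(0x041e, 0x3020),  /* Audigy 2 NX    */
                    USB_ID(0x041e, 0x3040),  /* Live! 24-bit   */
                    USB_ID(0x041e, 0x3042),  /* USB X-Fi S51   */
                    USB_ID(0x041e, 0x3048),  /* Toshiba SB0500 */
            };
            size_t i;

            for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    if (usb_id == ids[i])
                            return true;
            return false;
    }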
index cff3a3c465d736f17d73a4cf6c01c54c936469c3..4132522ac90f8c88da831b68f9cbdf777131de8e 100644 (file)
@@ -676,8 +676,10 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
        if (!needs_knot)
                return 0;
 
-       subs->rate_list.count = count;
        subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
+       if (!subs->rate_list.list)
+               return -ENOMEM;
+       subs->rate_list.count = count;
        subs->rate_list.mask = 0;
        count = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
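Editor's note: the pcm.c hunk above reorders the rate-list setup: allocate first, bail out with -ENOMEM on failure, and only then record the element count, so an allocation failure can never leave a non-zero count paired with a NULL list. A minimal sketch of that publish-after-success ordering, with illustrative names:

    #include <errno.h>
    #include <stdlib.h>

    struct rate_list {
            int *list;
            int  count;
    };

    static int rate_list_alloc(struct rate_list *rl, int count)
    {
            int *list = malloc(sizeof(int) * count);

            if (!list)
                    return -ENOMEM;  /* rl->count stays 0, rl->list stays NULL */

            rl->list = list;
            rl->count = count;       /* published only after the allocation succeeded */
            return 0;
    }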
index 122ec9dc4853d079903d566a219a9e48d8a6a465..26aff6bf9e500d0d6699e968a789db651deb9722 100644 (file)
@@ -8,7 +8,11 @@ perf-trace - Read perf.data (created by perf record) and display trace output
 SYNOPSIS
 --------
 [verse]
-'perf trace' {record <script> | report <script> [args] }
+'perf trace' [<options>]
+'perf trace' [<options>] record <script> [<record-options>] <command>
+'perf trace' [<options>] report <script> [script-args]
+'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
+'perf trace' [<options>] <top-script> [script-args]
 
 DESCRIPTION
 -----------
@@ -24,23 +28,53 @@ There are several variants of perf trace:
   available via 'perf trace -l').  The following variants allow you to
   record and run those scripts:
 
-  'perf trace record <script>' to record the events required for 'perf
-  trace report'.  <script> is the name displayed in the output of
-  'perf trace --list' i.e. the actual script name minus any language
-  extension.
+  'perf trace record <script> <command>' to record the events required
+  for 'perf trace report'.  <script> is the name displayed in the
+  output of 'perf trace --list' i.e. the actual script name minus any
+  language extension.  If <command> is not specified, the events are
+  recorded using the -a (system-wide) 'perf record' option.
 
-  'perf trace report <script>' to run and display the results of
-  <script>.  <script> is the name displayed in the output of 'perf
+  'perf trace report <script> [args]' to run and display the results
+  of <script>.  <script> is the name displayed in the output of 'perf
   trace --list' i.e. the actual script name minus any language
   extension.  The perf.data output from a previous run of 'perf trace
   record <script>' is used and should be present for this command to
-  succeed.
+  succeed.  [args] refers to the (mainly optional) args expected by
+  the script.
+
+  'perf trace <script> <required-script-args> <command>' to both
+  record the events required for <script> and to run the <script>
+  using 'live-mode' i.e. without writing anything to disk.  <script>
+  is the name displayed in the output of 'perf trace --list' i.e. the
+  actual script name minus any language extension.  If <command> is
+  not specified, the events are recorded using the -a (system-wide)
+  'perf record' option.  If <script> has any required args, they
+  should be specified before <command>.  This mode doesn't allow for
+  optional script args to be specified; if optional script args are
+  desired, they can be specified using separate 'perf trace record'
+  and 'perf trace report' commands, with the stdout of the record step
+  piped to the stdin of the report script, using the '-o -' and '-i -'
+  options of the corresponding commands.
+
+  'perf trace <top-script>' to both record the events required for
+  <top-script> and to run the <top-script> using 'live-mode'
+  i.e. without writing anything to disk.  <top-script> is the name
+  displayed in the output of 'perf trace --list' i.e. the actual
+  script name minus any language extension; a <top-script> is defined
+  as any script name ending with the string 'top'.
+
+  [<record-options>] can be passed to the record steps of 'perf trace
+  record' and 'live-mode' variants; this isn't possible however for
+  <top-script> 'live-mode' or 'perf trace report' variants.
 
   See the 'SEE ALSO' section for links to language-specific
   information on how to write and run your own trace scripts.
 
 OPTIONS
 -------
+<command>...::
+       Any command you can specify in a shell.
+
 -D::
 --dump-raw-trace=::
         Display verbose dump of the trace data.
@@ -64,6 +98,13 @@ OPTIONS
         Generate perf-trace.[ext] starter script for given language,
         using current perf.data.
 
+-a::
+        Force system-wide collection.  Scripts run without a <command>
+        normally use -a by default, while scripts run with a <command>
+        normally don't - this option allows the latter to be run in
+        system-wide mode.
+
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-trace-perl[1],
index 4e75583ddd6d9605b96256264c4e5ca900670d3e..93bd2ff001fb113ff11159e419269942ac4e3743 100644 (file)
@@ -790,7 +790,7 @@ static const char * const record_usage[] = {
 
 static bool force, append_file;
 
-static const struct option options[] = {
+const struct option record_options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
@@ -839,16 +839,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 {
        int i, j, err = -ENOMEM;
 
-       argc = parse_options(argc, argv, options, record_usage,
+       argc = parse_options(argc, argv, record_options, record_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target_pid == -1 && target_tid == -1 &&
                !system_wide && !cpu_list)
-               usage_with_options(record_usage, options);
+               usage_with_options(record_usage, record_options);
 
        if (force && append_file) {
                fprintf(stderr, "Can't overwrite and append at the same time."
                                " You need to choose between -f and -A");
-               usage_with_options(record_usage, options);
+               usage_with_options(record_usage, record_options);
        } else if (append_file) {
                write_mode = WRITE_APPEND;
        } else {
@@ -871,7 +871,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
                if (thread_num <= 0) {
                        fprintf(stderr, "Can't find all threads of pid %d\n",
                                        target_pid);
-                       usage_with_options(record_usage, options);
+                       usage_with_options(record_usage, record_options);
                }
        } else {
                all_tids=malloc(sizeof(pid_t));
index b513e40974f46bf45f816d658d78d574762f5b17..dd625808c2a5332c4f733a59acfb1ee881faae3f 100644 (file)
@@ -69,7 +69,6 @@ static int                    target_tid                      =     -1;
 static pid_t                   *all_tids                       =      NULL;
 static int                     thread_num                      =      0;
 static bool                    inherit                         =  false;
-static int                     profile_cpu                     =     -1;
 static int                     nr_cpus                         =      0;
 static int                     realtime_prio                   =      0;
 static bool                    group                           =  false;
@@ -558,13 +557,13 @@ static void print_sym_table(void)
        else
                printf(" (all");
 
-       if (profile_cpu != -1)
-               printf(", cpu: %d)\n", profile_cpu);
+       if (cpu_list)
+               printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
        else {
                if (target_tid != -1)
                        printf(")\n");
                else
-                       printf(", %d CPUs)\n", nr_cpus);
+                       printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
        }
 
        printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@ int group_fd;
 static void start_counter(int i, int counter)
 {
        struct perf_event_attr *attr;
-       int cpu;
+       int cpu = -1;
        int thread_index;
 
-       cpu = profile_cpu;
-       if (target_tid == -1 && profile_cpu == -1)
+       if (target_tid == -1)
                cpu = cpumap[i];
 
        attr = attrs + counter;
index 2f8df45c4dcbfe306080354dbfcfb073ac3ed0a0..86cfe3800e6bf5580718fd3df3c3788f9edbda31 100644 (file)
@@ -10,6 +10,7 @@
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/trace-event.h"
+#include "util/parse-options.h"
 #include "util/util.h"
 
 static char const              *script_name;
@@ -17,6 +18,7 @@ static char const             *generate_script_lang;
 static bool                    debug_mode;
 static u64                     last_timestamp;
 static u64                     nr_unordered;
+extern const struct option     record_options[];
 
 static int default_start_script(const char *script __unused,
                                int argc __unused,
@@ -328,7 +330,7 @@ static struct script_desc *script_desc__new(const char *name)
 {
        struct script_desc *s = zalloc(sizeof(*s));
 
-       if (s != NULL)
+       if (s != NULL && name)
                s->name = strdup(name);
 
        return s;
@@ -337,6 +339,8 @@ static struct script_desc *script_desc__new(const char *name)
 static void script_desc__delete(struct script_desc *s)
 {
        free(s->name);
+       free(s->half_liner);
+       free(s->args);
        free(s);
 }
 
@@ -537,8 +541,40 @@ static char *get_script_path(const char *script_root, const char *suffix)
        return path;
 }
 
+static bool is_top_script(const char *script_path)
+{
+       return ends_with((char *)script_path, "top") == NULL ? false : true;
+}
+
+static int has_required_arg(char *script_path)
+{
+       struct script_desc *desc;
+       int n_args = 0;
+       char *p;
+
+       desc = script_desc__new(NULL);
+
+       if (read_script_info(desc, script_path))
+               goto out;
+
+       if (!desc->args)
+               goto out;
+
+       for (p = desc->args; *p; p++)
+               if (*p == '<')
+                       n_args++;
+out:
+       script_desc__delete(desc);
+
+       return n_args;
+}
+
 static const char * const trace_usage[] = {
-       "perf trace [<options>] <command>",
+       "perf trace [<options>]",
+       "perf trace [<options>] record <script> [<record-options>] <command>",
+       "perf trace [<options>] report <script> [script-args]",
+       "perf trace [<options>] <script> [<record-options>] <command>",
+       "perf trace [<options>] <top-script> [script-args]",
        NULL
 };
 
@@ -564,50 +600,81 @@ static const struct option options[] = {
        OPT_END()
 };
 
+static bool have_cmd(int argc, const char **argv)
+{
+       char **__argv = malloc(sizeof(const char *) * argc);
+
+       if (!__argv)
+               die("malloc");
+       memcpy(__argv, argv, sizeof(const char *) * argc);
+       argc = parse_options(argc, (const char **)__argv, record_options,
+                            NULL, PARSE_OPT_STOP_AT_NON_OPTION);
+       free(__argv);
+
+       return argc != 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
+       char *rec_script_path = NULL;
+       char *rep_script_path = NULL;
        struct perf_session *session;
-       const char *suffix = NULL;
+       char *script_path = NULL;
        const char **__argv;
-       char *script_path;
-       int i, err;
+       bool system_wide;
+       int i, j, err;
 
-       if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
-               if (argc < 3) {
-                       fprintf(stderr,
-                               "Please specify a record script\n");
-                       return -1;
-               }
-               suffix = RECORD_SUFFIX;
+       setup_scripting();
+
+       argc = parse_options(argc, argv, options, trace_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+
+       if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+               rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+               if (!rec_script_path)
+                       return cmd_record(argc, argv, NULL);
        }
 
-       if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
-               if (argc < 3) {
+       if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+               rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+               if (!rep_script_path) {
                        fprintf(stderr,
-                               "Please specify a report script\n");
+                               "Please specify a valid report script"
+                               "(see 'perf trace -l' for listing)\n");
                        return -1;
                }
-               suffix = REPORT_SUFFIX;
        }
 
        /* make sure PERF_EXEC_PATH is set for scripts */
        perf_set_argv_exec_path(perf_exec_path());
 
-       if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
-               char *record_script_path, *report_script_path;
+       if (argc && !script_name && !rec_script_path && !rep_script_path) {
                int live_pipe[2];
+               int rep_args;
                pid_t pid;
 
-               record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
-               if (!record_script_path) {
-                       fprintf(stderr, "record script not found\n");
-                       return -1;
+               rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
+               rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
+
+               if (!rec_script_path && !rep_script_path) {
+                       fprintf(stderr, " Couldn't find script %s\n\n See perf"
+                               " trace -l for available scripts.\n", argv[0]);
+                       usage_with_options(trace_usage, options);
                }
 
-               report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
-               if (!report_script_path) {
-                       fprintf(stderr, "report script not found\n");
-                       return -1;
+               if (is_top_script(argv[0])) {
+                       rep_args = argc - 1;
+               } else {
+                       int rec_args;
+
+                       rep_args = has_required_arg(rep_script_path);
+                       rec_args = (argc - 1) - rep_args;
+                       if (rec_args < 0) {
+                               fprintf(stderr, " %s script requires options."
+                                       "\n\n See perf trace -l for available "
+                                       "scripts and options.\n", argv[0]);
+                               usage_with_options(trace_usage, options);
+                       }
                }
 
                if (pipe(live_pipe) < 0) {
@@ -622,60 +689,84 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
                }
 
                if (!pid) {
+                       system_wide = true;
+                       j = 0;
+
                        dup2(live_pipe[1], 1);
                        close(live_pipe[0]);
 
-                       __argv = malloc(6 * sizeof(const char *));
-                       __argv[0] = "/bin/sh";
-                       __argv[1] = record_script_path;
-                       __argv[2] = "-q";
-                       __argv[3] = "-o";
-                       __argv[4] = "-";
-                       __argv[5] = NULL;
+                       if (!is_top_script(argv[0]))
+                               system_wide = !have_cmd(argc - rep_args,
+                                                       &argv[rep_args]);
+
+                       __argv = malloc((argc + 6) * sizeof(const char *));
+                       if (!__argv)
+                               die("malloc");
+
+                       __argv[j++] = "/bin/sh";
+                       __argv[j++] = rec_script_path;
+                       if (system_wide)
+                               __argv[j++] = "-a";
+                       __argv[j++] = "-q";
+                       __argv[j++] = "-o";
+                       __argv[j++] = "-";
+                       for (i = rep_args + 1; i < argc; i++)
+                               __argv[j++] = argv[i];
+                       __argv[j++] = NULL;
 
                        execvp("/bin/sh", (char **)__argv);
+                       free(__argv);
                        exit(-1);
                }
 
                dup2(live_pipe[0], 0);
                close(live_pipe[1]);
 
-               __argv = malloc((argc + 3) * sizeof(const char *));
-               __argv[0] = "/bin/sh";
-               __argv[1] = report_script_path;
-               for (i = 2; i < argc; i++)
-                       __argv[i] = argv[i];
-               __argv[i++] = "-i";
-               __argv[i++] = "-";
-               __argv[i++] = NULL;
+               __argv = malloc((argc + 4) * sizeof(const char *));
+               if (!__argv)
+                       die("malloc");
+               j = 0;
+               __argv[j++] = "/bin/sh";
+               __argv[j++] = rep_script_path;
+               for (i = 1; i < rep_args + 1; i++)
+                       __argv[j++] = argv[i];
+               __argv[j++] = "-i";
+               __argv[j++] = "-";
+               __argv[j++] = NULL;
 
                execvp("/bin/sh", (char **)__argv);
+               free(__argv);
                exit(-1);
        }
 
-       if (suffix) {
-               script_path = get_script_path(argv[2], suffix);
-               if (!script_path) {
-                       fprintf(stderr, "script not found\n");
-                       return -1;
-               }
-
-               __argv = malloc((argc + 1) * sizeof(const char *));
-               __argv[0] = "/bin/sh";
-               __argv[1] = script_path;
-               for (i = 3; i < argc; i++)
-                       __argv[i - 1] = argv[i];
-               __argv[argc - 1] = NULL;
+       if (rec_script_path)
+               script_path = rec_script_path;
+       if (rep_script_path)
+               script_path = rep_script_path;
+
+       if (script_path) {
+               system_wide = false;
+               j = 0;
+
+               if (rec_script_path)
+                       system_wide = !have_cmd(argc - 1, &argv[1]);
+
+               __argv = malloc((argc + 2) * sizeof(const char *));
+               if (!__argv)
+                       die("malloc");
+               __argv[j++] = "/bin/sh";
+               __argv[j++] = script_path;
+               if (system_wide)
+                       __argv[j++] = "-a";
+               for (i = 2; i < argc; i++)
+                       __argv[j++] = argv[i];
+               __argv[j++] = NULL;
 
                execvp("/bin/sh", (char **)__argv);
+               free(__argv);
                exit(-1);
        }
 
-       setup_scripting();
-
-       argc = parse_options(argc, argv, options, trace_usage,
-                            PARSE_OPT_STOP_AT_NON_OPTION);
-
        if (symbol__init() < 0)
                return -1;
        if (!script_name)
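Editor's note: a recurring pattern in the builtin-trace.c rework above is assembling a NULL-terminated argv for /bin/sh, optionally inserting -a when no trailing command was given (system-wide mode), and exec'ing the record or report wrapper script. A stand-alone sketch of that assembly step, with illustrative names and without the fork/pipe plumbing around it:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void exec_wrapper(const char *script, int system_wide,
                             int argc, const char **argv)
    {
            const char **v = malloc((argc + 4) * sizeof(*v));
            int i, j = 0;

            if (!v) {
                    perror("malloc");
                    exit(1);
            }
            v[j++] = "/bin/sh";
            v[j++] = script;
            if (system_wide)
                    v[j++] = "-a";
            for (i = 0; i < argc; i++)
                    v[j++] = argv[i];
            v[j] = NULL;

            execvp("/bin/sh", (char **)v);
            perror("execvp");        /* only reached if the exec failed */
            exit(1);
    }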
index eb5846bcb565fe3c53599adf4fbe5d1bdb9bed8d..8104895a7b67d8b5324fa975a98f72cb0ccd2bd8 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
index 5bfaae5a6cbae1b8a63283e9ba32f18269677dc7..33efc8673aae879daab6b97de3c1713ec09e3edd 100644 (file)
@@ -1,3 +1,3 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
 
index 6e0b2f7755ac218098479958150f06129cc60ec4..7cb9db2304483a69543f14fddc8883dc3d964480 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
index 6e0b2f7755ac218098479958150f06129cc60ec4..7cb9db2304483a69543f14fddc8883dc3d964480 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
index 9f2acaaae9f0538fbdea692bbadd015965b48ae2..464251a1bd7e6a235d59579b85ff80d646aead50 100644 (file)
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
 
 
 
index 85301f2471ff59b774573142a4d4c3db5e40604d..8edda9078d5d3a4711f4039e01be86438774dcdf 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
+perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
index eb5846bcb565fe3c53599adf4fbe5d1bdb9bed8d..8104895a7b67d8b5324fa975a98f72cb0ccd2bd8 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
index 5ecbb433caf499d4103ad9a5561ebfa7eea86471..b1495c9a9b20e14ca2ad1ae62eafc13b741d2ca5 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
index d931a828126b75d843f40fc2ad3eb1cebc3b43fd..558754b840a98293e31aeb4a70864dd75f3b3b2f 100644 (file)
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e net:net_dev_xmit -e net:net_dev_queue                \
+perf record -e net:net_dev_xmit -e net:net_dev_queue           \
                -e net:netif_receive_skb -e net:netif_rx                \
                -e skb:consume_skb -e skb:kfree_skb                     \
                -e skb:skb_copy_datagram_iovec -e napi:napi_poll        \
index 17a3e9bd9e8f0b6f34a2989e7a6d4cbd5ffe7090..7493fddbe99532db2b2c0c4ad3d7b469d29e1dda 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
index 1fc5998b721d5f2a5f2a89e83ad90559f18439dd..4efbfaa7f6a5dc1deace4583c28f3e29fbf0c878 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
index 1fc5998b721d5f2a5f2a89e83ad90559f18439dd..4efbfaa7f6a5dc1deace4583c28f3e29fbf0c878 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
index 1fc5998b721d5f2a5f2a89e83ad90559f18439dd..4efbfaa7f6a5dc1deace4583c28f3e29fbf0c878 100644 (file)
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
index 9706d9d40279859321412b270c8ac3053141f4f9..056c69521a38098a8053d18f3e615d3a35390fd4 100644 (file)
@@ -104,9 +104,10 @@ out_destroy_form:
        return rc;
 }
 
+static const char yes[] = "Yes", no[] = "No";
+
 bool ui__dialog_yesno(const char *msg)
 {
        /* newtWinChoice should really be accepting const char pointers... */
-       char yes[] = "Yes", no[] = "No";
-       return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+       return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
 }
index b9efed5e35cc222f3e7b34d922bbc1b90b29987b..792a750d9441af80cd16e4d797234159875babc4 100644 (file)
@@ -30,8 +30,8 @@ __irf_end:
 .section .init.ramfs.info,"a"
 .globl __initramfs_size
 __initramfs_size:
-#ifdef CONFIG_32BIT
-       .long __irf_end - __irf_start
-#else
+#ifdef CONFIG_64BIT
        .quad __irf_end - __irf_start
+#else
+       .long __irf_end - __irf_start
 #endif