bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'linus' into core/iommu
author Ingo Molnar <mingo@elte.hu>
Fri, 16 Jan 2009 09:09:10 +0000 (10:09 +0100)
committer Ingo Molnar <mingo@elte.hu>
Fri, 16 Jan 2009 09:09:10 +0000 (10:09 +0100)
Conflicts:
arch/ia64/include/asm/dma-mapping.h
arch/ia64/include/asm/machvec.h
arch/ia64/include/asm/machvec_sn2.h
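
The resolution converts ia64's DMA layer from per-operation machvec hooks to the generic struct dma_map_ops table: each dma_* wrapper now fetches the device's ops with platform_dma_get_ops() and dispatches through a function pointer, and platforms install one ops table instead of a dozen individual hooks. A minimal stand-alone sketch of that dispatch shape (hypothetical stub types and a demo ops table, not the kernel code itself):

    /* demo_dma_ops.c -- illustrative only; simplified stand-ins for kernel types. */
    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long dma_addr_t;
    enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };
    struct device { const char *name; };

    /* Trimmed-down version of the generic ops table this merge adopts. */
    struct dma_map_ops {
            dma_addr_t (*map_page)(struct device *dev, void *addr,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir);
    };

    /* A platform's dma_init hook installs its table once... */
    static dma_addr_t demo_map_page(struct device *dev, void *addr,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir)
    {
            printf("%s: map %zu bytes\n", dev->name, size);
            return (dma_addr_t)addr + offset;
    }

    static struct dma_map_ops demo_ops = { .map_page = demo_map_page };
    static struct dma_map_ops *dma_ops = &demo_ops;

    /* ...and every wrapper reaches it through a single lookup, as the
     * inlines in dma-mapping.h below do via platform_dma_get_ops(). */
    static struct dma_map_ops *get_ops(struct device *dev) { return dma_ops; }

    int main(void)
    {
            struct device dev = { .name = "demo" };
            char buf[64];
            get_ops(&dev)->map_page(&dev, buf, 0, sizeof(buf), DMA_TO_DEVICE);
            return 0;
    }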

arch/ia64/include/asm/dma-mapping.h
arch/ia64/include/asm/machvec.h
arch/ia64/include/asm/machvec_sn2.h
arch/ia64/sn/pci/pci_dma.c
drivers/pci/intel-iommu.c

diff --cc arch/ia64/include/asm/dma-mapping.h
index f4d4b1850a7ec0f9ae0ba0257b2d019665412092,1f912d927585efbc69cb29734a4ddb4f67bff154..abe52575e905c12194efd9ec910a9a86bb879836
  #include <linux/scatterlist.h>
  #include <asm/swiotlb.h>
  
 -struct dma_mapping_ops {
 -      int             (*mapping_error)(struct device *dev,
 -                                       dma_addr_t dma_addr);
 -      void*           (*alloc_coherent)(struct device *dev, size_t size,
 -                              dma_addr_t *dma_handle, gfp_t gfp);
 -      void            (*free_coherent)(struct device *dev, size_t size,
 -                              void *vaddr, dma_addr_t dma_handle);
 -      dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
 -                              size_t size, int direction);
 -      void            (*unmap_single)(struct device *dev, dma_addr_t addr,
 -                              size_t size, int direction);
 -      void            (*sync_single_for_cpu)(struct device *hwdev,
 -                              dma_addr_t dma_handle, size_t size,
 -                              int direction);
 -      void            (*sync_single_for_device)(struct device *hwdev,
 -                              dma_addr_t dma_handle, size_t size,
 -                              int direction);
 -      void            (*sync_single_range_for_cpu)(struct device *hwdev,
 -                              dma_addr_t dma_handle, unsigned long offset,
 -                              size_t size, int direction);
 -      void            (*sync_single_range_for_device)(struct device *hwdev,
 -                              dma_addr_t dma_handle, unsigned long offset,
 -                              size_t size, int direction);
 -      void            (*sync_sg_for_cpu)(struct device *hwdev,
 -                              struct scatterlist *sg, int nelems,
 -                              int direction);
 -      void            (*sync_sg_for_device)(struct device *hwdev,
 -                              struct scatterlist *sg, int nelems,
 -                              int direction);
 -      int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
 -                              int nents, int direction);
 -      void            (*unmap_sg)(struct device *hwdev,
 -                              struct scatterlist *sg, int nents,
 -                              int direction);
 -      int             (*dma_supported_op)(struct device *hwdev, u64 mask);
 -      int             is_phys;
 -};
 -
 -extern struct dma_mapping_ops *dma_ops;
+ #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 +extern struct dma_map_ops *dma_ops;
  extern struct ia64_machine_vector ia64_mv;
  extern void set_iommu_machvec(void);
  
 -#define dma_alloc_coherent(dev, size, handle, gfp)    \
 -      platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
 +extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 +                                  enum dma_data_direction);
 +extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 +                              enum dma_data_direction);
  
 -/* coherent mem. is cheap */
 -static inline void *
 -dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 -                    gfp_t flag)
 +static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 +                                     dma_addr_t *daddr, gfp_t gfp)
  {
 -      return dma_alloc_coherent(dev, size, dma_handle, flag);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
  }
 -#define dma_free_coherent     platform_dma_free_coherent
 -static inline void
 -dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 -                   dma_addr_t dma_handle)
 +
 +static inline void dma_free_coherent(struct device *dev, size_t size,
 +                                   void *caddr, dma_addr_t daddr)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->free_coherent(dev, size, caddr, daddr);
 +}
 +
 +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 +
 +static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 +                                            void *caddr, size_t size,
 +                                            enum dma_data_direction dir,
 +                                            struct dma_attrs *attrs)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->map_page(dev, virt_to_page(caddr),
 +                           (unsigned long)caddr & ~PAGE_MASK, size,
 +                           dir, attrs);
 +}
 +
 +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
 +                                        size_t size,
 +                                        enum dma_data_direction dir,
 +                                        struct dma_attrs *attrs)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->unmap_page(dev, daddr, size, dir, attrs);
 +}
 +
 +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
 +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
 +
 +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 +                                 int nents, enum dma_data_direction dir,
 +                                 struct dma_attrs *attrs)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->map_sg(dev, sgl, nents, dir, attrs);
 +}
 +
 +static inline void dma_unmap_sg_attrs(struct device *dev,
 +                                    struct scatterlist *sgl, int nents,
 +                                    enum dma_data_direction dir,
 +                                    struct dma_attrs *attrs)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->unmap_sg(dev, sgl, nents, dir, attrs);
 +}
 +
 +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
 +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
 +
 +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
 +                                         size_t size,
 +                                         enum dma_data_direction dir)
  {
 -      dma_free_coherent(dev, size, cpu_addr, dma_handle);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->sync_single_for_cpu(dev, daddr, size, dir);
  }
 -#define dma_map_single_attrs  platform_dma_map_single_attrs
 -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 -                                      size_t size, int dir)
 +
 +static inline void dma_sync_sg_for_cpu(struct device *dev,
 +                                     struct scatterlist *sgl,
 +                                     int nents, enum dma_data_direction dir)
  {
 -      return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->sync_sg_for_cpu(dev, sgl, nents, dir);
  }
 -#define dma_map_sg_attrs      platform_dma_map_sg_attrs
 -static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
 -                           int nents, int dir)
 +
 +static inline void dma_sync_single_for_device(struct device *dev,
 +                                            dma_addr_t daddr,
 +                                            size_t size,
 +                                            enum dma_data_direction dir)
  {
 -      return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->sync_single_for_device(dev, daddr, size, dir);
  }
 -#define dma_unmap_single_attrs        platform_dma_unmap_single_attrs
 -static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
 -                                  size_t size, int dir)
 +
 +static inline void dma_sync_sg_for_device(struct device *dev,
 +                                        struct scatterlist *sgl,
 +                                        int nents,
 +                                        enum dma_data_direction dir)
  {
 -      return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      ops->sync_sg_for_device(dev, sgl, nents, dir);
  }
 -#define dma_unmap_sg_attrs    platform_dma_unmap_sg_attrs
 -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 -                              int nents, int dir)
 +
 +static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->mapping_error(dev, daddr);
 +}
 +
 +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 +                                    size_t offset, size_t size,
 +                                    enum dma_data_direction dir)
  {
 -      return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->map_page(dev, page, offset, size, dir, NULL);
  }
 -#define dma_sync_single_for_cpu       platform_dma_sync_single_for_cpu
 -#define dma_sync_sg_for_cpu   platform_dma_sync_sg_for_cpu
 -#define dma_sync_single_for_device platform_dma_sync_single_for_device
 -#define dma_sync_sg_for_device        platform_dma_sync_sg_for_device
 -#define dma_mapping_error     platform_dma_mapping_error
  
 -#define dma_map_page(dev, pg, off, size, dir)                         \
 -      dma_map_single(dev, page_address(pg) + (off), (size), (dir))
 -#define dma_unmap_page(dev, dma_addr, size, dir)                      \
 -      dma_unmap_single(dev, dma_addr, size, dir)
 +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 +                                size_t size, enum dma_data_direction dir)
 +{
 +      dma_unmap_single(dev, addr, size, dir);
 +}
  
  /*
   * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
  #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)  \
        dma_sync_single_for_device(dev, dma_handle, size, dir)
  
 -#define dma_supported         platform_dma_supported
 +static inline int dma_supported(struct device *dev, u64 mask)
 +{
 +      struct dma_map_ops *ops = platform_dma_get_ops(dev);
 +      return ops->dma_supported(dev, mask);
 +}
  
  static inline int
  dma_set_mask (struct device *dev, u64 mask)
@@@ -172,4 -141,11 +174,4 @@@ dma_cache_sync (struct device *dev, voi
  
  #define dma_is_consistent(d, h)       (1)     /* all we do is coherent memory... */
  
 -static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 -{
 -      return dma_ops;
 -}
 -
 -
 -
  #endif /* _ASM_IA64_DMA_MAPPING_H */
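
For drivers the conversion is invisible: the calls and their semantics are unchanged, only the dispatch underneath them moved into struct dma_map_ops. A caller-side sketch (hypothetical driver fragment using the standard DMA API; not part of this commit):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical driver fragment: maps a buffer for device reads, checks
     * for mapping failure, and unmaps once the hardware is done. */
    static int demo_send(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... hand "handle" to the hardware and wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
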
diff --cc arch/ia64/include/asm/machvec.h
index 22a75fb55adb42b14e6f7c5ffdd2434ee883c84a,fe87b21217077b4765553b59d2a41742ee558562..367d299d99384e1448e9376b1c899af149e0f599
@@@ -11,6 -11,7 +11,6 @@@
  #define _ASM_IA64_MACHVEC_H
  
  #include <linux/types.h>
 -#include <linux/swiotlb.h>
  
  /* forward declarations: */
  struct device;
@@@ -44,7 -45,24 +44,8 @@@ typedef void ia64_mv_kernel_launch_even
  
  /* DMA-mapping interface: */
  typedef void ia64_mv_dma_init (void);
 -typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
 -typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
 -typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
 -typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
 -typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
 -typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
 -typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
 -typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
 -typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
 -typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
 -typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
 -typedef int ia64_mv_dma_supported (struct device *, u64);
 -
 -typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
 -typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
 -typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
 -typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+ typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 +typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
  
  /*
   * WARNING: The legacy I/O space is _architected_.  Platforms are
@@@ -96,6 -114,8 +97,6 @@@ machvec_noop_bus (struct pci_bus *bus
  
  extern void machvec_setup (char **);
  extern void machvec_timer_interrupt (int, void *);
 -extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
 -extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
  extern void machvec_tlb_migrate_finish (struct mm_struct *);
  
  # if defined (CONFIG_IA64_HP_SIM)
  #  define platform_global_tlb_purge   ia64_mv.global_tlb_purge
  #  define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
  #  define platform_dma_init           ia64_mv.dma_init
 -#  define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
 -#  define platform_dma_free_coherent  ia64_mv.dma_free_coherent
 -#  define platform_dma_map_single_attrs       ia64_mv.dma_map_single_attrs
 -#  define platform_dma_unmap_single_attrs     ia64_mv.dma_unmap_single_attrs
 -#  define platform_dma_map_sg_attrs   ia64_mv.dma_map_sg_attrs
 -#  define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs
 -#  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
 -#  define platform_dma_sync_sg_for_cpu        ia64_mv.dma_sync_sg_for_cpu
 -#  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
 -#  define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
 -#  define platform_dma_mapping_error          ia64_mv.dma_mapping_error
 -#  define platform_dma_supported      ia64_mv.dma_supported
+ #  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 +#  define platform_dma_get_ops                ia64_mv.dma_get_ops
  #  define platform_irq_to_vector      ia64_mv.irq_to_vector
  #  define platform_local_vector_to_irq        ia64_mv.local_vector_to_irq
  #  define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
@@@ -171,7 -203,19 +173,8 @@@ struct ia64_machine_vector 
        ia64_mv_global_tlb_purge_t *global_tlb_purge;
        ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
        ia64_mv_dma_init *dma_init;
 -      ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
 -      ia64_mv_dma_free_coherent *dma_free_coherent;
 -      ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
 -      ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
 -      ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
 -      ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
 -      ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
 -      ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
 -      ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
 -      ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 -      ia64_mv_dma_mapping_error *dma_mapping_error;
 -      ia64_mv_dma_supported *dma_supported;
+       ia64_mv_dma_get_required_mask *dma_get_required_mask;
 +      ia64_mv_dma_get_ops *dma_get_ops;
        ia64_mv_irq_to_vector *irq_to_vector;
        ia64_mv_local_vector_to_irq *local_vector_to_irq;
        ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
        platform_global_tlb_purge,              \
        platform_tlb_migrate_finish,            \
        platform_dma_init,                      \
 -      platform_dma_alloc_coherent,            \
 -      platform_dma_free_coherent,             \
 -      platform_dma_map_single_attrs,          \
 -      platform_dma_unmap_single_attrs,        \
 -      platform_dma_map_sg_attrs,              \
 -      platform_dma_unmap_sg_attrs,            \
 -      platform_dma_sync_single_for_cpu,       \
 -      platform_dma_sync_sg_for_cpu,           \
 -      platform_dma_sync_single_for_device,    \
 -      platform_dma_sync_sg_for_device,        \
 -      platform_dma_mapping_error,                     \
 -      platform_dma_supported,                 \
+       platform_dma_get_required_mask,         \
 +      platform_dma_get_ops,                   \
        platform_irq_to_vector,                 \
        platform_local_vector_to_irq,           \
        platform_pci_get_legacy_mem,            \
@@@ -246,9 -302,6 +250,9 @@@ extern void machvec_init_from_cmdline(c
  #  error Unknown configuration.  Update arch/ia64/include/asm/machvec.h.
  # endif /* CONFIG_IA64_GENERIC */
  
 +extern void swiotlb_dma_init(void);
 +extern struct dma_map_ops *dma_get_ops(struct device *);
 +
  /*
   * Define default versions so we can extend machvec for new platforms without having
   * to update the machvec files for all existing platforms.
  # define platform_kernel_launch_event machvec_noop
  #endif
  #ifndef platform_dma_init
 -# define platform_dma_init            swiotlb_init
 -#endif
 -#ifndef platform_dma_alloc_coherent
 -# define platform_dma_alloc_coherent  swiotlb_alloc_coherent
 -#endif
 -#ifndef platform_dma_free_coherent
 -# define platform_dma_free_coherent   swiotlb_free_coherent
 -#endif
 -#ifndef platform_dma_map_single_attrs
 -# define platform_dma_map_single_attrs        swiotlb_map_single_attrs
 -#endif
 -#ifndef platform_dma_unmap_single_attrs
 -# define platform_dma_unmap_single_attrs      swiotlb_unmap_single_attrs
 -#endif
 -#ifndef platform_dma_map_sg_attrs
 -# define platform_dma_map_sg_attrs    swiotlb_map_sg_attrs
 -#endif
 -#ifndef platform_dma_unmap_sg_attrs
 -# define platform_dma_unmap_sg_attrs  swiotlb_unmap_sg_attrs
 -#endif
 -#ifndef platform_dma_sync_single_for_cpu
 -# define platform_dma_sync_single_for_cpu     swiotlb_sync_single_for_cpu
 -#endif
 -#ifndef platform_dma_sync_sg_for_cpu
 -# define platform_dma_sync_sg_for_cpu         swiotlb_sync_sg_for_cpu
 -#endif
 -#ifndef platform_dma_sync_single_for_device
 -# define platform_dma_sync_single_for_device  swiotlb_sync_single_for_device
 -#endif
 -#ifndef platform_dma_sync_sg_for_device
 -# define platform_dma_sync_sg_for_device      swiotlb_sync_sg_for_device
 -#endif
 -#ifndef platform_dma_mapping_error
 -# define platform_dma_mapping_error           swiotlb_dma_mapping_error
 +# define platform_dma_init            swiotlb_dma_init
  #endif
 -#ifndef platform_dma_supported
 -# define  platform_dma_supported      swiotlb_dma_supported
 +#ifndef platform_dma_get_ops
 +# define platform_dma_get_ops         dma_get_ops
  #endif
+ #ifndef platform_dma_get_required_mask
+ # define  platform_dma_get_required_mask      ia64_dma_get_required_mask
+ #endif
  #ifndef platform_irq_to_vector
  # define platform_irq_to_vector               __ia64_irq_to_vector
  #endif
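
The #ifndef/#define blocks above are machvec's default mechanism: a platform header that defines a platform_* macro keeps its own hook, and any hook left undefined falls back to the generic version. A sketch of how a platform opts in (hypothetical platform "foo", following the convention machvec_sn2.h uses below):

    /* Hypothetical machvec_foo.h fragment: defining these macros before
     * machvec.h's defaults section suppresses the #ifndef fallbacks. */
    extern ia64_mv_dma_init foo_dma_init;
    extern ia64_mv_dma_get_ops foo_dma_get_ops;

    #define platform_dma_init       foo_dma_init
    #define platform_dma_get_ops    foo_dma_get_ops
    /* platform_dma_get_required_mask is left undefined here, so this
     * platform inherits the generic ia64_dma_get_required_mask default. */
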
diff --cc arch/ia64/include/asm/machvec_sn2.h
index afd029b4797ec84587595f902defe076df45cc6e,f1a6e0d6dfa582c1533e2b66f3a8cfb6744b5233..f061a30aac42c7b297434d0ec88ae0f5d348f5ee
@@@ -55,7 -55,19 +55,8 @@@ extern ia64_mv_readb_t __sn_readb_relax
  extern ia64_mv_readw_t __sn_readw_relaxed;
  extern ia64_mv_readl_t __sn_readl_relaxed;
  extern ia64_mv_readq_t __sn_readq_relaxed;
 -extern ia64_mv_dma_alloc_coherent     sn_dma_alloc_coherent;
 -extern ia64_mv_dma_free_coherent      sn_dma_free_coherent;
 -extern ia64_mv_dma_map_single_attrs   sn_dma_map_single_attrs;
 -extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs;
 -extern ia64_mv_dma_map_sg_attrs               sn_dma_map_sg_attrs;
 -extern ia64_mv_dma_unmap_sg_attrs     sn_dma_unmap_sg_attrs;
 -extern ia64_mv_dma_sync_single_for_cpu        sn_dma_sync_single_for_cpu;
 -extern ia64_mv_dma_sync_sg_for_cpu    sn_dma_sync_sg_for_cpu;
 -extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
 -extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
 -extern ia64_mv_dma_mapping_error      sn_dma_mapping_error;
 -extern ia64_mv_dma_supported          sn_dma_supported;
+ extern ia64_mv_dma_get_required_mask  sn_dma_get_required_mask;
 +extern ia64_mv_dma_init                       sn_dma_init;
  extern ia64_mv_migrate_t              sn_migrate;
  extern ia64_mv_kernel_launch_event_t  sn_kernel_launch_event;
  extern ia64_mv_setup_msi_irq_t                sn_setup_msi_irq;
@@@ -99,7 -111,20 +100,8 @@@ extern ia64_mv_pci_fixup_bus_t             sn_pci_
  #define platform_pci_get_legacy_mem   sn_pci_get_legacy_mem
  #define platform_pci_legacy_read      sn_pci_legacy_read
  #define platform_pci_legacy_write     sn_pci_legacy_write
 -#define platform_dma_init             machvec_noop
 -#define platform_dma_alloc_coherent   sn_dma_alloc_coherent
 -#define platform_dma_free_coherent    sn_dma_free_coherent
 -#define platform_dma_map_single_attrs sn_dma_map_single_attrs
 -#define platform_dma_unmap_single_attrs       sn_dma_unmap_single_attrs
 -#define platform_dma_map_sg_attrs     sn_dma_map_sg_attrs
 -#define platform_dma_unmap_sg_attrs   sn_dma_unmap_sg_attrs
 -#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
 -#define platform_dma_sync_sg_for_cpu  sn_dma_sync_sg_for_cpu
 -#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
 -#define platform_dma_sync_sg_for_device       sn_dma_sync_sg_for_device
 -#define platform_dma_mapping_error            sn_dma_mapping_error
 -#define platform_dma_supported                sn_dma_supported
+ #define platform_dma_get_required_mask        sn_dma_get_required_mask
 +#define platform_dma_init             sn_dma_init
  #define platform_migrate              sn_migrate
  #define platform_kernel_launch_event    sn_kernel_launch_event
  #ifdef CONFIG_PCI_MSI
diff --cc arch/ia64/sn/pci/pci_dma.c
index 9c788f9cedfd334dad67d0b3c085e116addcabd3,863f5017baae6d6981ff809ce2ffe931d8fdcdf6..8c130e8f00e1d3aee8052eb3141b8e33eb3ec3bd
@@@ -10,7 -10,7 +10,7 @@@
   */
  
  #include <linux/module.h>
 -#include <linux/dma-attrs.h>
 +#include <linux/dma-mapping.h>
  #include <asm/dma.h>
  #include <asm/sn/intr.h>
  #include <asm/sn/pcibus_provider_defs.h>
@@@ -31,7 -31,7 +31,7 @@@
   * this function.  Of course, SN only supports devices that have 32 or more
   * address bits when using the PMU.
   */
 -int sn_dma_supported(struct device *dev, u64 mask)
 +static int sn_dma_supported(struct device *dev, u64 mask)
  {
        BUG_ON(dev->bus != &pci_bus_type);
  
@@@ -39,6 -39,7 +39,6 @@@
                return 0;
        return 1;
  }
 -EXPORT_SYMBOL(sn_dma_supported);
  
  /**
   * sn_dma_set_mask - set the DMA mask
@@@ -74,8 -75,8 +74,8 @@@ EXPORT_SYMBOL(sn_dma_set_mask)
   * queue for a SCSI controller).  See Documentation/DMA-API.txt for
   * more information.
   */
 -void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 -                          dma_addr_t * dma_handle, gfp_t flags)
 +static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
 +                                 dma_addr_t * dma_handle, gfp_t flags)
  {
        void *cpuaddr;
        unsigned long phys_addr;
  
        return cpuaddr;
  }
 -EXPORT_SYMBOL(sn_dma_alloc_coherent);
  
  /**
   * sn_pci_free_coherent - free memory associated with coherent DMAable region
   * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
   * any associated IOMMU mappings.
   */
 -void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 -                        dma_addr_t dma_handle)
 +static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 +                               dma_addr_t dma_handle)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
  }
 -EXPORT_SYMBOL(sn_dma_free_coherent);
  
  /**
   * sn_dma_map_single_attrs - map a single page for DMA
   * TODO: simplify our interface;
   *       figure out how to save dmamap handle so can use two step.
   */
 -dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
 -                                 size_t size, int direction,
 -                                 struct dma_attrs *attrs)
 +static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
 +                                unsigned long offset, size_t size,
 +                                enum dma_data_direction dir,
 +                                struct dma_attrs *attrs)
  {
 +      void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        }
        return dma_addr;
  }
 -EXPORT_SYMBOL(sn_dma_map_single_attrs);
  
  /**
  * sn_dma_unmap_single_attrs - unmap a DMA mapped page
   * by @dma_handle into the coherence domain.  On SN, we're always cache
   * coherent, so we just need to free any ATEs associated with this mapping.
   */
 -void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
 -                             size_t size, int direction,
 -                             struct dma_attrs *attrs)
 +static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 +                            size_t size, enum dma_data_direction dir,
 +                            struct dma_attrs *attrs)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
  
        BUG_ON(dev->bus != &pci_bus_type);
  
 -      provider->dma_unmap(pdev, dma_addr, direction);
 +      provider->dma_unmap(pdev, dma_addr, dir);
  }
 -EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
  
  /**
 - * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
 + * sn_dma_unmap_sg - unmap a DMA scatterlist
   * @dev: device to unmap
   * @sg: scatterlist to unmap
   * @nhwentries: number of scatterlist entries
   *
   * Unmap a set of streaming mode DMA translations.
   */
 -void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
 -                         int nhwentries, int direction,
 -                         struct dma_attrs *attrs)
 +static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 +                          int nhwentries, enum dma_data_direction dir,
 +                          struct dma_attrs *attrs)
  {
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        BUG_ON(dev->bus != &pci_bus_type);
  
        for_each_sg(sgl, sg, nhwentries, i) {
 -              provider->dma_unmap(pdev, sg->dma_address, direction);
 +              provider->dma_unmap(pdev, sg->dma_address, dir);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
  }
 -EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
  
  /**
 - * sn_dma_map_sg_attrs - map a scatterlist for DMA
 + * sn_dma_map_sg - map a scatterlist for DMA
   * @dev: device to map for
   * @sg: scatterlist to map
   * @nhwentries: number of entries
   *
   * Maps each entry of @sg for DMA.
   */
 -int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 -                      int nhwentries, int direction, struct dma_attrs *attrs)
 +static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
 +                       int nhwentries, enum dma_data_direction dir,
 +                       struct dma_attrs *attrs)
  {
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
 -                              sn_dma_unmap_sg_attrs(dev, saved_sg, i,
 -                                                    direction, attrs);
 +                              sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
                        return 0;
                }
  
  
        return nhwentries;
  }
 -EXPORT_SYMBOL(sn_dma_map_sg_attrs);
  
 -void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 -                              size_t size, int direction)
 +static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 +                                     size_t size, enum dma_data_direction dir)
  {
        BUG_ON(dev->bus != &pci_bus_type);
  }
 -EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
  
 -void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 -                                 size_t size, int direction)
 +static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 +                                        size_t size,
 +                                        enum dma_data_direction dir)
  {
        BUG_ON(dev->bus != &pci_bus_type);
  }
 -EXPORT_SYMBOL(sn_dma_sync_single_for_device);
  
 -void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 -                          int nelems, int direction)
 +static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 +                                 int nelems, enum dma_data_direction dir)
  {
        BUG_ON(dev->bus != &pci_bus_type);
  }
 -EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
  
 -void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 -                             int nelems, int direction)
 +static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 +                                    int nelems, enum dma_data_direction dir)
  {
        BUG_ON(dev->bus != &pci_bus_type);
  }
 -EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
  
 -int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 +static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  {
        return 0;
  }
 -EXPORT_SYMBOL(sn_dma_mapping_error);
  
+ u64 sn_dma_get_required_mask(struct device *dev)
+ {
+       return DMA_64BIT_MASK;
+ }
+ EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
  char *sn_pci_get_legacy_mem(struct pci_bus *bus)
  {
        if (!SN_PCIBUS_BUSSOFT(bus))
@@@ -456,23 -471,3 +462,23 @@@ int sn_pci_legacy_write(struct pci_bus 
   out:
        return ret;
  }
 +
 +static struct dma_map_ops sn_dma_ops = {
 +      .alloc_coherent         = sn_dma_alloc_coherent,
 +      .free_coherent          = sn_dma_free_coherent,
 +      .map_page               = sn_dma_map_page,
 +      .unmap_page             = sn_dma_unmap_page,
 +      .map_sg                 = sn_dma_map_sg,
 +      .unmap_sg               = sn_dma_unmap_sg,
 +      .sync_single_for_cpu    = sn_dma_sync_single_for_cpu,
 +      .sync_sg_for_cpu        = sn_dma_sync_sg_for_cpu,
 +      .sync_single_for_device = sn_dma_sync_single_for_device,
 +      .sync_sg_for_device     = sn_dma_sync_sg_for_device,
 +      .mapping_error          = sn_dma_mapping_error,
 +      .dma_supported          = sn_dma_supported,
 +};
 +
 +void sn_dma_init(void)
 +{
 +      dma_ops = &sn_dma_ops;
 +}
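
Taken together, the SN2 conversion replaces a dozen exported sn_dma_* entry points with one static ops table plus a single init hook. The resulting boot-time and per-call flow, as implied by the diff (illustrative call chain, simplified):

    platform_dma_init()                     /* machvec_sn2.h maps this to... */
        -> sn_dma_init()
               dma_ops = &sn_dma_ops;       /* install the table once */

    dma_map_sg(dev, sgl, nents, dir)        /* generic inline wrapper */
        -> platform_dma_get_ops(dev)        /* -> dma_get_ops() -> dma_ops */
        -> sn_dma_ops.map_sg                /* == sn_dma_map_sg() */
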
diff --cc drivers/pci/intel-iommu.c
index b9a5629339039a7e54bbfbe7f1ad6f5e6c173f95,3dfecb20d5e7949953e7fefa2796af4b48bef88f..c933980bf5625c5e3a406c9a84322f9e93b98eab
@@@ -438,7 -438,8 +438,8 @@@ static struct intel_iommu *device_to_io
                        continue;
  
                for (i = 0; i < drhd->devices_cnt; i++)
-                       if (drhd->devices[i]->bus->number == bus &&
+                       if (drhd->devices[i] &&
+                           drhd->devices[i]->bus->number == bus &&
                            drhd->devices[i]->devfn == devfn)
                                return drhd->iommu;
  
@@@ -2273,15 -2274,6 +2274,15 @@@ error
        return 0;
  }
  
 +static dma_addr_t intel_map_page(struct device *dev, struct page *page,
 +                               unsigned long offset, size_t size,
 +                               enum dma_data_direction dir,
 +                               struct dma_attrs *attrs)
 +{
 +      return __intel_map_single(dev, page_to_phys(page) + offset, size,
 +                                dir, to_pci_dev(dev)->dma_mask);
 +}
 +
  dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
                            size_t size, int dir)
  {
@@@ -2350,9 -2342,8 +2351,9 @@@ static void add_unmap(struct dmar_domai
        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
  }
  
 -void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 -                      int dir)
 +static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 +                           size_t size, enum dma_data_direction dir,
 +                           struct dma_attrs *attrs)
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dmar_domain *domain;
        }
  }
  
 +void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 +                      int dir)
 +{
 +      intel_unmap_page(dev, dev_addr, size, dir, NULL);
 +}
 +
  void *intel_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags)
  {
@@@ -2441,8 -2426,7 +2442,8 @@@ void intel_free_coherent(struct device 
  #define SG_ENT_VIRT_ADDRESS(sg)       (sg_virt((sg)))
  
  void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 -                  int nelems, int dir)
 +                  int nelems, enum dma_data_direction dir,
 +                  struct dma_attrs *attrs)
  {
        int i;
        struct pci_dev *pdev = to_pci_dev(hwdev);
@@@ -2500,7 -2484,7 +2501,7 @@@ static int intel_nontranslate_map_sg(st
  }
  
  int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 -               int dir)
 +               enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        void *addr;
        int i;
        return nelems;
  }
  
 -static struct dma_mapping_ops intel_dma_ops = {
 +struct dma_map_ops intel_dma_ops = {
        .alloc_coherent = intel_alloc_coherent,
        .free_coherent = intel_free_coherent,
 -      .map_single = intel_map_single,
 -      .unmap_single = intel_unmap_single,
        .map_sg = intel_map_sg,
        .unmap_sg = intel_unmap_sg,
 +      .map_page = intel_map_page,
 +      .unmap_page = intel_unmap_page,
  };
  
  static inline int iommu_domain_cache_init(void)
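
The same inversion closes out the intel-iommu conversion: map_page/unmap_page become the primary entries in the ops table, and the legacy single-address API survives as a thin wrapper (intel_unmap_single() above now simply calls intel_unmap_page()). A matching sketch for the mapping side (hypothetical, mirroring the wrapper pattern in the diff; not part of this commit):

    /* Hypothetical wrapper mirroring intel_unmap_single(): decompose a
     * virtual address into page + offset and defer to the page-based op. */
    static dma_addr_t demo_map_single(struct device *dev, void *ptr,
                                      size_t size, enum dma_data_direction dir)
    {
            return intel_map_page(dev, virt_to_page(ptr),
                                  (unsigned long)ptr & ~PAGE_MASK,
                                  size, dir, NULL);
    }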