#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

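/*
 * Legacy ISA devices can only address the low 16 MB of memory, hence
 * the 24-bit mask when CONFIG_ISA is enabled.
 */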
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

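/* On x86, a returned bus address of 0 signals a failed mapping. */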
#define DMA_ERROR_CODE	0

extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

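/*
 * On 64-bit, a device may carry its own dma_map_ops (set e.g. by an
 * IOMMU driver); 32-bit always uses the global dma_ops.
 */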
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

#include <asm-generic/dma-mapping-common.h>

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == DMA_ERROR_CODE);
}
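
/*
 * Illustrative error check after a streaming mapping (a minimal
 * sketch; "my_dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 */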
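
/*
 * x86 DMA is always cache-coherent, so the "noncoherent" variants are
 * simple aliases for the coherent ones.
 */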
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

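/*
 * True if the whole range [addr, addr + size) lies below the device's
 * DMA mask.
 */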
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

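/*
 * Without an IOMMU, x86 bus (DMA) addresses are identical to CPU
 * physical addresses, so both translations are the identity.
 */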
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

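/*
 * Caches are coherent on x86, so syncing only needs to flush the CPU
 * write buffers.
 */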
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

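/*
 * Pick the mask used for a coherent allocation: the device's coherent
 * mask if one is set, otherwise 24 or 32 bits depending on GFP_DMA.
 */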
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

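/*
 * Map the coherent mask onto allocator zone flags: a <= 24-bit mask
 * needs ZONE_DMA, and on 64-bit a <= 32-bit mask needs ZONE_DMA32.
 */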
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

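/*
 * Allocate a coherent buffer: try any per-device coherent memory pool
 * first, substitute x86_dma_fallback_dev when no device is given, then
 * hand the request to the active dma_map_ops implementation.
 */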
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

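/*
 * Free a buffer obtained from dma_alloc_coherent(). Must not be called
 * with interrupts disabled, hence the WARN_ON below.
 */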
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
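
/*
 * Illustrative alloc/free pairing (a minimal sketch; "my_dev" and
 * "MY_BUF_SIZE" are hypothetical):
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(my_dev, MY_BUF_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... give "bus" to the device, access the buffer through "cpu" ...
 *	dma_free_coherent(my_dev, MY_BUF_SIZE, cpu, bus);
 */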

#endif