@@ -14,23 +14,6 @@
  * Generic iommu implementation
  */
 
-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
-	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-		dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
-		unsigned long attrs)
-{
-	return dev->archdata.iommu_bypass &&
-		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 			      dma_addr_t *dma_handle, gfp_t flag,
 			      unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
 				    void *vaddr, dma_addr_t dma_handle,
 				    unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-	else
-		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
-				dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 				     enum dma_data_direction direction,
 				     unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_page(dev, page, offset, size, direction,
-				attrs);
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
 			      size, dma_get_mask(dev), direction, attrs);
 }
@@ -79,20 +53,15 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				 size_t size, enum dma_data_direction direction,
 				 unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
-				direction, attrs);
-	else
-		dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+			 attrs);
 }
 
 
 static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			    int nelems, enum dma_data_direction direction,
 			    unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 				dma_get_mask(dev), direction, attrs);
 }
@@ -101,20 +70,18 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			       int nelems, enum dma_data_direction direction,
 			       unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
 			   direction, attrs);
-	else
-		dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
 }
 
 static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct pci_controller *phb = pci_bus_to_host(pdev->bus);
 
-	return phb->controller_ops.iommu_bypass_supported &&
-		phb->controller_ops.iommu_bypass_supported(pdev, mask);
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
 }
 
 /* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
-		dev->archdata.iommu_bypass = true;
+		dev->dma_ops_bypass = true;
 		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
 		return 1;
 	}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	}
 
 	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-	dev->archdata.iommu_bypass = false;
+	dev->dma_ops_bypass = false;
 	return 1;
 }
 
@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;
 
-	if (dev_is_pci(dev)) {
-		u64 bypass_mask = dma_direct_get_required_mask(dev);
-
-		if (dma_iommu_bypass_supported(dev, bypass_mask))
-			return bypass_mask;
-	}
-
 	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
 	mask += mask - 1;
 
 	return mask;
 }
 
-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
 const struct dma_map_ops dma_iommu_ops = {
 	.alloc = dma_iommu_alloc_coherent,
 	.free = dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
 	.map_page = dma_iommu_map_page,
 	.unmap_page = dma_iommu_unmap_page,
 	.get_required_mask = dma_iommu_get_required_mask,
-	.sync_single_for_cpu = dma_iommu_sync_for_cpu,
-	.sync_single_for_device = dma_iommu_sync_for_device,
-	.sync_sg_for_cpu = dma_iommu_sync_sg_for_cpu,
-	.sync_sg_for_device = dma_iommu_sync_sg_for_device,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
 };
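
With the private archdata.iommu_bypass helpers gone, the bypass decision moves into the generic DMA mapping layer, keyed off the dev->dma_ops_bypass flag that dma_iommu_dma_supported() sets above. The sketch below condenses how that generic check works; it is modeled on dma_go_direct() and dma_map_page_attrs() in kernel/dma/mapping.c (under CONFIG_DMA_OPS_BYPASS), but simplified, so treat the exact guards as an approximation rather than the verbatim kernel code:

/*
 * Simplified sketch of the generic bypass check, not verbatim kernel code.
 * Relies on kernel-internal types from <linux/dma-mapping.h> and
 * <linux/dma-direct.h>.
 */
static bool dma_go_direct(struct device *dev, u64 mask,
			  const struct dma_map_ops *ops)
{
	if (!ops)		/* no arch ops at all: always direct */
		return true;
	/*
	 * A device flagged by the arch code (as dma_iommu_dma_supported()
	 * does above) goes direct whenever the mask used for this call
	 * covers all addressable memory ...
	 */
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);
	return false;		/* ... and uses the IOMMU ops otherwise */
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* The direct path is taken before ops->map_page() is consulted. */
	if (dma_go_direct(dev, *dev->dma_mask, ops))
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}

Because the generic layer short-circuits to dma_direct_* before the arch ops are ever called, the per-call bypass tests and the sync_* forwarders deleted above become dead code, which is why this patch can remove them.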
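From a driver's perspective nothing changes with this patch: requesting a wide DMA mask is still what opts a device into the bypass path. A typical probe-time sequence is sketched below; my_pci_probe() is a hypothetical driver function used purely for illustration:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Hypothetical probe function, for illustration only.
 * dma_set_mask_and_coherent() ends up in dma_iommu_dma_supported(), which
 * flips dev->dma_ops_bypass on when the PHB reports the whole mask can be
 * mapped directly; otherwise the TCE-based iommu ops above keep servicing
 * every map/unmap.
 */
static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)	/* fall back to 32-bit DMA through the IOMMU window */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return ret;
}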