Skip to content

Commit f99d603

Browse files
committed
ARM: dma-mapping: use alloc, mmap, free from dma_ops
This patch converts dma_alloc/free/mmap_{coherent,writecombine} functions to use generic alloc/free/mmap methods from dma_map_ops structure. A new DMA_ATTR_WRITE_COMBINE DMA attribute has been introduced to implement writecombine methods. Signed-off-by: Marek Szyprowski <[email protected]> Acked-by: Kyungmin Park <[email protected]> Acked-by: Arnd Bergmann <[email protected]> Tested-by: Subash Patel <[email protected]>
1 parent 51fde34 commit f99d603

File tree

3 files changed

+104
-66
lines changed

3 files changed

+104
-66
lines changed

arch/arm/common/dmabounce.c

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -449,6 +449,9 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
449449
}
450450

451451
static struct dma_map_ops dmabounce_ops = {
452+
.alloc = arm_dma_alloc,
453+
.free = arm_dma_free,
454+
.mmap = arm_dma_mmap,
452455
.map_page = dmabounce_map_page,
453456
.unmap_page = dmabounce_unmap_page,
454457
.sync_single_for_cpu = dmabounce_sync_for_cpu,

arch/arm/include/asm/dma-mapping.h

Lines changed: 77 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -5,6 +5,7 @@
55

66
#include <linux/mm_types.h>
77
#include <linux/scatterlist.h>
8+
#include <linux/dma-attrs.h>
89
#include <linux/dma-debug.h>
910

1011
#include <asm-generic/dma-coherent.h>
@@ -110,68 +111,115 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
110111
extern int dma_supported(struct device *dev, u64 mask);
111112

112113
/**
113-
* dma_alloc_coherent - allocate consistent memory for DMA
114+
* arm_dma_alloc - allocate consistent memory for DMA
114115
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
115116
* @size: required memory size
116117
* @handle: bus-specific DMA address
118+
* @attrs: optional attributes that specify mapping properties
117119
*
118-
* Allocate some uncached, unbuffered memory for a device for
119-
* performing DMA. This function allocates pages, and will
120-
* return the CPU-viewed address, and sets @handle to be the
121-
* device-viewed address.
120+
* Allocate some memory for a device for performing DMA. This function
121+
* allocates pages, and will return the CPU-viewed address, and sets @handle
122+
* to be the device-viewed address.
122123
*/
123-
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
124+
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
125+
gfp_t gfp, struct dma_attrs *attrs);
126+
127+
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
128+
129+
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
130+
dma_addr_t *dma_handle, gfp_t flag,
131+
struct dma_attrs *attrs)
132+
{
133+
struct dma_map_ops *ops = get_dma_ops(dev);
134+
void *cpu_addr;
135+
BUG_ON(!ops);
136+
137+
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
138+
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
139+
return cpu_addr;
140+
}
124141

125142
/**
126-
* dma_free_coherent - free memory allocated by dma_alloc_coherent
143+
* arm_dma_free - free memory allocated by arm_dma_alloc
127144
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
128145
* @size: size of memory originally requested in dma_alloc_coherent
129146
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
130147
* @handle: device-view address returned from dma_alloc_coherent
148+
* @attrs: optional attributes that specify mapping properties
131149
*
132150
* Free (and unmap) a DMA buffer previously allocated by
133-
* dma_alloc_coherent().
151+
* arm_dma_alloc().
134152
*
135153
* References to memory and mappings associated with cpu_addr/handle
136154
* during and after this call executing are illegal.
137155
*/
138-
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
156+
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
157+
dma_addr_t handle, struct dma_attrs *attrs);
158+
159+
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
160+
161+
static inline void dma_free_attrs(struct device *dev, size_t size,
162+
void *cpu_addr, dma_addr_t dma_handle,
163+
struct dma_attrs *attrs)
164+
{
165+
struct dma_map_ops *ops = get_dma_ops(dev);
166+
BUG_ON(!ops);
167+
168+
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
169+
ops->free(dev, size, cpu_addr, dma_handle, attrs);
170+
}
139171

140172
/**
141-
* dma_mmap_coherent - map a coherent DMA allocation into user space
173+
* arm_dma_mmap - map a coherent DMA allocation into user space
142174
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
143175
* @vma: vm_area_struct describing requested user mapping
144176
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
145177
* @handle: device-view address returned from dma_alloc_coherent
146178
* @size: size of memory originally requested in dma_alloc_coherent
179+
* @attrs: optional attributes that specify mapping properties
147180
*
148181
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
149182
* into user space. The coherent DMA buffer must not be freed by the
150183
* driver until the user space mapping has been released.
151184
*/
152-
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
153-
void *, dma_addr_t, size_t);
185+
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
186+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
187+
struct dma_attrs *attrs);
154188

189+
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
155190

156-
/**
157-
* dma_alloc_writecombine - allocate writecombining memory for DMA
158-
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
159-
* @size: required memory size
160-
* @handle: bus-specific DMA address
161-
*
162-
* Allocate some uncached, buffered memory for a device for
163-
* performing DMA. This function allocates pages, and will
164-
* return the CPU-viewed address, and sets @handle to be the
165-
* device-viewed address.
166-
*/
167-
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
168-
gfp_t);
191+
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
192+
void *cpu_addr, dma_addr_t dma_addr,
193+
size_t size, struct dma_attrs *attrs)
194+
{
195+
struct dma_map_ops *ops = get_dma_ops(dev);
196+
BUG_ON(!ops);
197+
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
198+
}
169199

170-
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
171-
dma_free_coherent(dev,size,cpu_addr,handle)
200+
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
201+
dma_addr_t *dma_handle, gfp_t flag)
202+
{
203+
DEFINE_DMA_ATTRS(attrs);
204+
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
205+
return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
206+
}
172207

173-
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
174-
void *, dma_addr_t, size_t);
208+
static inline void dma_free_writecombine(struct device *dev, size_t size,
209+
void *cpu_addr, dma_addr_t dma_handle)
210+
{
211+
DEFINE_DMA_ATTRS(attrs);
212+
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
213+
return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
214+
}
215+
216+
static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
217+
void *cpu_addr, dma_addr_t dma_addr, size_t size)
218+
{
219+
DEFINE_DMA_ATTRS(attrs);
220+
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
221+
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
222+
}
175223

176224
/*
177225
* This can be called during boot to increase the size of the consistent
@@ -180,7 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
180228
*/
181229
extern void __init init_consistent_dma_size(unsigned long size);
182230

183-
184231
/*
185232
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
186233
* and utilize bounce buffers as needed to work around limited DMA windows.

arch/arm/mm/dma-mapping.c

Lines changed: 24 additions & 36 deletions
Original file line number | Diff line number | Diff line change
@@ -113,6 +113,9 @@ static void arm_dma_sync_single_for_device(struct device *dev,
113113
static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
114114

115115
struct dma_map_ops arm_dma_ops = {
116+
.alloc = arm_dma_alloc,
117+
.free = arm_dma_free,
118+
.mmap = arm_dma_mmap,
116119
.map_page = arm_dma_map_page,
117120
.unmap_page = arm_dma_unmap_page,
118121
.map_sg = arm_dma_map_sg,
@@ -415,10 +418,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
415418
arm_vmregion_free(&consistent_head, c);
416419
}
417420

421+
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
422+
{
423+
prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
424+
pgprot_writecombine(prot) :
425+
pgprot_dmacoherent(prot);
426+
return prot;
427+
}
428+
418429
#else /* !CONFIG_MMU */
419430

420431
#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
421432
#define __dma_free_remap(addr, size) do { } while (0)
433+
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
422434

423435
#endif /* CONFIG_MMU */
424436

@@ -462,41 +474,33 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
462474
* Allocate DMA-coherent memory space and return both the kernel remapped
463475
* virtual and bus address for that space.
464476
*/
465-
void *
466-
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
477+
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
478+
gfp_t gfp, struct dma_attrs *attrs)
467479
{
480+
pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
468481
void *memory;
469482

470483
if (dma_alloc_from_coherent(dev, size, handle, &memory))
471484
return memory;
472485

473-
return __dma_alloc(dev, size, handle, gfp,
474-
pgprot_dmacoherent(pgprot_kernel),
486+
return __dma_alloc(dev, size, handle, gfp, prot,
475487
__builtin_return_address(0));
476488
}
477-
EXPORT_SYMBOL(dma_alloc_coherent);
478489

479490
/*
480-
* Allocate a writecombining region, in much the same way as
481-
* dma_alloc_coherent above.
491+
* Create userspace mapping for the DMA-coherent memory.
482492
*/
483-
void *
484-
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
485-
{
486-
return __dma_alloc(dev, size, handle, gfp,
487-
pgprot_writecombine(pgprot_kernel),
488-
__builtin_return_address(0));
489-
}
490-
EXPORT_SYMBOL(dma_alloc_writecombine);
491-
492-
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
493-
void *cpu_addr, dma_addr_t dma_addr, size_t size)
493+
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
494+
void *cpu_addr, dma_addr_t dma_addr, size_t size,
495+
struct dma_attrs *attrs)
494496
{
495497
int ret = -ENXIO;
496498
#ifdef CONFIG_MMU
497499
unsigned long user_size, kern_size;
498500
struct arm_vmregion *c;
499501

502+
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
503+
500504
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
501505
return ret;
502506

@@ -521,27 +525,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
521525
return ret;
522526
}
523527

524-
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
525-
void *cpu_addr, dma_addr_t dma_addr, size_t size)
526-
{
527-
vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
528-
return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
529-
}
530-
EXPORT_SYMBOL(dma_mmap_coherent);
531-
532-
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
533-
void *cpu_addr, dma_addr_t dma_addr, size_t size)
534-
{
535-
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
536-
return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
537-
}
538-
EXPORT_SYMBOL(dma_mmap_writecombine);
539-
540528
/*
541529
* free a page as defined by the above mapping.
542530
* Must not be called with IRQs disabled.
543531
*/
544-
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
532+
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
533+
dma_addr_t handle, struct dma_attrs *attrs)
545534
{
546535
WARN_ON(irqs_disabled());
547536

@@ -555,7 +544,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
555544

556545
__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
557546
}
558-
EXPORT_SYMBOL(dma_free_coherent);
559547

560548
static void dma_cache_maint_page(struct page *page, unsigned long offset,
561549
size_t size, enum dma_data_direction dir,

0 commit comments

Comments
 (0)