author    Felix Fietkau <nbd@openwrt.org>  2013-08-12 17:26:08 +0000
committer Felix Fietkau <nbd@openwrt.org>  2013-08-12 17:26:08 +0000
commit    3ead1e058ce0a7836650e61941d3f5f0850d3e50 (patch)
tree      dcf79719c1d0e93036cd93d95c4ba69feade411c
parent    58e049ea8087c7c6fee3c0039de175ff8b356215 (diff)
kernel: add patch to inline mips dma mapping functions - reduces code size and improves performance
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
SVN-Revision: 37763
-rw-r--r--  target/linux/generic/patches-3.10/130-mips_remove_plat_dma_functions.patch  | 172
-rw-r--r--  target/linux/generic/patches-3.10/131-mips_inline_dma_ops.patch              | 661
-rw-r--r--  target/linux/generic/patches-3.10/300-mips_expose_boot_raw.patch             |   2
3 files changed, 834 insertions(+), 1 deletion(-)
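
Note on the change below: the first patch drops per-platform DMA hooks that are no-ops everywhere, and the second moves the generic MIPS DMA mapping helpers into the header so that platforms without a custom dma_map_ops table (gated by the new SYS_HAS_DMA_OPS Kconfig symbol, selected only by Cavium Octeon) avoid the function-pointer dispatch entirely. The following is a minimal user-space sketch of that dispatch pattern only, not the kernel implementation; the names mirror the patch, but the types and the plat_map_page() helper are simplified stand-ins.

/*
 * Sketch: when no dma_map_ops table is compiled in, get_dma_ops()
 * returns NULL as a constant, so the compiler drops the indirect-call
 * branch and inlines the fast path into the caller.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t dma_addr_t;

struct dma_map_ops {
	dma_addr_t (*map_page)(void *page, size_t offset, size_t size);
};

/* Without SYS_HAS_DMA_OPS the ops pointer does not exist at all. */
#ifdef SYS_HAS_DMA_OPS
static struct dma_map_ops *mips_dma_map_ops;
#endif

static inline struct dma_map_ops *get_dma_ops(void)
{
#ifdef SYS_HAS_DMA_OPS
	return mips_dma_map_ops;
#else
	return NULL;	/* constant NULL: indirect-call path is dead code */
#endif
}

/* Stand-in for plat_map_dma_mem_page() plus any cache maintenance. */
static inline dma_addr_t plat_map_page(void *page, size_t offset)
{
	return (uintptr_t)page + offset;
}

static inline dma_addr_t dma_map_single(void *ptr, size_t size)
{
	struct dma_map_ops *ops = get_dma_ops();

	if (ops)	/* slow path: dispatch through the ops table */
		return ops->map_page(ptr, 0, size);

	/* fast path: fully inlined, no function-pointer call */
	return plat_map_page(ptr, 0);
}

int main(void)
{
	char buf[64];

	printf("dma addr: %#llx\n",
	       (unsigned long long)dma_map_single(buf, sizeof(buf)));
	return 0;
}

This is the same trade-off the patch makes in arch/mips/include/asm/dma-mapping.h: the ops-table indirection is kept only where a platform actually overrides the default behaviour, and everyone else gets straight-line inlined code.
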
diff --git a/target/linux/generic/patches-3.10/130-mips_remove_plat_dma_functions.patch b/target/linux/generic/patches-3.10/130-mips_remove_plat_dma_functions.patch
new file mode 100644
index 0000000..76a3584
--- /dev/null
+++ b/target/linux/generic/patches-3.10/130-mips_remove_plat_dma_functions.patch
@@ -0,0 +1,172 @@
+From: Felix Fietkau <nbd@openwrt.org>
+Subject: [PATCH 1/2] MIPS: remove unnecessary platform dma helper functions
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+---
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -289,7 +289,6 @@ static void mips_dma_sync_single_for_cpu
+ static void mips_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+ {
+- plat_extra_sync_for_device(dev);
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, dma_handle),
+ dma_handle & ~PAGE_MASK, size, direction);
+@@ -323,7 +322,7 @@ static void mips_dma_sync_sg_for_device(
+
+ int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- return plat_dma_mapping_error(dev, dma_addr);
++ return 0;
+ }
+
+ int mips_dma_supported(struct device *dev, u64 mask)
+@@ -336,7 +335,6 @@ void dma_cache_sync(struct device *dev,
+ {
+ BUG_ON(direction == DMA_NONE);
+
+- plat_extra_sync_for_device(dev);
+ if (!plat_device_is_coherent(dev))
+ __dma_sync_virtual(vaddr, size, direction);
+ }
+--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
++++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+@@ -46,22 +46,11 @@ static inline int plat_dma_supported(str
+ BUG();
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+- BUG();
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 1;
+ }
+
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- BUG();
+-}
+-
+ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
++++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
+@@ -47,16 +47,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ #ifdef CONFIG_DMA_COHERENT
+--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
++++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
+@@ -58,16 +58,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 1; /* IP27 non-cohernet mode is unsupported */
+--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
++++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
+@@ -80,17 +80,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+- return;
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 0; /* IP32 is non-cohernet */
+--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
++++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
+@@ -48,16 +48,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 0;
+--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
++++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
+@@ -53,16 +53,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 0;
+--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
++++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
+@@ -99,16 +99,6 @@ static inline int plat_dma_supported(str
+ return 1;
+ }
+
+-static inline void plat_extra_sync_for_device(struct device *dev)
+-{
+-}
+-
+-static inline int plat_dma_mapping_error(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+ static inline int plat_device_is_coherent(struct device *dev)
+ {
+ return 0;
diff --git a/target/linux/generic/patches-3.10/131-mips_inline_dma_ops.patch b/target/linux/generic/patches-3.10/131-mips_inline_dma_ops.patch
new file mode 100644
index 0000000..7ab0080
--- /dev/null
+++ b/target/linux/generic/patches-3.10/131-mips_inline_dma_ops.patch
@@ -0,0 +1,661 @@
+From: Felix Fietkau <nbd@openwrt.org>
+Subject: [PATCH 2/2] MIPS: partially inline dma ops
+
+Several DMA ops are no-op on many platforms, and the indirection through
+the mips_dma_map_ops function table is causing the compiler to emit
+unnecessary code.
+
+Inlining visibly improves network performance in my tests (on a 24Kc
+based system), and also slightly reduces code size of a few drivers.
+
+Signed-off-by: Felix Fietkau <nbd@openwrt.org>
+---
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1426,6 +1426,7 @@ config CPU_CAVIUM_OCTEON
+ select LIBFDT
+ select USE_OF
+ select USB_EHCI_BIG_ENDIAN_MMIO
++ select SYS_HAS_DMA_OPS
+ help
+ The Cavium Octeon processor is a highly integrated chip containing
+ many ethernet hardware widgets for networking tasks. The processor
+@@ -1646,6 +1647,9 @@ config SYS_HAS_CPU_XLR
+ config SYS_HAS_CPU_XLP
+ bool
+
++config SYS_HAS_DMA_OPS
++ bool
++
+ #
+ # CPU may reorder R->R, R->W, W->R, W->W
+ # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+--- a/arch/mips/include/asm/dma-mapping.h
++++ b/arch/mips/include/asm/dma-mapping.h
+@@ -1,6 +1,12 @@
+ #ifndef _ASM_DMA_MAPPING_H
+ #define _ASM_DMA_MAPPING_H
+
++#include <linux/kmemcheck.h>
++#include <linux/bug.h>
++#include <linux/scatterlist.h>
++#include <linux/dma-debug.h>
++#include <linux/dma-attrs.h>
++
+ #include <asm/scatterlist.h>
+ #include <asm/dma-coherence.h>
+ #include <asm/cache.h>
+@@ -10,14 +16,47 @@
+ #include <dma-coherence.h>
+ #endif
+
+-extern struct dma_map_ops *mips_dma_map_ops;
++void __dma_sync(struct page *page, unsigned long offset, size_t size,
++ enum dma_data_direction direction);
++void *mips_dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp,
++ struct dma_attrs *attrs);
++void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
+ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
++#ifdef CONFIG_SYS_HAS_DMA_OPS
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ else
+ return mips_dma_map_ops;
++#else
++ return NULL;
++#endif
++}
++
++/*
++ * Warning on the terminology - Linux calls an uncached area coherent;
++ * MIPS terminology calls memory areas with hardware maintained coherency
++ * coherent.
++ */
++
++static inline int cpu_is_noncoherent_r10000(struct device *dev)
++{
++#ifndef CONFIG_SYS_HAS_CPU_R10000
++ return 0;
++#endif
++ return !plat_device_is_coherent(dev) &&
++ (current_cpu_type() == CPU_R10000 ||
++ current_cpu_type() == CPU_R12000);
++}
++
++static inline struct page *dma_addr_to_page(struct device *dev,
++ dma_addr_t dma_addr)
++{
++ return pfn_to_page(
++ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
+ }
+
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+@@ -30,12 +69,309 @@ static inline bool dma_capable(struct de
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+-#include <asm-generic/dma-mapping-common.h>
++static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
++ size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
++ struct page *page = virt_to_page(ptr);
++ dma_addr_t addr;
++
++ kmemcheck_mark_initialized(ptr, size);
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops) {
++ addr = ops->map_page(dev, page, offset, size, dir, attrs);
++ } else {
++ if (!plat_device_is_coherent(dev))
++ __dma_sync(page, offset, size, dir);
++
++ addr = plat_map_dma_mem_page(dev, page) + offset;
++ }
++ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
++ return addr;
++}
++
++static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
++ size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops) {
++ ops->unmap_page(dev, addr, size, dir, attrs);
++ } else {
++ if (cpu_is_noncoherent_r10000(dev))
++ __dma_sync(dma_addr_to_page(dev, addr),
++ addr & ~PAGE_MASK, size, dir);
++
++ plat_unmap_dma_mem(dev, addr, size, dir);
++ }
++ debug_dma_unmap_page(dev, addr, size, dir, true);
++}
++
++static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ int i, ents;
++ struct scatterlist *s;
++
++ for_each_sg(sg, s, nents, i)
++ kmemcheck_mark_initialized(sg_virt(s), s->length);
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops) {
++ ents = ops->map_sg(dev, sg, nents, dir, attrs);
++ } else {
++ for_each_sg(sg, s, nents, i) {
++ struct page *page = sg_page(s);
++
++ if (!plat_device_is_coherent(dev))
++ __dma_sync(page, s->offset, s->length, dir);
++ s->dma_address =
++ plat_map_dma_mem_page(dev, page) + s->offset;
++ }
++ ents = nents;
++ }
++ debug_dma_map_sg(dev, sg, nents, ents, dir);
++
++ return ents;
++}
++
++static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i;
++
++ BUG_ON(!valid_dma_direction(dir));
++ debug_dma_unmap_sg(dev, sg, nents, dir);
++ if (ops) {
++ ops->unmap_sg(dev, sg, nents, dir, attrs);
++ return;
++ }
++
++ for_each_sg(sg, s, nents, i) {
++ if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
++ __dma_sync(sg_page(s), s->offset, s->length, dir);
++ plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
++ }
++}
++
++static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
++ size_t offset, size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ dma_addr_t addr;
++
++ kmemcheck_mark_initialized(page_address(page) + offset, size);
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops) {
++ addr = ops->map_page(dev, page, offset, size, dir, NULL);
++ } else {
++ if (!plat_device_is_coherent(dev))
++ __dma_sync(page, offset, size, dir);
++
++ addr = plat_map_dma_mem_page(dev, page) + offset;
++ }
++ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
++
++ return addr;
++}
++
++static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
++ size_t size, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops) {
++ ops->unmap_page(dev, addr, size, dir, NULL);
++ } else {
++ if (cpu_is_noncoherent_r10000(dev))
++ __dma_sync(dma_addr_to_page(dev, addr),
++ addr & ~PAGE_MASK, size, dir);
++
++ plat_unmap_dma_mem(dev, addr, size, dir);
++ }
++ debug_dma_unmap_page(dev, addr, size, dir, false);
++}
++
++static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_single_for_cpu(dev, addr, size, dir);
++ else if (cpu_is_noncoherent_r10000(dev))
++ __dma_sync(dma_addr_to_page(dev, addr),
++ addr & ~PAGE_MASK, size, dir);
++ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
++}
++
++static inline void dma_sync_single_for_device(struct device *dev,
++ dma_addr_t addr, size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_single_for_device(dev, addr, size, dir);
++ else if (!plat_device_is_coherent(dev))
++ __dma_sync(dma_addr_to_page(dev, addr),
++ addr & ~PAGE_MASK, size, dir);
++ debug_dma_sync_single_for_device(dev, addr, size, dir);
++}
++
++static inline void dma_sync_single_range_for_cpu(struct device *dev,
++ dma_addr_t addr,
++ unsigned long offset,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ const struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
++ else if (cpu_is_noncoherent_r10000(dev))
++ __dma_sync(dma_addr_to_page(dev, addr + offset),
++ (addr + offset) & ~PAGE_MASK, size, dir);
++ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
++}
++
++static inline void dma_sync_single_range_for_device(struct device *dev,
++ dma_addr_t addr,
++ unsigned long offset,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ const struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_single_for_device(dev, addr + offset, size, dir);
++ else if (!plat_device_is_coherent(dev))
++ __dma_sync(dma_addr_to_page(dev, addr + offset),
++ (addr + offset) & ~PAGE_MASK, size, dir);
++ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i;
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
++ else if (cpu_is_noncoherent_r10000(dev)) {
++ for_each_sg(sg, s, nelems, i)
++ __dma_sync(sg_page(s), s->offset, s->length, dir);
++ }
++ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ struct scatterlist *s;
++ int i;
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops)
++ ops->sync_sg_for_device(dev, sg, nelems, dir);
++ else if (!plat_device_is_coherent(dev)) {
++ for_each_sg(sg, s, nelems, i)
++ __dma_sync(sg_page(s), s->offset, s->length, dir);
++ }
++ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
++
++}
++
++#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
++#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
++#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
++#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
++
++extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size);
++
++/**
++ * dma_mmap_attrs - map a coherent DMA allocation into user space
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @vma: vm_area_struct describing requested user mapping
++ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
++ * @handle: device-view address returned from dma_alloc_attrs
++ * @size: size of memory originally requested in dma_alloc_attrs
++ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
++ *
++ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
++ * into user space. The coherent DMA buffer must not be freed by the
++ * driver until the user space mapping has been released.
++ */
++static inline int
++dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
++ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ if (ops && ops->mmap)
++ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
++ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
++
++static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ DEFINE_DMA_ATTRS(attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
++ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
++}
++
++int
++dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size);
++
++static inline int
++dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
++ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ if (ops && ops->get_sgtable)
++ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
++ attrs);
++ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
++}
++
++#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
++
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+ struct dma_map_ops *ops = get_dma_ops(dev);
+- return ops->dma_supported(dev, mask);
++ if (ops)
++ return ops->dma_supported(dev, mask);
++ return plat_dma_supported(dev, mask);
+ }
+
+ static inline int dma_mapping_error(struct device *dev, u64 mask)
+@@ -43,7 +379,9 @@ static inline int dma_mapping_error(stru
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, mask);
+- return ops->mapping_error(dev, mask);
++ if (ops)
++ return ops->mapping_error(dev, mask);
++ return 0;
+ }
+
+ static inline int
+@@ -69,7 +407,11 @@ static inline void *dma_alloc_attrs(stru
+ void *ret;
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+- ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
++ if (ops)
++ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
++ else
++ ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
++ attrs);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+@@ -84,7 +426,10 @@ static inline void dma_free_attrs(struct
+ {
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+- ops->free(dev, size, vaddr, dma_handle, attrs);
++ if (ops)
++ ops->free(dev, size, vaddr, dma_handle, attrs);
++ else
++ mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+ }
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -42,26 +42,6 @@ static int __init setnocoherentio(char *
+ }
+ early_param("nocoherentio", setnocoherentio);
+
+-static inline struct page *dma_addr_to_page(struct device *dev,
+- dma_addr_t dma_addr)
+-{
+- return pfn_to_page(
+- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
+-}
+-
+-/*
+- * Warning on the terminology - Linux calls an uncached area coherent;
+- * MIPS terminology calls memory areas with hardware maintained coherency
+- * coherent.
+- */
+-
+-static inline int cpu_is_noncoherent_r10000(struct device *dev)
+-{
+- return !plat_device_is_coherent(dev) &&
+- (current_cpu_type() == CPU_R10000 ||
+- current_cpu_type() == CPU_R12000);
+-}
+-
+ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+ {
+ gfp_t dma_flag;
+@@ -117,8 +97,9 @@ void *dma_alloc_noncoherent(struct devic
+ }
+ EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
++void *mips_dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp,
++ struct dma_attrs *attrs)
+ {
+ void *ret;
+
+@@ -142,6 +123,7 @@ static void *mips_dma_alloc_coherent(str
+
+ return ret;
+ }
++EXPORT_SYMBOL(mips_dma_alloc_coherent);
+
+
+ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+@@ -152,8 +134,8 @@ void dma_free_noncoherent(struct device
+ }
+ EXPORT_SYMBOL(dma_free_noncoherent);
+
+-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle, struct dma_attrs *attrs)
++void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ unsigned long addr = (unsigned long) vaddr;
+ int order = get_order(size);
+@@ -168,6 +150,7 @@ static void mips_dma_free_coherent(struc
+
+ free_pages(addr, get_order(size));
+ }
++EXPORT_SYMBOL(mips_dma_free_coherent);
+
+ static inline void __dma_sync_virtual(void *addr, size_t size,
+ enum dma_data_direction direction)
+@@ -196,8 +179,8 @@ static inline void __dma_sync_virtual(vo
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+-static inline void __dma_sync(struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction direction)
++void __dma_sync(struct page *page, unsigned long offset, size_t size,
++ enum dma_data_direction direction)
+ {
+ size_t left = size;
+
+@@ -226,109 +209,7 @@ static inline void __dma_sync(struct pag
+ left -= len;
+ } while (left);
+ }
+-
+-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+-{
+- if (cpu_is_noncoherent_r10000(dev))
+- __dma_sync(dma_addr_to_page(dev, dma_addr),
+- dma_addr & ~PAGE_MASK, size, direction);
+-
+- plat_unmap_dma_mem(dev, dma_addr, size, direction);
+-}
+-
+-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+-{
+- int i;
+-
+- for (i = 0; i < nents; i++, sg++) {
+- if (!plat_device_is_coherent(dev))
+- __dma_sync(sg_page(sg), sg->offset, sg->length,
+- direction);
+- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
+- sg->offset;
+- }
+-
+- return nents;
+-}
+-
+-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction direction,
+- struct dma_attrs *attrs)
+-{
+- if (!plat_device_is_coherent(dev))
+- __dma_sync(page, offset, size, direction);
+-
+- return plat_map_dma_mem_page(dev, page) + offset;
+-}
+-
+-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+- int nhwentries, enum dma_data_direction direction,
+- struct dma_attrs *attrs)
+-{
+- int i;
+-
+- for (i = 0; i < nhwentries; i++, sg++) {
+- if (!plat_device_is_coherent(dev) &&
+- direction != DMA_TO_DEVICE)
+- __dma_sync(sg_page(sg), sg->offset, sg->length,
+- direction);
+- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
+- }
+-}
+-
+-static void mips_dma_sync_single_for_cpu(struct device *dev,
+- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+-{
+- if (cpu_is_noncoherent_r10000(dev))
+- __dma_sync(dma_addr_to_page(dev, dma_handle),
+- dma_handle & ~PAGE_MASK, size, direction);
+-}
+-
+-static void mips_dma_sync_single_for_device(struct device *dev,
+- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+-{
+- if (!plat_device_is_coherent(dev))
+- __dma_sync(dma_addr_to_page(dev, dma_handle),
+- dma_handle & ~PAGE_MASK, size, direction);
+-}
+-
+-static void mips_dma_sync_sg_for_cpu(struct device *dev,
+- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+-{
+- int i;
+-
+- /* Make sure that gcc doesn't leave the empty loop body. */
+- for (i = 0; i < nelems; i++, sg++) {
+- if (cpu_is_noncoherent_r10000(dev))
+- __dma_sync(sg_page(sg), sg->offset, sg->length,
+- direction);
+- }
+-}
+-
+-static void mips_dma_sync_sg_for_device(struct device *dev,
+- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+-{
+- int i;
+-
+- /* Make sure that gcc doesn't leave the empty loop body. */
+- for (i = 0; i < nelems; i++, sg++) {
+- if (!plat_device_is_coherent(dev))
+- __dma_sync(sg_page(sg), sg->offset, sg->length,
+- direction);
+- }
+-}
+-
+-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-int mips_dma_supported(struct device *dev, u64 mask)
+-{
+- return plat_dma_supported(dev, mask);
+-}
++EXPORT_SYMBOL(__dma_sync);
+
+ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+@@ -341,23 +222,10 @@ void dma_cache_sync(struct device *dev,
+
+ EXPORT_SYMBOL(dma_cache_sync);
+
+-static struct dma_map_ops mips_default_dma_map_ops = {
+- .alloc = mips_dma_alloc_coherent,
+- .free = mips_dma_free_coherent,
+- .map_page = mips_dma_map_page,
+- .unmap_page = mips_dma_unmap_page,
+- .map_sg = mips_dma_map_sg,
+- .unmap_sg = mips_dma_unmap_sg,
+- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+- .sync_single_for_device = mips_dma_sync_single_for_device,
+- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+- .sync_sg_for_device = mips_dma_sync_sg_for_device,
+- .mapping_error = mips_dma_mapping_error,
+- .dma_supported = mips_dma_supported
+-};
+-
+-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
++#ifdef CONFIG_SYS_HAS_DMA_OPS
++struct dma_map_ops *mips_dma_map_ops = NULL;
+ EXPORT_SYMBOL(mips_dma_map_ops);
++#endif
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
diff --git a/target/linux/generic/patches-3.10/300-mips_expose_boot_raw.patch b/target/linux/generic/patches-3.10/300-mips_expose_boot_raw.patch
index 0fcb5a0..de0f1ce 100644
--- a/target/linux/generic/patches-3.10/300-mips_expose_boot_raw.patch
+++ b/target/linux/generic/patches-3.10/300-mips_expose_boot_raw.patch
@@ -18,7 +18,7 @@ Acked-by: Rob Landley <rob@landley.net>
config CEVT_BCM1480
bool
-@@ -2387,6 +2384,18 @@ config USE_OF
+@@ -2391,6 +2388,18 @@ config USE_OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN