path: root/target/linux/brcm47xx/patches-4.0/160-kmap_coherent.patch
blob: fde1a706e215357277ed79deeb6114875f4d64d4
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -154,6 +154,9 @@
 #ifndef cpu_has_local_ebase
 #define cpu_has_local_ebase	1
 #endif
+#ifndef cpu_use_kmap_coherent
+#define cpu_use_kmap_coherent 1
+#endif
 
 /*
  * I-Cache snoops remote store.	 This only matters on SMP.  Some multiprocessors
--- a/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-bcm47xx/cpu-feature-overrides.h
@@ -79,4 +79,6 @@
 #define cpu_scache_line_size()		0
 #define cpu_has_vz			0
 
+#define cpu_use_kmap_coherent		0
+
 #endif /* __ASM_MACH_BCM47XX_CPU_FEATURE_OVERRIDES_H */
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -591,7 +591,7 @@ static inline void local_r4k_flush_cache
 		 */
 		map_coherent = (cpu_has_dc_aliases &&
 				page_mapped(page) && !Page_dcache_dirty(page));
-		if (map_coherent)
+		if (map_coherent && cpu_use_kmap_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
 			vaddr = kmap_atomic(page);
@@ -616,7 +616,7 @@ static inline void local_r4k_flush_cache
 	}
 
 	if (vaddr) {
-		if (map_coherent)
+		if (map_coherent && cpu_use_kmap_coherent)
 			kunmap_coherent();
 		else
 			kunmap_atomic(vaddr);
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -155,7 +155,7 @@ void copy_user_highpage(struct page *to,
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to);
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
@@ -177,7 +177,7 @@ void copy_to_user_page(struct vm_area_st
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
@@ -195,7 +195,7 @@ void copy_from_user_page(struct vm_area_
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases &&
+	if (cpu_has_dc_aliases && cpu_use_kmap_coherent &&
 	    page_mapped(page) && !Page_dcache_dirty(page)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
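
Note (not part of the patch, and not kernel code): the hunks above follow the usual MIPS cpu-feature-overrides pattern, where a feature macro defaults to 1 in the generic cpu-features.h, a platform header overrides it to 0, and generic code gates the faster mapping path on the macro so the dead branch can be discarded at compile time. The standalone C sketch below illustrates only that gating pattern under assumed, hypothetical names (plat_use_fast_map, map_fast, map_generic); it does not reproduce kmap_coherent()/kmap_atomic() themselves.

/* Sketch of the compile-time feature-override pattern used by the patch.
 * All identifiers here are hypothetical, not kernel APIs. */
#include <stdio.h>

/* A platform's cpu-feature-overrides.h would define this to 0 before this
 * header is seen; otherwise it defaults to 1, as cpu_use_kmap_coherent does. */
#ifndef plat_use_fast_map
#define plat_use_fast_map 1
#endif

static const char *map_fast(void)    { return "fast (kmap_coherent-style) path"; }
static const char *map_generic(void) { return "generic (kmap_atomic-style) path"; }

int main(void)
{
    /* Generic code tests the macro alongside its existing runtime conditions,
     * just as the patch adds "&& cpu_use_kmap_coherent" to each check. */
    const char *path = plat_use_fast_map ? map_fast() : map_generic();
    printf("using %s\n", path);
    return 0;
}

Because the macro expands to a compile-time constant, the untaken branch is eliminated by the compiler, which is presumably why the patch introduces a cpu_use_kmap_coherent define rather than a runtime flag.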