1 |
commit: ebd54cc8a3c2fb541c454a7e6a288bf9b27fb03a |
2 |
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
3 |
AuthorDate: Tue Jul 3 13:35:39 2018 +0000 |
4 |
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
5 |
CommitDate: Tue Jul 3 13:35:39 2018 +0000 |
6 |
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ebd54cc8 |
7 |
|
8 |
Removal of redundant patch: 1800_iommu-amd-dma-direct-revert.patch |
9 |
|
10 |
0000_README | 4 - |
11 |
1800_iommu-amd-dma-direct-revert.patch | 164 --------------------------------- |
12 |
2 files changed, 168 deletions(-) |
13 |
|
14 |
diff --git a/0000_README b/0000_README |
15 |
index f45eebe..76ef096 100644 |
16 |
--- a/0000_README |
17 |
+++ b/0000_README |
18 |
@@ -59,10 +59,6 @@ Patch: 1003_linux-4.17.4.patch |
19 |
From: http://www.kernel.org |
20 |
Desc: Linux 4.17.4 |
21 |
|
22 |
-Patch: 1800_iommu-amd-dma-direct-revert.patch |
23 |
-From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=e16c4790de39dc861b749674c2a9319507f6f64f |
24 |
-Desc: Revert iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}(). See bug #658538. |
25 |
- |
26 |
Patch: 1500_XATTR_USER_PREFIX.patch |
27 |
From: https://bugs.gentoo.org/show_bug.cgi?id=470644 |
28 |
Desc: Support for namespace user.pax.* on tmpfs. |
29 |
|
30 |
diff --git a/1800_iommu-amd-dma-direct-revert.patch b/1800_iommu-amd-dma-direct-revert.patch |
31 |
deleted file mode 100644 |
32 |
index a78fa02..0000000 |
33 |
--- a/1800_iommu-amd-dma-direct-revert.patch |
34 |
+++ /dev/null |
35 |
@@ -1,164 +0,0 @@ |
36 |
-From e16c4790de39dc861b749674c2a9319507f6f64f Mon Sep 17 00:00:00 2001 |
37 |
-From: Linus Torvalds <torvalds@××××××××××××××××.org> |
38 |
-Date: Mon, 11 Jun 2018 12:22:12 -0700 |
39 |
-Subject: Revert "iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and |
40 |
- dma_direct_{alloc,free}()" |
41 |
-MIME-Version: 1.0 |
42 |
-Content-Type: text/plain; charset=UTF-8 |
43 |
-Content-Transfer-Encoding: 8bit |
44 |
- |
45 |
-This reverts commit b468620f2a1dfdcfddfd6fa54367b8bcc1b51248. |
46 |
- |
47 |
-It turns out that this broke drm on AMD platforms. Quoting Gabriel C: |
48 |
- "I can confirm reverting b468620f2a1dfdcfddfd6fa54367b8bcc1b51248 fixes |
49 |
- that issue for me. |
50 |
- |
51 |
- The GPU is working fine with SME enabled. |
52 |
- |
53 |
- Now with working GPU :) I can also confirm performance is back to |
54 |
- normal without doing any other workarounds" |
55 |
- |
56 |
-Christan König analyzed it partially: |
57 |
- "As far as I analyzed it we now get an -ENOMEM from dma_alloc_attrs() |
58 |
- in drivers/gpu/drm/ttm/ttm_page_alloc_dma.c when IOMMU is enabled" |
59 |
- |
60 |
-and Christoph Hellwig responded: |
61 |
- "I think the prime issue is that dma_direct_alloc respects the dma |
62 |
- mask. Which we don't need if actually using the iommu. This would be |
63 |
- mostly harmless exept for the the SEV bit high in the address that |
64 |
- makes the checks fail. |
65 |
- |
66 |
- For now I'd say revert this commit for 4.17/4.18-rc and I'll look into |
67 |
- addressing these issues properly" |
68 |
- |
69 |
-Reported-and-bisected-by: Gabriel C <nix.or.die@×××××.com> |
70 |
-Acked-by: Christoph Hellwig <hch@×××.de> |
71 |
-Cc: Christian König <christian.koenig@×××.com> |
72 |
-Cc: Michel Dänzer <michel.daenzer@×××.com> |
73 |
-Cc: Joerg Roedel <jroedel@××××.de> |
74 |
-Cc: Tom Lendacky <thomas.lendacky@×××.com> |
75 |
-Cc: Andrew Morton <akpm@××××××××××××××××.org> |
76 |
-Cc: stable@××××××.org # v4.17 |
77 |
-Signed-off-by: Linus Torvalds <torvalds@××××××××××××××××.org> |
78 |
---- |
79 |
- drivers/iommu/Kconfig | 1 - |
80 |
- drivers/iommu/amd_iommu.c | 68 ++++++++++++++++++++++++++++++++--------------- |
81 |
- 2 files changed, 47 insertions(+), 22 deletions(-) |
82 |
- |
83 |
-diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig |
84 |
-index 8ea77ef..e055d22 100644 |
85 |
---- a/drivers/iommu/Kconfig |
86 |
-+++ b/drivers/iommu/Kconfig |
87 |
-@@ -107,7 +107,6 @@ config IOMMU_PGTABLES_L2 |
88 |
- # AMD IOMMU support |
89 |
- config AMD_IOMMU |
90 |
- bool "AMD IOMMU support" |
91 |
-- select DMA_DIRECT_OPS |
92 |
- select SWIOTLB |
93 |
- select PCI_MSI |
94 |
- select PCI_ATS |
95 |
-diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
96 |
-index 0cea80be..596b95c 100644 |
97 |
---- a/drivers/iommu/amd_iommu.c |
98 |
-+++ b/drivers/iommu/amd_iommu.c |
99 |
-@@ -2596,32 +2596,51 @@ static void *alloc_coherent(struct device *dev, size_t size, |
100 |
- unsigned long attrs) |
101 |
- { |
102 |
- u64 dma_mask = dev->coherent_dma_mask; |
103 |
-- struct protection_domain *domain = get_domain(dev); |
104 |
-- bool is_direct = false; |
105 |
-- void *virt_addr; |
106 |
-+ struct protection_domain *domain; |
107 |
-+ struct dma_ops_domain *dma_dom; |
108 |
-+ struct page *page; |
109 |
-+ |
110 |
-+ domain = get_domain(dev); |
111 |
-+ if (PTR_ERR(domain) == -EINVAL) { |
112 |
-+ page = alloc_pages(flag, get_order(size)); |
113 |
-+ *dma_addr = page_to_phys(page); |
114 |
-+ return page_address(page); |
115 |
-+ } else if (IS_ERR(domain)) |
116 |
-+ return NULL; |
117 |
- |
118 |
-- if (IS_ERR(domain)) { |
119 |
-- if (PTR_ERR(domain) != -EINVAL) |
120 |
-+ dma_dom = to_dma_ops_domain(domain); |
121 |
-+ size = PAGE_ALIGN(size); |
122 |
-+ dma_mask = dev->coherent_dma_mask; |
123 |
-+ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); |
124 |
-+ flag |= __GFP_ZERO; |
125 |
-+ |
126 |
-+ page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); |
127 |
-+ if (!page) { |
128 |
-+ if (!gfpflags_allow_blocking(flag)) |
129 |
- return NULL; |
130 |
-- is_direct = true; |
131 |
-- } |
132 |
- |
133 |
-- virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs); |
134 |
-- if (!virt_addr || is_direct) |
135 |
-- return virt_addr; |
136 |
-+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
137 |
-+ get_order(size), flag); |
138 |
-+ if (!page) |
139 |
-+ return NULL; |
140 |
-+ } |
141 |
- |
142 |
- if (!dma_mask) |
143 |
- dma_mask = *dev->dma_mask; |
144 |
- |
145 |
-- *dma_addr = __map_single(dev, to_dma_ops_domain(domain), |
146 |
-- virt_to_phys(virt_addr), PAGE_ALIGN(size), |
147 |
-- DMA_BIDIRECTIONAL, dma_mask); |
148 |
-+ *dma_addr = __map_single(dev, dma_dom, page_to_phys(page), |
149 |
-+ size, DMA_BIDIRECTIONAL, dma_mask); |
150 |
-+ |
151 |
- if (*dma_addr == AMD_IOMMU_MAPPING_ERROR) |
152 |
- goto out_free; |
153 |
-- return virt_addr; |
154 |
-+ |
155 |
-+ return page_address(page); |
156 |
- |
157 |
- out_free: |
158 |
-- dma_direct_free(dev, size, virt_addr, *dma_addr, attrs); |
159 |
-+ |
160 |
-+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
161 |
-+ __free_pages(page, get_order(size)); |
162 |
-+ |
163 |
- return NULL; |
164 |
- } |
165 |
- |
166 |
-@@ -2632,17 +2651,24 @@ static void free_coherent(struct device *dev, size_t size, |
167 |
- void *virt_addr, dma_addr_t dma_addr, |
168 |
- unsigned long attrs) |
169 |
- { |
170 |
-- struct protection_domain *domain = get_domain(dev); |
171 |
-+ struct protection_domain *domain; |
172 |
-+ struct dma_ops_domain *dma_dom; |
173 |
-+ struct page *page; |
174 |
- |
175 |
-+ page = virt_to_page(virt_addr); |
176 |
- size = PAGE_ALIGN(size); |
177 |
- |
178 |
-- if (!IS_ERR(domain)) { |
179 |
-- struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain); |
180 |
-+ domain = get_domain(dev); |
181 |
-+ if (IS_ERR(domain)) |
182 |
-+ goto free_mem; |
183 |
- |
184 |
-- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL); |
185 |
-- } |
186 |
-+ dma_dom = to_dma_ops_domain(domain); |
187 |
-+ |
188 |
-+ __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL); |
189 |
- |
190 |
-- dma_direct_free(dev, size, virt_addr, dma_addr, attrs); |
191 |
-+free_mem: |
192 |
-+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) |
193 |
-+ __free_pages(page, get_order(size)); |
194 |
- } |
195 |
- |
196 |
- /* |
197 |
--- |
198 |
-cgit v1.1 |
199 |
- |