commit:     a237553df3d9872194b04bb1688fb0ec658cf944
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 29 16:15:47 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 29 16:15:47 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a237553d

Linux patch 4.19.100

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1099_linux-4.19.100.patch | 4278 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4282 insertions(+)

diff --git a/0000_README b/0000_README
index cae3438..7c99cc6 100644
--- a/0000_README
+++ b/0000_README
@@ -435,6 +435,10 @@ Patch:  1098_linux-4.19.99.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.99
 
+Patch:  1099_linux-4.19.100.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.100
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
diff --git a/1099_linux-4.19.100.patch b/1099_linux-4.19.100.patch
new file mode 100644
index 0000000..1e9d910
--- /dev/null
+++ b/1099_linux-4.19.100.patch
@@ -0,0 +1,4278 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e6b6ec974eeb..8bf0c0532046 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1946,6 +1946,12 @@
+ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
+ the default is off.
+
++ kpti= [ARM64] Control page table isolation of user
++ and kernel address spaces.
++ Default: enabled on cores which need mitigation.
++ 0: force disabled
++ 1: force enabled
++
+ kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
+ Default is 0 (don't ignore, but inject #GP)
+
+diff --git a/Makefile b/Makefile
+index a2be0c79eeb8..f1e428271abf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 99
++SUBLEVEL = 100
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 3b85c3ecac38..79e5cc70f1fd 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -661,21 +661,12 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return ret;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+- int ret;
+-
+- zone = page_zone(pfn_to_page(start_pfn));
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- if (ret)
+- pr_warn("%s: Problem encountered in __remove_pages() as"
+- " ret=%d\n", __func__, ret);
+
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+ #endif
+-#endif
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 9a6afd9f3f9b..84a012e42a7e 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -118,8 +118,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
+ return -ENODEV;
+ }
+
+-int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+- bool want_memblock)
++int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
++ bool want_memblock)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+@@ -139,30 +139,20 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
+ return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void __ref arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct page *page;
+ int ret;
+
+- /*
+- * If we have an altmap then we need to skip over any reserved PFNs
+- * when querying the zone.
+- */
+- page = pfn_to_page(start_pfn);
+- if (altmap)
+- page += vmem_altmap_offset(altmap);
+-
+- ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+- if (ret)
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+
+ /* Remove htab bolted mappings for this section of memory */
+ start = (unsigned long)__va(start);
+ flush_inval_dcache_range(start, start + size);
+ ret = remove_section_mapping(start, start + size);
++ WARN_ON_ONCE(ret);
+
+ /* Ensure all vmalloc mappings are flushed in case they also
+ * hit that section of memory
+@@ -170,11 +160,8 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
+ vm_unmap_aliases();
+
+ resize_hpt_for_hotplug(memblock_phys_mem_size());
+-
+- return ret;
+ }
+ #endif
+-#endif /* CONFIG_MEMORY_HOTPLUG */
+
+ /*
+ * walk_memory_resource() needs to make sure there is no holes in a given
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index dd3cc4632b9a..84d038ed3882 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -122,7 +122,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
+ */
+ end_pfn = base_pfn + nr_pages;
+ for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
+- remove_memory(nid, pfn << PAGE_SHIFT, bytes);
++ __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+ }
+ unlock_device_hotplug();
+ return base_pfn << PAGE_SHIFT;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 62d3c72cd931..c2c6f32848e1 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -301,7 +301,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
+ nid = memory_add_physaddr_to_nid(base);
+
+ for (i = 0; i < sections_per_block; i++) {
+- remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
++ __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ base += MIN_MEMORY_BLOCK_SIZE;
+ }
+
+@@ -393,7 +393,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
+ block_sz = pseries_memory_block_size();
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+- remove_memory(nid, lmb->base_addr, block_sz);
++ __remove_memory(nid, lmb->base_addr, block_sz);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+@@ -680,7 +680,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+
+ rc = dlpar_online_lmb(lmb);
+ if (rc) {
+- remove_memory(nid, lmb->base_addr, block_sz);
++ __remove_memory(nid, lmb->base_addr, block_sz);
+ invalidate_lmb_associativity_index(lmb);
+ } else {
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index 3fa3e5323612..379a925d9e82 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -239,15 +239,13 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return rc;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+- /*
+- * There is no hardware or firmware interface which could trigger a
+- * hot memory remove on s390. So there is nothing that needs to be
+- * implemented.
+- */
+- return -EBUSY;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ __remove_pages(start_pfn, nr_pages, altmap);
++ vmem_remove_mapping(start, size);
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
+index 7713c084d040..47882be91121 100644
+--- a/arch/sh/mm/init.c
++++ b/arch/sh/mm/init.c
+@@ -443,21 +443,12 @@ int memory_add_physaddr_to_nid(u64 addr)
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ #endif
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+- int ret;
+-
+- zone = page_zone(pfn_to_page(start_pfn));
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- if (unlikely(ret))
+- pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
+- ret);
+
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 979e0a02cbe1..79b95910fd9f 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -860,18 +860,15 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+
+- zone = page_zone(pfn_to_page(start_pfn));
+- return __remove_pages(zone, start_pfn, nr_pages, altmap);
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+ #endif
+-#endif
+
+ int kernel_set_to_readonly __read_mostly;
+
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index a3e9c6ee3cf2..81e85a8dd300 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1132,7 +1132,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
+ remove_pagetable(start, end, false, altmap);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ static void __meminit
+ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
+ {
+@@ -1142,25 +1141,15 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
+ remove_pagetable(start, end, true, NULL);
+ }
+
+-int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void __ref arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct page *page = pfn_to_page(start_pfn);
+- struct zone *zone;
+- int ret;
+
+- /* With altmap the first mapped page is offset from @start */
+- if (altmap)
+- page += vmem_altmap_offset(altmap);
+- zone = page_zone(page);
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- WARN_ON_ONCE(ret);
++ __remove_pages(start_pfn, nr_pages, altmap);
+ kernel_physical_mapping_remove(start, start + size);
+-
+- return ret;
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+
+ static struct kcore_list kcore_vsyscall;
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index 2ccfbb61ca89..8fe0960ea572 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -282,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
+ nid = memory_add_physaddr_to_nid(info->start_addr);
+
+ acpi_unbind_memory_blocks(info);
+- remove_memory(nid, info->start_addr, info->length);
++ __remove_memory(nid, info->start_addr, info->length);
+ list_del(&info->list);
+ kfree(info);
+ }
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index 4e46dc9e41ad..112b1001c269 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -927,6 +927,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ }
+ if (!to) {
+ printk ("No more free channels for FS50..\n");
++ kfree(vcc);
+ return -EBUSY;
+ }
+ vcc->channo = dev->channo;
+@@ -937,6 +938,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
+ ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
+ printk ("Channel is in use for FS155.\n");
++ kfree(vcc);
+ return -EBUSY;
+ }
+ }
+@@ -950,6 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ tc, sizeof (struct fs_transmit_config));
+ if (!tc) {
+ fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
++ kfree(vcc);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index ac1574a69610..e270abc86d46 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -39,6 +39,11 @@ static inline int base_memory_block_id(int section_nr)
+ return section_nr / sections_per_block;
+ }
+
++static inline int pfn_to_block_id(unsigned long pfn)
++{
++ return base_memory_block_id(pfn_to_section_nr(pfn));
++}
++
+ static int memory_subsys_online(struct device *dev);
+ static int memory_subsys_offline(struct device *dev);
+
+@@ -230,13 +235,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
+ * OK to have direct references to sparsemem variables in here.
+ */
+ static int
+-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
++memory_block_action(unsigned long start_section_nr, unsigned long action,
++ int online_type)
+ {
+ unsigned long start_pfn;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ int ret;
+
+- start_pfn = section_nr_to_pfn(phys_index);
++ start_pfn = section_nr_to_pfn(start_section_nr);
+
+ switch (action) {
+ case MEM_ONLINE:
+@@ -250,7 +256,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
+ break;
+ default:
+ WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
+- "%ld\n", __func__, phys_index, action, action);
++ "%ld\n", __func__, start_section_nr, action, action);
+ ret = -EINVAL;
+ }
+
+@@ -590,10 +596,9 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
+-struct memory_block *find_memory_block_hinted(struct mem_section *section,
+- struct memory_block *hint)
++static struct memory_block *find_memory_block_by_id(int block_id,
++ struct memory_block *hint)
+ {
+- int block_id = base_memory_block_id(__section_nr(section));
+ struct device *hintdev = hint ? &hint->dev : NULL;
+ struct device *dev;
+
+@@ -605,6 +610,14 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
+ return to_memory_block(dev);
+ }
+
++struct memory_block *find_memory_block_hinted(struct mem_section *section,
++ struct memory_block *hint)
++{
++ int block_id = base_memory_block_id(__section_nr(section));
++
++ return find_memory_block_by_id(block_id, hint);
++}
++
+ /*
+ * For now, we have a linear search to go find the appropriate
+ * memory_block corresponding to a particular phys_index. If
+@@ -659,25 +672,28 @@ int register_memory(struct memory_block *memory)
+ return ret;
+ }
+
+-static int init_memory_block(struct memory_block **memory,
+- struct mem_section *section, unsigned long state)
++static int init_memory_block(struct memory_block **memory, int block_id,
++ unsigned long state)
+ {
+ struct memory_block *mem;
+ unsigned long start_pfn;
+- int scn_nr;
+ int ret = 0;
+
++ mem = find_memory_block_by_id(block_id, NULL);
++ if (mem) {
++ put_device(&mem->dev);
++ return -EEXIST;
++ }
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+- scn_nr = __section_nr(section);
+- mem->start_section_nr =
+- base_memory_block_id(scn_nr) * sections_per_block;
++ mem->start_section_nr = block_id * sections_per_block;
+ mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
+ mem->state = state;
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ mem->phys_device = arch_get_memory_phys_device(start_pfn);
++ mem->nid = NUMA_NO_NODE;
+
+ ret = register_memory(mem);
+
+@@ -688,101 +704,98 @@ static int init_memory_block(struct memory_block **memory,
+ static int add_memory_block(int base_section_nr)
+ {
+ struct memory_block *mem;
+- int i, ret, section_count = 0, section_nr;
++ int i, ret, section_count = 0;
+
+ for (i = base_section_nr;
+- (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+- i++) {
+- if (!present_section_nr(i))
+- continue;
+- if (section_count == 0)
+- section_nr = i;
+- section_count++;
+- }
++ i < base_section_nr + sections_per_block;
++ i++)
++ if (present_section_nr(i))
++ section_count++;
+
+ if (section_count == 0)
+ return 0;
+- ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
++ ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
++ MEM_ONLINE);
+ if (ret)
+ return ret;
+ mem->section_count = section_count;
+ return 0;
+ }
+
++static void unregister_memory(struct memory_block *memory)
++{
++ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
++ return;
++
++ /* drop the ref. we got via find_memory_block() */
++ put_device(&memory->dev);
++ device_unregister(&memory->dev);
++}
++
+ /*
+- * need an interface for the VM to add new memory regions,
+- * but without onlining it.
++ * Create memory block devices for the given memory area. Start and size
++ * have to be aligned to memory block granularity. Memory block devices
++ * will be initialized as offline.
+ */
+-int hotplug_memory_register(int nid, struct mem_section *section)
++int create_memory_block_devices(unsigned long start, unsigned long size)
+ {
+- int ret = 0;
++ const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
++ int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
++ unsigned long block_id;
++ int ret = 0;
+
+- mutex_lock(&mem_sysfs_mutex);
++ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
++ !IS_ALIGNED(size, memory_block_size_bytes())))
++ return -EINVAL;
+
+- mem = find_memory_block(section);
+- if (mem) {
+- mem->section_count++;
+- put_device(&mem->dev);
+- } else {
+- ret = init_memory_block(&mem, section, MEM_OFFLINE);
++ mutex_lock(&mem_sysfs_mutex);
++ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
++ ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
+ if (ret)
+- goto out;
+- mem->section_count++;
++ break;
++ mem->section_count = sections_per_block;
++ }
++ if (ret) {
++ end_block_id = block_id;
++ for (block_id = start_block_id; block_id != end_block_id;
++ block_id++) {
++ mem = find_memory_block_by_id(block_id, NULL);
++ mem->section_count = 0;
++ unregister_memory(mem);
++ }
+ }
+-
+-out:
+ mutex_unlock(&mem_sysfs_mutex);
+ return ret;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-static void
+-unregister_memory(struct memory_block *memory)
+-{
+- BUG_ON(memory->dev.bus != &memory_subsys);
+-
+- /* drop the ref. we got in remove_memory_block() */
+- put_device(&memory->dev);
+- device_unregister(&memory->dev);
+-}
+-
+-static int remove_memory_section(unsigned long node_id,
+- struct mem_section *section, int phys_device)
++/*
++ * Remove memory block devices for the given memory area. Start and size
++ * have to be aligned to memory block granularity. Memory block devices
++ * have to be offline.
++ */
++void remove_memory_block_devices(unsigned long start, unsigned long size)
+ {
++ const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
++ const int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
++ int block_id;
+
+- mutex_lock(&mem_sysfs_mutex);
+-
+- /*
+- * Some users of the memory hotplug do not want/need memblock to
+- * track all sections. Skip over those.
+- */
+- mem = find_memory_block(section);
+- if (!mem)
+- goto out_unlock;
++ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
++ !IS_ALIGNED(size, memory_block_size_bytes())))
++ return;
+
+- unregister_mem_sect_under_nodes(mem, __section_nr(section));
+-
+- mem->section_count--;
+- if (mem->section_count == 0)
++ mutex_lock(&mem_sysfs_mutex);
++ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
++ mem = find_memory_block_by_id(block_id, NULL);
++ if (WARN_ON_ONCE(!mem))
++ continue;
++ mem->section_count = 0;
++ unregister_memory_block_under_nodes(mem);
+ unregister_memory(mem);
+- else
+- put_device(&mem->dev);
+-
+-out_unlock:
++ }
+ mutex_unlock(&mem_sysfs_mutex);
+- return 0;
+-}
+-
+-int unregister_memory_section(struct mem_section *section)
+-{
+- if (!present_section(section))
+- return -EINVAL;
+-
+- return remove_memory_section(0, section, 0);
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+ /* return true if the memory block is offlined, otherwise, return false */
+ bool is_memblock_offlined(struct memory_block *mem)
+@@ -849,3 +862,39 @@ out:
+ printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
+ return ret;
+ }
++
++struct for_each_memory_block_cb_data {
++ walk_memory_blocks_func_t func;
++ void *arg;
++};
++
++static int for_each_memory_block_cb(struct device *dev, void *data)
++{
++ struct memory_block *mem = to_memory_block(dev);
++ struct for_each_memory_block_cb_data *cb_data = data;
++
++ return cb_data->func(mem, cb_data->arg);
++}
++
++/**
++ * for_each_memory_block - walk through all present memory blocks
++ *
++ * @arg: argument passed to func
++ * @func: callback for each memory block walked
++ *
++ * This function walks through all present memory blocks, calling func on
++ * each memory block.
++ *
++ * In case func() returns an error, walking is aborted and the error is
++ * returned.
++ */
++int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
++{
++ struct for_each_memory_block_cb_data cb_data = {
++ .func = func,
++ .arg = arg,
++ };
++
++ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
++ for_each_memory_block_cb);
++}
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index c3968e2d0a98..f3565c2dbc52 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -409,8 +409,6 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ int ret, nid = *(int *)arg;
+ unsigned long pfn, sect_start_pfn, sect_end_pfn;
+
+- mem_blk->nid = nid;
+-
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
+ sect_end_pfn += PAGES_PER_SECTION - 1;
+@@ -439,6 +437,13 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ if (page_nid != nid)
+ continue;
+ }
++
++ /*
++ * If this memory block spans multiple nodes, we only indicate
++ * the last processed node.
++ */
++ mem_blk->nid = nid;
++
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+@@ -453,40 +458,19 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ return 0;
+ }
+
+-/* unregister memory section under all nodes that it spans */
+-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+- unsigned long phys_index)
++/*
++ * Unregister a memory block device under the node it spans. Memory blocks
++ * with multiple nodes cannot be offlined and therefore also never be removed.
++ */
++void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+ {
+- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
+- unsigned long pfn, sect_start_pfn, sect_end_pfn;
+-
+- if (!mem_blk) {
+- NODEMASK_FREE(unlinked_nodes);
+- return -EFAULT;
+- }
+- if (!unlinked_nodes)
+- return -ENOMEM;
+- nodes_clear(*unlinked_nodes);
+-
+- sect_start_pfn = section_nr_to_pfn(phys_index);
+- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+- int nid;
++ if (mem_blk->nid == NUMA_NO_NODE)
++ return;
+
+- nid = get_nid_for_pfn(pfn);
+- if (nid < 0)
+- continue;
+- if (!node_online(nid))
+- continue;
+- if (node_test_and_set(nid, *unlinked_nodes))
+- continue;
+- sysfs_remove_link(&node_devices[nid]->dev.kobj,
+- kobject_name(&mem_blk->dev.kobj));
+- sysfs_remove_link(&mem_blk->dev.kobj,
+- kobject_name(&node_devices[nid]->dev.kobj));
+- }
+- NODEMASK_FREE(unlinked_nodes);
+- return 0;
++ sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
++ kobject_name(&mem_blk->dev.kobj));
++ sysfs_remove_link(&mem_blk->dev.kobj,
++ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
+ }
+
+ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
+index eb2a0a73cbed..d670f7000cbb 100644
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -14,6 +14,7 @@
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
++#include <crypto/skcipher.h>
+
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -170,13 +171,15 @@ static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(op->fallback.blk,
++ tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
++ ret = crypto_skcipher_setkey(op->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++ tfm->crt_flags |= crypto_skcipher_get_flags(op->fallback.blk) &
++ CRYPTO_TFM_RES_MASK;
+ }
+ return ret;
+ }
+@@ -185,33 +188,28 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_decrypt(req);
+ }
++
+ static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_encrypt(req);
+ }
+
+ static void
+@@ -311,6 +309,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -343,6 +344,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+@@ -370,8 +374,9 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
++ op->fallback.blk = crypto_alloc_skcipher(name, 0,
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(op->fallback.blk)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+@@ -385,7 +390,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
+ {
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- crypto_free_blkcipher(op->fallback.blk);
++ crypto_free_skcipher(op->fallback.blk);
+ op->fallback.blk = NULL;
+ }
+
+@@ -424,6 +429,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -454,6 +462,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
+index f442ca972e3c..c5763a041bb8 100644
+--- a/drivers/crypto/geode-aes.h
++++ b/drivers/crypto/geode-aes.h
+@@ -64,7 +64,7 @@ struct geode_aes_op {
+ u8 *iv;
+
+ union {
+- struct crypto_blkcipher *blk;
++ struct crypto_skcipher *blk;
+ struct crypto_cipher *cip;
+ } fallback;
+ u32 keylen;
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index f4c7516eb989..0a87c5b51286 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -296,9 +296,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
+ long reg;
+
+ if (bypass_attn & (1 << channel))
+- reg = (volt * 1024) / 2250;
++ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
+ else
+- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
++ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
++ (r[0] + r[1]) * 2250);
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
+ }
+
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index 6b3559f58b67..d34de21d43ad 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
+
+ #define to_hwmon_attr(d) \
+ container_of(d, struct hwmon_device_attribute, dev_attr)
++#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
+
+ /*
+ * Thermal zone information
+@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
+ * also provides the sensor index.
+ */
+ struct hwmon_thermal_data {
+- struct hwmon_device *hwdev; /* Reference to hwmon device */
++ struct device *dev; /* Reference to hwmon device */
+ int index; /* sensor index */
+ };
+
+@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
+ NULL
+ };
+
++static void hwmon_free_attrs(struct attribute **attrs)
++{
++ int i;
++
++ for (i = 0; attrs[i]; i++) {
++ struct device_attribute *dattr = to_dev_attr(attrs[i]);
++ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
++
++ kfree(hattr);
++ }
++ kfree(attrs);
++}
++
+ static void hwmon_dev_release(struct device *dev)
+ {
+- kfree(to_hwmon_device(dev));
++ struct hwmon_device *hwdev = to_hwmon_device(dev);
++
++ if (hwdev->group.attrs)
++ hwmon_free_attrs(hwdev->group.attrs);
++ kfree(hwdev->groups);
++ kfree(hwdev);
+ }
+
+ static struct class hwmon_class = {
+@@ -121,11 +140,11 @@ static DEFINE_IDA(hwmon_ida);
+ static int hwmon_thermal_get_temp(void *data, int *temp)
+ {
+ struct hwmon_thermal_data *tdata = data;
+- struct hwmon_device *hwdev = tdata->hwdev;
++ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
+ int ret;
+ long t;
+
+- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
++ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
+ tdata->index, &t);
+ if (ret < 0)
+ return ret;
+@@ -139,8 +158,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+ .get_temp = hwmon_thermal_get_temp,
+ };
+
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
+@@ -149,10 +167,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
+ if (!tdata)
+ return -ENOMEM;
+
+- tdata->hwdev = hwdev;
++ tdata->dev = dev;
+ tdata->index = index;
+
+- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
++ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+@@ -164,8 +182,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
+ return 0;
+ }
+ #else
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ return 0;
+ }
+@@ -242,8 +259,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+ (type == hwmon_fan && attr == hwmon_fan_label);
+ }
+
+-static struct attribute *hwmon_genattr(struct device *dev,
+- const void *drvdata,
++static struct attribute *hwmon_genattr(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr,
+ int index,
+@@ -271,7 +287,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
+ if ((mode & S_IWUGO) && !ops->write)
+ return ERR_PTR(-EINVAL);
+
+- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
++ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
+@@ -478,8 +494,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
+ return n;
+ }
+
+-static int hwmon_genattrs(struct device *dev,
+- const void *drvdata,
++static int hwmon_genattrs(const void *drvdata,
+ struct attribute **attrs,
+ const struct hwmon_ops *ops,
+ const struct hwmon_channel_info *info)
+@@ -505,7 +520,7 @@ static int hwmon_genattrs(struct device *dev,
+ attr_mask &= ~BIT(attr);
+ if (attr >= template_size)
+ return -EINVAL;
+- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
++ a = hwmon_genattr(drvdata, info->type, attr, i,
+ templates[attr], ops);
+ if (IS_ERR(a)) {
+ if (PTR_ERR(a) != -ENOENT)
+@@ -519,8 +534,7 @@ static int hwmon_genattrs(struct device *dev,
+ }
+
+ static struct attribute **
+-__hwmon_create_attrs(struct device *dev, const void *drvdata,
+- const struct hwmon_chip_info *chip)
++__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
+ {
+ int ret, i, aindex = 0, nattrs = 0;
+ struct attribute **attrs;
+@@ -531,15 +545,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
+ if (nattrs == 0)
+ return ERR_PTR(-EINVAL);
+
+- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
++ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; chip->info[i]; i++) {
+- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
++ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
+ chip->info[i]);
+- if (ret < 0)
++ if (ret < 0) {
++ hwmon_free_attrs(attrs);
+ return ERR_PTR(ret);
++ }
+ aindex += ret;
+ }
+
+@@ -581,14 +597,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ for (i = 0; groups[i]; i++)
+ ngroups++;
+
+- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
+- GFP_KERNEL);
++ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+
+- attrs = __hwmon_create_attrs(dev, drvdata, chip);
++ attrs = __hwmon_create_attrs(drvdata, chip);
+ if (IS_ERR(attrs)) {
+ err = PTR_ERR(attrs);
+ goto free_hwmon;
+@@ -633,8 +648,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ hwmon_temp_input, j))
+ continue;
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+- err = hwmon_thermal_add_sensor(dev,
+- hwdev, j);
++ err = hwmon_thermal_add_sensor(hdev, j);
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+@@ -647,7 +661,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ return hdev;
+
+ free_hwmon:
+- kfree(hwdev);
++ hwmon_dev_release(hdev);
+ ida_remove:
+ ida_simple_remove(&hwmon_ida, id);
+ return ERR_PTR(err);
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 38ffbdb0a85f..779ec8fdfae0 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -32,8 +32,8 @@
+ static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
+
+ static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
+- { 0x40, 0x00, 0x42, 0x44, 0x46 },
+- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
++ { 0x46, 0x00, 0x40, 0x42, 0x44 },
++ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
+ };
+
+ static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
+diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
+index 0dad8626bcfb..6cf28b049635 100644
+--- a/drivers/hwtracing/coresight/coresight-etb10.c
++++ b/drivers/hwtracing/coresight/coresight-etb10.c
+@@ -275,9 +275,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index e31061308e19..e90af39283b1 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -304,9 +304,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ /* Allocate memory structure for interaction with Perf */
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index f39670c5c25c..9899f7e155a5 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2584,17 +2584,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ }
+ }
+
+-static void
+-isert_wait4cmds(struct iscsi_conn *conn)
+-{
+- isert_info("iscsi_conn %p\n", conn);
+-
+- if (conn->sess) {
+- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+- target_wait_for_sess_cmds(conn->sess->se_sess);
+- }
+-}
+-
+ /**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicitate dataout
+@@ -2642,7 +2631,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_unsol_pending_cmds(conn);
+- isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
+index a8937ceac66a..af4db1350915 100644
+--- a/drivers/input/misc/keyspan_remote.c
++++ b/drivers/input/misc/keyspan_remote.c
+@@ -339,7 +339,8 @@ static int keyspan_setup(struct usb_device* dev)
+ int retval = 0;
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
++ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
+ __func__, retval);
+@@ -347,7 +348,8 @@ static int keyspan_setup(struct usb_device* dev)
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x44, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
+ __func__, retval);
+@@ -355,7 +357,8 @@ static int keyspan_setup(struct usb_device* dev)
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x22, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
+ __func__, retval);
+diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
+index 7dd1c1fbe42a..27b3db154a33 100644
+--- a/drivers/input/misc/pm8xxx-vibrator.c
++++ b/drivers/input/misc/pm8xxx-vibrator.c
+@@ -98,7 +98,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
+
+ if (regs->enable_mask)
+ rc = regmap_update_bits(vib->regmap, regs->enable_addr,
+- on ? regs->enable_mask : 0, val);
++ regs->enable_mask, on ? ~0 : 0);
+
+ return rc;
+ }
+diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
+index 4b2466cf2fb1..b6ccf39c6a7b 100644
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -166,6 +166,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+ exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+@@ -217,6 +218,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
+diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
+index c82cd5079d0e..dc2ad1cc8fe1 100644
+--- a/drivers/input/tablet/aiptek.c
++++ b/drivers/input/tablet/aiptek.c
+@@ -1815,14 +1815,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
+
+ /* Verify that a device really has an endpoint */
+- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev,
+ "interface has %d endpoints, but must have minimum 1\n",
+- intf->altsetting[0].desc.bNumEndpoints);
++ intf->cur_altsetting->desc.bNumEndpoints);
+ err = -EINVAL;
+ goto fail3;
+ }
+- endpoint = &intf->altsetting[0].endpoint[0].desc;
++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+
+ /* Go set up our URB, which is called when the tablet receives
+ * input.
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 35031228a6d0..799c94dda651 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ }
+
+ /* Sanity check that a device has an endpoint */
+- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
+- /*
+- * The endpoint is always altsetting 0, we know this since we know
+- * this device only has one interrupt endpoint
+- */
+- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
+
+ /* Some debug */
+ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
+@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ input_dev->dev.parent = &usbinterface->dev;
+
+ /* Setup the URB, it will be posted later on open of input device */
+- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
+
+ usb_fill_int_urb(gtco->urbinfo,
+ udev,
+diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
+index ffd03cfe3131..570cdaef3558 100644
+--- a/drivers/input/tablet/pegasus_notetaker.c
++++ b/drivers/input/tablet/pegasus_notetaker.c
+@@ -274,7 +274,7 @@ static int pegasus_probe(struct usb_interface *intf,
+ return -ENODEV;
+
+ /* Sanity check that the device has an endpoint */
+- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev, "Invalid number of endpoints\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
+index d2e14d9e5975..ab44eb0352d0 100644
+--- a/drivers/input/touchscreen/sun4i-ts.c
++++ b/drivers/input/touchscreen/sun4i-ts.c
+@@ -246,6 +246,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device *hwmon;
++ struct thermal_zone_device *thermal;
+ int error;
+ u32 reg;
+ bool ts_attached;
+@@ -365,7 +366,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
++ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
++ &sun4i_ts_tz_ops);
++ if (IS_ERR(thermal))
++ return PTR_ERR(thermal);
+
+ writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
+
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 894843a7ec7b..caa3aca2ea54 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -657,7 +657,7 @@ static int sur40_probe(struct usb_interface *interface,
+ int error;
+
+ /* Check if we really have the right interface. */
+- iface_desc = &interface->altsetting[0];
++ iface_desc = interface->cur_altsetting;
+ if (iface_desc->desc.bInterfaceClass != 0xFF)
+ return -ENODEV;
+
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 7675b645db2e..f75d892b6f03 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1548,12 +1548,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
+ break;
+@@ -1576,22 +1576,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap))
+ break;
+@@ -1635,12 +1635,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
+ break;
+@@ -1663,22 +1663,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
1452 |
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg); |
1453 |
+ case V4L2_BUF_TYPE_SDR_OUTPUT: |
1454 |
+ if (unlikely(!ops->vidioc_try_fmt_sdr_out)) |
1455 |
+ break; |
1456 |
+- CLEAR_AFTER_FIELD(p, fmt.sdr); |
1457 |
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize); |
1458 |
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg); |
1459 |
+ case V4L2_BUF_TYPE_META_CAPTURE: |
1460 |
+ if (unlikely(!ops->vidioc_try_fmt_meta_cap)) |
1461 |
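
Each of the v4l2-ioctl hunks above moves the CLEAR_AFTER_FIELD() anchor from a whole union member (fmt.vbi, fmt.sliced, fmt.sdr) to the last field the driver actually consumes. The macro zeroes everything in the ioctl argument past the named field, so anchoring at fmt.vbi.flags also wipes the reserved bytes at the tail of the vbi struct, which the old anchor skipped. A sketch of how such a macro can be written; this matches the v4l2-ioctl.c definition in spirit, not necessarily verbatim:

	#define CLEAR_AFTER_FIELD(p, field)					\
		memset((u8 *)(p) + offsetofend(typeof(*(p)), field), 0,	\
		       sizeof(*(p)) - offsetofend(typeof(*(p)), field))

With that shape, CLEAR_AFTER_FIELD(p, fmt.vbi.flags) clears from the end of flags to the end of *p, covering fmt.vbi.reserved as well as the remainder of the union.
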
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c |
1462 |
+index 908b23e6a03c..14d749a0de95 100644 |
1463 |
+--- a/drivers/mmc/host/sdhci-tegra.c |
1464 |
++++ b/drivers/mmc/host/sdhci-tegra.c |
1465 |
+@@ -177,7 +177,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) |
1466 |
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; |
1467 |
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) |
1468 |
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; |
1469 |
+- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) |
1470 |
++ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) |
1471 |
+ clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; |
1472 |
+ } |
1473 |
+ |
1474 |
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
1475 |
+index 369817a29c22..5a7fd89a8f2b 100644 |
1476 |
+--- a/drivers/mmc/host/sdhci.c |
1477 |
++++ b/drivers/mmc/host/sdhci.c |
1478 |
+@@ -3700,11 +3700,13 @@ int sdhci_setup_host(struct sdhci_host *host) |
1479 |
+ if (host->ops->get_min_clock) |
1480 |
+ mmc->f_min = host->ops->get_min_clock(host); |
1481 |
+ else if (host->version >= SDHCI_SPEC_300) { |
1482 |
+- if (host->clk_mul) { |
1483 |
+- mmc->f_min = (host->max_clk * host->clk_mul) / 1024; |
1484 |
++ if (host->clk_mul) |
1485 |
+ max_clk = host->max_clk * host->clk_mul; |
1486 |
+- } else |
1487 |
+- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; |
1488 |
++ /* |
1489 |
++ * Divided Clock Mode minimum clock rate is always less than |
1490 |
++ * Programmable Clock Mode minimum clock rate. |
1491 |
++ */ |
1492 |
++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; |
1493 |
+ } else |
1494 |
+ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
1495 |
+ |
1496 |
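
The new sdhci comment can be checked with concrete (hypothetical) numbers: take max_clk = 200 MHz and the spec-3.00 maximum divisor of 2046. Then

	f_min, Divided Clock Mode      = 200 MHz / 2046           ~  97.8 kHz
	f_min, Programmable Clock Mode = 200 MHz * clk_mul / 1024 >= 195.3 kHz  (clk_mul >= 1)

so the divided-mode figure is always the lower of the two, and the removed clk_mul branch for f_min was dead weight.
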
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c |
1497 |
+index cf0769ad39cd..b2e5bcae7fbe 100644 |
1498 |
+--- a/drivers/net/can/slcan.c |
1499 |
++++ b/drivers/net/can/slcan.c |
1500 |
+@@ -343,9 +343,16 @@ static void slcan_transmit(struct work_struct *work) |
1501 |
+ */ |
1502 |
+ static void slcan_write_wakeup(struct tty_struct *tty) |
1503 |
+ { |
1504 |
+- struct slcan *sl = tty->disc_data; |
1505 |
++ struct slcan *sl; |
1506 |
++ |
1507 |
++ rcu_read_lock(); |
1508 |
++ sl = rcu_dereference(tty->disc_data); |
1509 |
++ if (!sl) |
1510 |
++ goto out; |
1511 |
+ |
1512 |
+ schedule_work(&sl->tx_work); |
1513 |
++out: |
1514 |
++ rcu_read_unlock(); |
1515 |
+ } |
1516 |
+ |
1517 |
+ /* Send a can_frame to a TTY queue. */ |
1518 |
+@@ -640,10 +647,11 @@ static void slcan_close(struct tty_struct *tty) |
1519 |
+ return; |
1520 |
+ |
1521 |
+ spin_lock_bh(&sl->lock); |
1522 |
+- tty->disc_data = NULL; |
1523 |
++ rcu_assign_pointer(tty->disc_data, NULL); |
1524 |
+ sl->tty = NULL; |
1525 |
+ spin_unlock_bh(&sl->lock); |
1526 |
+ |
1527 |
++ synchronize_rcu(); |
1528 |
+ flush_work(&sl->tx_work); |
1529 |
+ |
1530 |
+ /* Flush network side */ |
1531 |
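
The slcan fix is the classic RCU publish/retire pattern applied to tty->disc_data: the wakeup callback can race with line-discipline teardown, so the reader dereferences under rcu_read_lock(), while the close path unpublishes the pointer and waits in synchronize_rcu() before flushing the work a racing reader may have scheduled. Stripped to a skeleton (a sketch with hypothetical names, not the driver code):

	/* reader side, e.g. an asynchronous callback */
	rcu_read_lock();
	priv = rcu_dereference(shared);
	if (priv)
		schedule_work(&priv->tx_work);	/* priv cannot go away yet */
	rcu_read_unlock();

	/* teardown side, where priv is the owner's own reference */
	rcu_assign_pointer(shared, NULL);	/* unpublish */
	synchronize_rcu();			/* all readers above have finished */
	flush_work(&priv->tx_work);		/* now safe to wait, then free */
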
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
1532 |
+index b7d75011cede..736a6a5fbd98 100644 |
1533 |
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
1534 |
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
1535 |
+@@ -2166,8 +2166,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
1536 |
+ DMA_END_ADDR); |
1537 |
+ |
1538 |
+ /* Initialize Tx NAPI */ |
1539 |
+- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, |
1540 |
+- NAPI_POLL_WEIGHT); |
1541 |
++ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, |
1542 |
++ NAPI_POLL_WEIGHT); |
1543 |
+ } |
1544 |
+ |
1545 |
+ /* Initialize a RDMA ring */ |
1546 |
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c |
1547 |
+index 6be6de0774b6..c82469ab7aba 100644 |
1548 |
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c |
1549 |
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c |
1550 |
+@@ -2449,6 +2449,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) |
1551 |
+ |
1552 |
+ if (!is_offload(adapter)) |
1553 |
+ return -EOPNOTSUPP; |
1554 |
++ if (!capable(CAP_NET_ADMIN)) |
1555 |
++ return -EPERM; |
1556 |
+ if (!(adapter->flags & FULL_INIT_DONE)) |
1557 |
+ return -EIO; /* need the memory controllers */ |
1558 |
+ if (copy_from_user(&t, useraddr, sizeof(t))) |
1559 |
+diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c |
1560 |
+index 5f1875fe47cd..69282f31d519 100644 |
1561 |
+--- a/drivers/net/ethernet/natsemi/sonic.c |
1562 |
++++ b/drivers/net/ethernet/natsemi/sonic.c |
1563 |
+@@ -63,6 +63,8 @@ static int sonic_open(struct net_device *dev) |
1564 |
+ |
1565 |
+ netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); |
1566 |
+ |
1567 |
++ spin_lock_init(&lp->lock); |
1568 |
++ |
1569 |
+ for (i = 0; i < SONIC_NUM_RRS; i++) { |
1570 |
+ struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); |
1571 |
+ if (skb == NULL) { |
1572 |
+@@ -113,6 +115,24 @@ static int sonic_open(struct net_device *dev) |
1573 |
+ return 0; |
1574 |
+ } |
1575 |
+ |
1576 |
++/* Wait for the SONIC to become idle. */ |
1577 |
++static void sonic_quiesce(struct net_device *dev, u16 mask) |
1578 |
++{ |
1579 |
++ struct sonic_local * __maybe_unused lp = netdev_priv(dev); |
1580 |
++ int i; |
1581 |
++ u16 bits; |
1582 |
++ |
1583 |
++ for (i = 0; i < 1000; ++i) { |
1584 |
++ bits = SONIC_READ(SONIC_CMD) & mask; |
1585 |
++ if (!bits) |
1586 |
++ return; |
1587 |
++ if (irqs_disabled() || in_interrupt()) |
1588 |
++ udelay(20); |
1589 |
++ else |
1590 |
++ usleep_range(100, 200); |
1591 |
++ } |
1592 |
++ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits); |
1593 |
++} |
1594 |
+ |
1595 |
+ /* |
1596 |
+ * Close the SONIC device |
1597 |
+@@ -129,6 +149,9 @@ static int sonic_close(struct net_device *dev) |
1598 |
+ /* |
1599 |
+ * stop the SONIC, disable interrupts |
1600 |
+ */ |
1601 |
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); |
1602 |
++ sonic_quiesce(dev, SONIC_CR_ALL); |
1603 |
++ |
1604 |
+ SONIC_WRITE(SONIC_IMR, 0); |
1605 |
+ SONIC_WRITE(SONIC_ISR, 0x7fff); |
1606 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); |
1607 |
+@@ -168,6 +191,9 @@ static void sonic_tx_timeout(struct net_device *dev) |
1608 |
+ * put the Sonic into software-reset mode and |
1609 |
+ * disable all interrupts before releasing DMA buffers |
1610 |
+ */ |
1611 |
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); |
1612 |
++ sonic_quiesce(dev, SONIC_CR_ALL); |
1613 |
++ |
1614 |
+ SONIC_WRITE(SONIC_IMR, 0); |
1615 |
+ SONIC_WRITE(SONIC_ISR, 0x7fff); |
1616 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); |
1617 |
+@@ -205,8 +231,6 @@ static void sonic_tx_timeout(struct net_device *dev) |
1618 |
+ * wake the tx queue |
1619 |
+ * Concurrently with all of this, the SONIC is potentially writing to |
1620 |
+ * the status flags of the TDs. |
1621 |
+- * Until some mutual exclusion is added, this code will not work with SMP. However, |
1622 |
+- * MIPS Jazz machines and m68k Macs were all uni-processor machines. |
1623 |
+ */ |
1624 |
+ |
1625 |
+ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) |
1626 |
+@@ -214,7 +238,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) |
1627 |
+ struct sonic_local *lp = netdev_priv(dev); |
1628 |
+ dma_addr_t laddr; |
1629 |
+ int length; |
1630 |
+- int entry = lp->next_tx; |
1631 |
++ int entry; |
1632 |
++ unsigned long flags; |
1633 |
+ |
1634 |
+ netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); |
1635 |
+ |
1636 |
+@@ -236,6 +261,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) |
1637 |
+ return NETDEV_TX_OK; |
1638 |
+ } |
1639 |
+ |
1640 |
++ spin_lock_irqsave(&lp->lock, flags); |
1641 |
++ |
1642 |
++ entry = lp->next_tx; |
1643 |
++ |
1644 |
+ sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ |
1645 |
+ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ |
1646 |
+ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ |
1647 |
+@@ -245,10 +274,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) |
1648 |
+ sonic_tda_put(dev, entry, SONIC_TD_LINK, |
1649 |
+ sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); |
1650 |
+ |
1651 |
+- /* |
1652 |
+- * Must set tx_skb[entry] only after clearing status, and |
1653 |
+- * before clearing EOL and before stopping queue |
1654 |
+- */ |
1655 |
+ wmb(); |
1656 |
+ lp->tx_len[entry] = length; |
1657 |
+ lp->tx_laddr[entry] = laddr; |
1658 |
+@@ -271,6 +296,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) |
1659 |
+ |
1660 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); |
1661 |
+ |
1662 |
++ spin_unlock_irqrestore(&lp->lock, flags); |
1663 |
++ |
1664 |
+ return NETDEV_TX_OK; |
1665 |
+ } |
1666 |
+ |
1667 |
+@@ -283,15 +310,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1668 |
+ struct net_device *dev = dev_id; |
1669 |
+ struct sonic_local *lp = netdev_priv(dev); |
1670 |
+ int status; |
1671 |
++ unsigned long flags; |
1672 |
++ |
1673 |
++ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() |
1674 |
++ * with sonic_send_packet() so that the two functions can share state. |
1675 |
++ * Secondly, it makes sonic_interrupt() re-entrant, as that is required |
1676 |
++ * by macsonic which must use two IRQs with different priority levels. |
1677 |
++ */ |
1678 |
++ spin_lock_irqsave(&lp->lock, flags); |
1679 |
++ |
1680 |
++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; |
1681 |
++ if (!status) { |
1682 |
++ spin_unlock_irqrestore(&lp->lock, flags); |
1683 |
+ |
1684 |
+- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) |
1685 |
+ return IRQ_NONE; |
1686 |
++ } |
1687 |
+ |
1688 |
+ do { |
1689 |
++ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ |
1690 |
++ |
1691 |
+ if (status & SONIC_INT_PKTRX) { |
1692 |
+ netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); |
1693 |
+ sonic_rx(dev); /* got packet(s) */ |
1694 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ |
1695 |
+ } |
1696 |
+ |
1697 |
+ if (status & SONIC_INT_TXDN) { |
1698 |
+@@ -299,11 +339,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1699 |
+ int td_status; |
1700 |
+ int freed_some = 0; |
1701 |
+ |
1702 |
+- /* At this point, cur_tx is the index of a TD that is one of: |
1703 |
+- * unallocated/freed (status set & tx_skb[entry] clear) |
1704 |
+- * allocated and sent (status set & tx_skb[entry] set ) |
1705 |
+- * allocated and not yet sent (status clear & tx_skb[entry] set ) |
1706 |
+- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) |
1707 |
++ /* The state of a Transmit Descriptor may be inferred |
1708 |
++ * from { tx_skb[entry], td_status } as follows. |
1709 |
++ * { clear, clear } => the TD has never been used |
1710 |
++ * { set, clear } => the TD was handed to SONIC |
1711 |
++ * { set, set } => the TD was handed back |
1712 |
++ * { clear, set } => the TD is available for re-use |
1713 |
+ */ |
1714 |
+ |
1715 |
+ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); |
1716 |
+@@ -312,18 +353,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1717 |
+ if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) |
1718 |
+ break; |
1719 |
+ |
1720 |
+- if (td_status & 0x0001) { |
1721 |
++ if (td_status & SONIC_TCR_PTX) { |
1722 |
+ lp->stats.tx_packets++; |
1723 |
+ lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); |
1724 |
+ } else { |
1725 |
+- lp->stats.tx_errors++; |
1726 |
+- if (td_status & 0x0642) |
1727 |
++ if (td_status & (SONIC_TCR_EXD | |
1728 |
++ SONIC_TCR_EXC | SONIC_TCR_BCM)) |
1729 |
+ lp->stats.tx_aborted_errors++; |
1730 |
+- if (td_status & 0x0180) |
1731 |
++ if (td_status & |
1732 |
++ (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) |
1733 |
+ lp->stats.tx_carrier_errors++; |
1734 |
+- if (td_status & 0x0020) |
1735 |
++ if (td_status & SONIC_TCR_OWC) |
1736 |
+ lp->stats.tx_window_errors++; |
1737 |
+- if (td_status & 0x0004) |
1738 |
++ if (td_status & SONIC_TCR_FU) |
1739 |
+ lp->stats.tx_fifo_errors++; |
1740 |
+ } |
1741 |
+ |
1742 |
+@@ -345,7 +387,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1743 |
+ if (freed_some || lp->tx_skb[entry] == NULL) |
1744 |
+ netif_wake_queue(dev); /* The ring is no longer full */ |
1745 |
+ lp->cur_tx = entry; |
1746 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ |
1747 |
+ } |
1748 |
+ |
1749 |
+ /* |
1750 |
+@@ -354,42 +395,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1751 |
+ if (status & SONIC_INT_RFO) { |
1752 |
+ netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", |
1753 |
+ __func__); |
1754 |
+- lp->stats.rx_fifo_errors++; |
1755 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ |
1756 |
+ } |
1757 |
+ if (status & SONIC_INT_RDE) { |
1758 |
+ netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", |
1759 |
+ __func__); |
1760 |
+- lp->stats.rx_dropped++; |
1761 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ |
1762 |
+ } |
1763 |
+ if (status & SONIC_INT_RBAE) { |
1764 |
+ netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", |
1765 |
+ __func__); |
1766 |
+- lp->stats.rx_dropped++; |
1767 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ |
1768 |
+ } |
1769 |
+ |
1770 |
+ /* counter overruns; all counters are 16bit wide */ |
1771 |
+- if (status & SONIC_INT_FAE) { |
1772 |
++ if (status & SONIC_INT_FAE) |
1773 |
+ lp->stats.rx_frame_errors += 65536; |
1774 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ |
1775 |
+- } |
1776 |
+- if (status & SONIC_INT_CRC) { |
1777 |
++ if (status & SONIC_INT_CRC) |
1778 |
+ lp->stats.rx_crc_errors += 65536; |
1779 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ |
1780 |
+- } |
1781 |
+- if (status & SONIC_INT_MP) { |
1782 |
++ if (status & SONIC_INT_MP) |
1783 |
+ lp->stats.rx_missed_errors += 65536; |
1784 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */ |
1785 |
+- } |
1786 |
+ |
1787 |
+ /* transmit error */ |
1788 |
+ if (status & SONIC_INT_TXER) { |
1789 |
+- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) |
1790 |
+- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", |
1791 |
+- __func__); |
1792 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ |
1793 |
++ u16 tcr = SONIC_READ(SONIC_TCR); |
1794 |
++ |
1795 |
++ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", |
1796 |
++ __func__, tcr); |
1797 |
++ |
1798 |
++ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | |
1799 |
++ SONIC_TCR_FU | SONIC_TCR_BCM)) { |
1800 |
++ /* Aborted transmission. Try again. */ |
1801 |
++ netif_stop_queue(dev); |
1802 |
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); |
1803 |
++ } |
1804 |
+ } |
1805 |
+ |
1806 |
+ /* bus retry */ |
1807 |
+@@ -399,107 +435,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) |
1808 |
+ /* ... to help debug DMA problems causing endless interrupts. */ |
1809 |
+ /* Bounce the eth interface to turn on the interrupt again. */ |
1810 |
+ SONIC_WRITE(SONIC_IMR, 0); |
1811 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ |
1812 |
+ } |
1813 |
+ |
1814 |
+- /* load CAM done */ |
1815 |
+- if (status & SONIC_INT_LCD) |
1816 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ |
1817 |
+- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); |
1818 |
++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; |
1819 |
++ } while (status); |
1820 |
++ |
1821 |
++ spin_unlock_irqrestore(&lp->lock, flags); |
1822 |
++ |
1823 |
+ return IRQ_HANDLED; |
1824 |
+ } |
1825 |
+ |
1826 |
++/* Return the array index corresponding to a given Receive Buffer pointer. */ |
1827 |
++static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, |
1828 |
++ unsigned int last) |
1829 |
++{ |
1830 |
++ unsigned int i = last; |
1831 |
++ |
1832 |
++ do { |
1833 |
++ i = (i + 1) & SONIC_RRS_MASK; |
1834 |
++ if (addr == lp->rx_laddr[i]) |
1835 |
++ return i; |
1836 |
++ } while (i != last); |
1837 |
++ |
1838 |
++ return -ENOENT; |
1839 |
++} |
1840 |
++ |
1841 |
++/* Allocate and map a new skb to be used as a receive buffer. */ |
1842 |
++static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, |
1843 |
++ struct sk_buff **new_skb, dma_addr_t *new_addr) |
1844 |
++{ |
1845 |
++ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); |
1846 |
++ if (!*new_skb) |
1847 |
++ return false; |
1848 |
++ |
1849 |
++ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
1850 |
++ skb_reserve(*new_skb, 2); |
1851 |
++ |
1852 |
++ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), |
1853 |
++ SONIC_RBSIZE, DMA_FROM_DEVICE); |
1854 |
++ if (!*new_addr) { |
1855 |
++ dev_kfree_skb(*new_skb); |
1856 |
++ *new_skb = NULL; |
1857 |
++ return false; |
1858 |
++ } |
1859 |
++ |
1860 |
++ return true; |
1861 |
++} |
1862 |
++ |
1863 |
++/* Place a new receive resource in the Receive Resource Area and update RWP. */ |
1864 |
++static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, |
1865 |
++ dma_addr_t old_addr, dma_addr_t new_addr) |
1866 |
++{ |
1867 |
++ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); |
1868 |
++ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); |
1869 |
++ u32 buf; |
1870 |
++ |
1871 |
++ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop |
1872 |
++ * scans the other resources in the RRA, those in the range [RWP, RRP). |
1873 |
++ */ |
1874 |
++ do { |
1875 |
++ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | |
1876 |
++ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); |
1877 |
++ |
1878 |
++ if (buf == old_addr) |
1879 |
++ break; |
1880 |
++ |
1881 |
++ entry = (entry + 1) & SONIC_RRS_MASK; |
1882 |
++ } while (entry != end); |
1883 |
++ |
1884 |
++ WARN_ONCE(buf != old_addr, "failed to find resource!\n"); |
1885 |
++ |
1886 |
++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); |
1887 |
++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); |
1888 |
++ |
1889 |
++ entry = (entry + 1) & SONIC_RRS_MASK; |
1890 |
++ |
1891 |
++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); |
1892 |
++} |
1893 |
++ |
1894 |
+ /* |
1895 |
+ * We have a good packet(s), pass it/them up the network stack. |
1896 |
+ */ |
1897 |
+ static void sonic_rx(struct net_device *dev) |
1898 |
+ { |
1899 |
+ struct sonic_local *lp = netdev_priv(dev); |
1900 |
+- int status; |
1901 |
+ int entry = lp->cur_rx; |
1902 |
++ int prev_entry = lp->eol_rx; |
1903 |
++ bool rbe = false; |
1904 |
+ |
1905 |
+ while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { |
1906 |
+- struct sk_buff *used_skb; |
1907 |
+- struct sk_buff *new_skb; |
1908 |
+- dma_addr_t new_laddr; |
1909 |
+- u16 bufadr_l; |
1910 |
+- u16 bufadr_h; |
1911 |
+- int pkt_len; |
1912 |
+- |
1913 |
+- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); |
1914 |
+- if (status & SONIC_RCR_PRX) { |
1915 |
+- /* Malloc up new buffer. */ |
1916 |
+- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); |
1917 |
+- if (new_skb == NULL) { |
1918 |
+- lp->stats.rx_dropped++; |
1919 |
++ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); |
1920 |
++ |
1921 |
++ /* If the RD has LPKT set, the chip has finished with the RB */ |
1922 |
++ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { |
1923 |
++ struct sk_buff *new_skb; |
1924 |
++ dma_addr_t new_laddr; |
1925 |
++ u32 addr = (sonic_rda_get(dev, entry, |
1926 |
++ SONIC_RD_PKTPTR_H) << 16) | |
1927 |
++ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); |
1928 |
++ int i = index_from_addr(lp, addr, entry); |
1929 |
++ |
1930 |
++ if (i < 0) { |
1931 |
++ WARN_ONCE(1, "failed to find buffer!\n"); |
1932 |
+ break; |
1933 |
+ } |
1934 |
+- /* provide 16 byte IP header alignment unless DMA requires otherwise */ |
1935 |
+- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) |
1936 |
+- skb_reserve(new_skb, 2); |
1937 |
+- |
1938 |
+- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), |
1939 |
+- SONIC_RBSIZE, DMA_FROM_DEVICE); |
1940 |
+- if (!new_laddr) { |
1941 |
+- dev_kfree_skb(new_skb); |
1942 |
+- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); |
1943 |
++ |
1944 |
++ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { |
1945 |
++ struct sk_buff *used_skb = lp->rx_skb[i]; |
1946 |
++ int pkt_len; |
1947 |
++ |
1948 |
++ /* Pass the used buffer up the stack */ |
1949 |
++ dma_unmap_single(lp->device, addr, SONIC_RBSIZE, |
1950 |
++ DMA_FROM_DEVICE); |
1951 |
++ |
1952 |
++ pkt_len = sonic_rda_get(dev, entry, |
1953 |
++ SONIC_RD_PKTLEN); |
1954 |
++ skb_trim(used_skb, pkt_len); |
1955 |
++ used_skb->protocol = eth_type_trans(used_skb, |
1956 |
++ dev); |
1957 |
++ netif_rx(used_skb); |
1958 |
++ lp->stats.rx_packets++; |
1959 |
++ lp->stats.rx_bytes += pkt_len; |
1960 |
++ |
1961 |
++ lp->rx_skb[i] = new_skb; |
1962 |
++ lp->rx_laddr[i] = new_laddr; |
1963 |
++ } else { |
1964 |
++ /* Failed to obtain a new buffer so re-use it */ |
1965 |
++ new_laddr = addr; |
1966 |
+ lp->stats.rx_dropped++; |
1967 |
+- break; |
1968 |
+ } |
1969 |
+- |
1970 |
+- /* now we have a new skb to replace it, pass the used one up the stack */ |
1971 |
+- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); |
1972 |
+- used_skb = lp->rx_skb[entry]; |
1973 |
+- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); |
1974 |
+- skb_trim(used_skb, pkt_len); |
1975 |
+- used_skb->protocol = eth_type_trans(used_skb, dev); |
1976 |
+- netif_rx(used_skb); |
1977 |
+- lp->stats.rx_packets++; |
1978 |
+- lp->stats.rx_bytes += pkt_len; |
1979 |
+- |
1980 |
+- /* and insert the new skb */ |
1981 |
+- lp->rx_laddr[entry] = new_laddr; |
1982 |
+- lp->rx_skb[entry] = new_skb; |
1983 |
+- |
1984 |
+- bufadr_l = (unsigned long)new_laddr & 0xffff; |
1985 |
+- bufadr_h = (unsigned long)new_laddr >> 16; |
1986 |
+- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); |
1987 |
+- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); |
1988 |
+- } else { |
1989 |
+- /* This should only happen, if we enable accepting broken packets. */ |
1990 |
+- lp->stats.rx_errors++; |
1991 |
+- if (status & SONIC_RCR_FAER) |
1992 |
+- lp->stats.rx_frame_errors++; |
1993 |
+- if (status & SONIC_RCR_CRCR) |
1994 |
+- lp->stats.rx_crc_errors++; |
1995 |
+- } |
1996 |
+- if (status & SONIC_RCR_LPKT) { |
1997 |
+- /* |
1998 |
+- * this was the last packet out of the current receive buffer |
1999 |
+- * give the buffer back to the SONIC |
2000 |
++ /* If RBE is already asserted when RWP advances then |
2001 |
++ * it's safe to clear RBE after processing this packet. |
2002 |
+ */ |
2003 |
+- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); |
2004 |
+- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; |
2005 |
+- SONIC_WRITE(SONIC_RWP, lp->cur_rwp); |
2006 |
+- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { |
2007 |
+- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", |
2008 |
+- __func__); |
2009 |
+- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ |
2010 |
+- } |
2011 |
+- } else |
2012 |
+- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n", |
2013 |
+- dev->name); |
2014 |
++ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; |
2015 |
++ sonic_update_rra(dev, lp, addr, new_laddr); |
2016 |
++ } |
2017 |
+ /* |
2018 |
+ * give back the descriptor |
2019 |
+ */ |
2020 |
+- sonic_rda_put(dev, entry, SONIC_RD_LINK, |
2021 |
+- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); |
2022 |
++ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); |
2023 |
+ sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); |
2024 |
+- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, |
2025 |
+- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); |
2026 |
+- lp->eol_rx = entry; |
2027 |
+- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; |
2028 |
++ |
2029 |
++ prev_entry = entry; |
2030 |
++ entry = (entry + 1) & SONIC_RDS_MASK; |
2031 |
++ } |
2032 |
++ |
2033 |
++ lp->cur_rx = entry; |
2034 |
++ |
2035 |
++ if (prev_entry != lp->eol_rx) { |
2036 |
++ /* Advance the EOL flag to put descriptors back into service */ |
2037 |
++ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | |
2038 |
++ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); |
2039 |
++ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & |
2040 |
++ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); |
2041 |
++ lp->eol_rx = prev_entry; |
2042 |
+ } |
2043 |
++ |
2044 |
++ if (rbe) |
2045 |
++ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); |
2046 |
+ /* |
2047 |
+ * If any worth-while packets have been received, netif_rx() |
2048 |
+ * has done a mark_bh(NET_BH) for us and will work on them |
2049 |
+@@ -549,6 +642,8 @@ static void sonic_multicast_list(struct net_device *dev) |
2050 |
+ (netdev_mc_count(dev) > 15)) { |
2051 |
+ rcr |= SONIC_RCR_AMC; |
2052 |
+ } else { |
2053 |
++ unsigned long flags; |
2054 |
++ |
2055 |
+ netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, |
2056 |
+ netdev_mc_count(dev)); |
2057 |
+ sonic_set_cam_enable(dev, 1); /* always enable our own address */ |
2058 |
+@@ -562,9 +657,14 @@ static void sonic_multicast_list(struct net_device *dev) |
2059 |
+ i++; |
2060 |
+ } |
2061 |
+ SONIC_WRITE(SONIC_CDC, 16); |
2062 |
+- /* issue Load CAM command */ |
2063 |
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); |
2064 |
++ |
2065 |
++ /* LCAM and TXP commands can't be used simultaneously */ |
2066 |
++ spin_lock_irqsave(&lp->lock, flags); |
2067 |
++ sonic_quiesce(dev, SONIC_CR_TXP); |
2068 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); |
2069 |
++ sonic_quiesce(dev, SONIC_CR_LCAM); |
2070 |
++ spin_unlock_irqrestore(&lp->lock, flags); |
2071 |
+ } |
2072 |
+ } |
2073 |
+ |
2074 |
+@@ -579,7 +679,6 @@ static void sonic_multicast_list(struct net_device *dev) |
2075 |
+ */ |
2076 |
+ static int sonic_init(struct net_device *dev) |
2077 |
+ { |
2078 |
+- unsigned int cmd; |
2079 |
+ struct sonic_local *lp = netdev_priv(dev); |
2080 |
+ int i; |
2081 |
+ |
2082 |
+@@ -591,12 +690,16 @@ static int sonic_init(struct net_device *dev) |
2083 |
+ SONIC_WRITE(SONIC_ISR, 0x7fff); |
2084 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); |
2085 |
+ |
2086 |
++ /* While in reset mode, clear CAM Enable register */ |
2087 |
++ SONIC_WRITE(SONIC_CE, 0); |
2088 |
++ |
2089 |
+ /* |
2090 |
+ * clear software reset flag, disable receiver, clear and |
2091 |
+ * enable interrupts, then completely initialize the SONIC |
2092 |
+ */ |
2093 |
+ SONIC_WRITE(SONIC_CMD, 0); |
2094 |
+- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); |
2095 |
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); |
2096 |
++ sonic_quiesce(dev, SONIC_CR_ALL); |
2097 |
+ |
2098 |
+ /* |
2099 |
+ * initialize the receive resource area |
2100 |
+@@ -614,15 +717,10 @@ static int sonic_init(struct net_device *dev) |
2101 |
+ } |
2102 |
+ |
2103 |
+ /* initialize all RRA registers */ |
2104 |
+- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * |
2105 |
+- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; |
2106 |
+- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * |
2107 |
+- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; |
2108 |
+- |
2109 |
+- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); |
2110 |
+- SONIC_WRITE(SONIC_REA, lp->rra_end); |
2111 |
+- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); |
2112 |
+- SONIC_WRITE(SONIC_RWP, lp->cur_rwp); |
2113 |
++ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); |
2114 |
++ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); |
2115 |
++ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); |
2116 |
++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); |
2117 |
+ SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); |
2118 |
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); |
2119 |
+ |
2120 |
+@@ -630,14 +728,7 @@ static int sonic_init(struct net_device *dev) |
2121 |
+ netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); |
2122 |
+ |
2123 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); |
2124 |
+- i = 0; |
2125 |
+- while (i++ < 100) { |
2126 |
+- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) |
2127 |
+- break; |
2128 |
+- } |
2129 |
+- |
2130 |
+- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, |
2131 |
+- SONIC_READ(SONIC_CMD), i); |
2132 |
++ sonic_quiesce(dev, SONIC_CR_RRRA); |
2133 |
+ |
2134 |
+ /* |
2135 |
+ * Initialize the receive descriptors so that they |
2136 |
+@@ -712,28 +803,17 @@ static int sonic_init(struct net_device *dev) |
2137 |
+ * load the CAM |
2138 |
+ */ |
2139 |
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); |
2140 |
+- |
2141 |
+- i = 0; |
2142 |
+- while (i++ < 100) { |
2143 |
+- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) |
2144 |
+- break; |
2145 |
+- } |
2146 |
+- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, |
2147 |
+- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); |
2148 |
++ sonic_quiesce(dev, SONIC_CR_LCAM); |
2149 |
+ |
2150 |
+ /* |
2151 |
+ * enable receiver, disable loopback |
2152 |
+ * and enable all interrupts |
2153 |
+ */ |
2154 |
+- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); |
2155 |
+ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); |
2156 |
+ SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); |
2157 |
+ SONIC_WRITE(SONIC_ISR, 0x7fff); |
2158 |
+ SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); |
2159 |
+- |
2160 |
+- cmd = SONIC_READ(SONIC_CMD); |
2161 |
+- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) |
2162 |
+- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); |
2163 |
++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); |
2164 |
+ |
2165 |
+ netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, |
2166 |
+ SONIC_READ(SONIC_CMD)); |
2167 |
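
Throughout the reworked sonic.c, ring indices advance with expressions like (entry + 1) & SONIC_RDS_MASK. That only works because the ring sizes are powers of two, so the mask is size - 1 and the AND implements the modulo. A self-contained sketch of the idiom, with made-up names:

	#define RING_SIZE	16			/* must be a power of two */
	#define RING_MASK	(RING_SIZE - 1)

	static inline unsigned int ring_next(unsigned int entry)
	{
		return (entry + 1) & RING_MASK;		/* ..., 14, 15, 0, 1, ... */
	}
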
+diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h |
2168 |
+index 2b27f7049acb..1df6d2f06cc4 100644 |
2169 |
+--- a/drivers/net/ethernet/natsemi/sonic.h |
2170 |
++++ b/drivers/net/ethernet/natsemi/sonic.h |
2171 |
+@@ -110,6 +110,9 @@ |
2172 |
+ #define SONIC_CR_TXP 0x0002 |
2173 |
+ #define SONIC_CR_HTX 0x0001 |
2174 |
+ |
2175 |
++#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \ |
2176 |
++ SONIC_CR_RXEN | SONIC_CR_TXP) |
2177 |
++ |
2178 |
+ /* |
2179 |
+ * SONIC data configuration bits |
2180 |
+ */ |
2181 |
+@@ -175,6 +178,7 @@ |
2182 |
+ #define SONIC_TCR_NCRS 0x0100 |
2183 |
+ #define SONIC_TCR_CRLS 0x0080 |
2184 |
+ #define SONIC_TCR_EXC 0x0040 |
2185 |
++#define SONIC_TCR_OWC 0x0020 |
2186 |
+ #define SONIC_TCR_PMB 0x0008 |
2187 |
+ #define SONIC_TCR_FU 0x0004 |
2188 |
+ #define SONIC_TCR_BCM 0x0002 |
2189 |
+@@ -274,8 +278,9 @@ |
2190 |
+ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ |
2191 |
+ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */ |
2192 |
+ |
2193 |
+-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) |
2194 |
+-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) |
2195 |
++#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1) |
2196 |
++#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1) |
2197 |
++#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1) |
2198 |
+ |
2199 |
+ #define SONIC_RBSIZE 1520 /* size of one resource buffer */ |
2200 |
+ |
2201 |
+@@ -312,8 +317,6 @@ struct sonic_local { |
2202 |
+ u32 rda_laddr; /* logical DMA address of RDA */ |
2203 |
+ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ |
2204 |
+ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ |
2205 |
+- unsigned int rra_end; |
2206 |
+- unsigned int cur_rwp; |
2207 |
+ unsigned int cur_rx; |
2208 |
+ unsigned int cur_tx; /* first unacked transmit packet */ |
2209 |
+ unsigned int eol_rx; |
2210 |
+@@ -322,6 +325,7 @@ struct sonic_local { |
2211 |
+ int msg_enable; |
2212 |
+ struct device *device; /* generic device */ |
2213 |
+ struct net_device_stats stats; |
2214 |
++ spinlock_t lock; |
2215 |
+ }; |
2216 |
+ |
2217 |
+ #define TX_TIMEOUT (3 * HZ) |
2218 |
+@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev); |
2219 |
+ as far as we can tell. */ |
2220 |
+ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() |
2221 |
+ is a much better name. */ |
2222 |
+-static inline void sonic_buf_put(void* base, int bitmode, |
2223 |
++static inline void sonic_buf_put(u16 *base, int bitmode, |
2224 |
+ int offset, __u16 val) |
2225 |
+ { |
2226 |
+ if (bitmode) |
2227 |
+ #ifdef __BIG_ENDIAN |
2228 |
+- ((__u16 *) base + (offset*2))[1] = val; |
2229 |
++ __raw_writew(val, base + (offset * 2) + 1); |
2230 |
+ #else |
2231 |
+- ((__u16 *) base + (offset*2))[0] = val; |
2232 |
++ __raw_writew(val, base + (offset * 2) + 0); |
2233 |
+ #endif |
2234 |
+ else |
2235 |
+- ((__u16 *) base)[offset] = val; |
2236 |
++ __raw_writew(val, base + (offset * 1) + 0); |
2237 |
+ } |
2238 |
+ |
2239 |
+-static inline __u16 sonic_buf_get(void* base, int bitmode, |
2240 |
++static inline __u16 sonic_buf_get(u16 *base, int bitmode, |
2241 |
+ int offset) |
2242 |
+ { |
2243 |
+ if (bitmode) |
2244 |
+ #ifdef __BIG_ENDIAN |
2245 |
+- return ((volatile __u16 *) base + (offset*2))[1]; |
2246 |
++ return __raw_readw(base + (offset * 2) + 1); |
2247 |
+ #else |
2248 |
+- return ((volatile __u16 *) base + (offset*2))[0]; |
2249 |
++ return __raw_readw(base + (offset * 2) + 0); |
2250 |
+ #endif |
2251 |
+ else |
2252 |
+- return ((volatile __u16 *) base)[offset]; |
2253 |
++ return __raw_readw(base + (offset * 1) + 0); |
2254 |
+ } |
2255 |
+ |
2256 |
+ /* Inlines that you should actually use for reading/writing DMA buffers */ |
2257 |
+@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry, |
2258 |
+ (entry * SIZEOF_SONIC_RR) + offset); |
2259 |
+ } |
2260 |
+ |
2261 |
++static inline u16 sonic_rr_addr(struct net_device *dev, int entry) |
2262 |
++{ |
2263 |
++ struct sonic_local *lp = netdev_priv(dev); |
2264 |
++ |
2265 |
++ return lp->rra_laddr + |
2266 |
++ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); |
2267 |
++} |
2268 |
++ |
2269 |
++static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr) |
2270 |
++{ |
2271 |
++ struct sonic_local *lp = netdev_priv(dev); |
2272 |
++ |
2273 |
++ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR * |
2274 |
++ SONIC_BUS_SCALE(lp->dma_bitmode)); |
2275 |
++} |
2276 |
++ |
2277 |
+ static const char version[] = |
2278 |
+ "sonic.c:v0.92 20.9.98 tsbogend@×××××××××××××.de\n"; |
2279 |
+ |
2280 |
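
The new sonic_rr_addr() and sonic_rr_entry() helpers are inverses: one scales an RRA index into a bus address relative to rra_laddr, the other divides the offset back into an index. A worked round trip, assuming 32-bit mode (SONIC_BUS_SCALE(lp->dma_bitmode) == 4) and SIZEOF_SONIC_RR == 4, i.e. 16 bytes per resource:

	u16 addr  = sonic_rr_addr(dev, 3);	/* rra_laddr + 3 * 4 * 4 = rra_laddr + 48 */
	u16 entry = sonic_rr_entry(dev, addr);	/* 48 / (4 * 4) = 3 */
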
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c |
2281 |
+index 6571cac6e786..ee086441dcbe 100644 |
2282 |
+--- a/drivers/net/gtp.c |
2283 |
++++ b/drivers/net/gtp.c |
2284 |
+@@ -809,19 +809,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, |
2285 |
+ return NULL; |
2286 |
+ } |
2287 |
+ |
2288 |
+- if (sock->sk->sk_protocol != IPPROTO_UDP) { |
2289 |
++ sk = sock->sk; |
2290 |
++ if (sk->sk_protocol != IPPROTO_UDP || |
2291 |
++ sk->sk_type != SOCK_DGRAM || |
2292 |
++ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { |
2293 |
+ pr_debug("socket fd=%d not UDP\n", fd); |
2294 |
+ sk = ERR_PTR(-EINVAL); |
2295 |
+ goto out_sock; |
2296 |
+ } |
2297 |
+ |
2298 |
+- lock_sock(sock->sk); |
2299 |
+- if (sock->sk->sk_user_data) { |
2300 |
++ lock_sock(sk); |
2301 |
++ if (sk->sk_user_data) { |
2302 |
+ sk = ERR_PTR(-EBUSY); |
2303 |
+ goto out_rel_sock; |
2304 |
+ } |
2305 |
+ |
2306 |
+- sk = sock->sk; |
2307 |
+ sock_hold(sk); |
2308 |
+ |
2309 |
+ tuncfg.sk_user_data = gtp; |
2310 |
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c |
2311 |
+index 77207f936871..93f303ec17e2 100644 |
2312 |
+--- a/drivers/net/slip/slip.c |
2313 |
++++ b/drivers/net/slip/slip.c |
2314 |
+@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) |
2315 |
+ */ |
2316 |
+ static void slip_write_wakeup(struct tty_struct *tty) |
2317 |
+ { |
2318 |
+- struct slip *sl = tty->disc_data; |
2319 |
++ struct slip *sl; |
2320 |
++ |
2321 |
++ rcu_read_lock(); |
2322 |
++ sl = rcu_dereference(tty->disc_data); |
2323 |
++ if (!sl) |
2324 |
++ goto out; |
2325 |
+ |
2326 |
+ schedule_work(&sl->tx_work); |
2327 |
++out: |
2328 |
++ rcu_read_unlock(); |
2329 |
+ } |
2330 |
+ |
2331 |
+ static void sl_tx_timeout(struct net_device *dev) |
2332 |
+@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty) |
2333 |
+ return; |
2334 |
+ |
2335 |
+ spin_lock_bh(&sl->lock); |
2336 |
+- tty->disc_data = NULL; |
2337 |
++ rcu_assign_pointer(tty->disc_data, NULL); |
2338 |
+ sl->tty = NULL; |
2339 |
+ spin_unlock_bh(&sl->lock); |
2340 |
+ |
2341 |
++ synchronize_rcu(); |
2342 |
+ flush_work(&sl->tx_work); |
2343 |
+ |
2344 |
+ /* VSV = very important to remove timers */ |
2345 |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
2346 |
+index bbd92221c6ca..09c444d3b496 100644 |
2347 |
+--- a/drivers/net/tun.c |
2348 |
++++ b/drivers/net/tun.c |
2349 |
+@@ -1900,6 +1900,10 @@ drop: |
2350 |
+ if (ret != XDP_PASS) { |
2351 |
+ rcu_read_unlock(); |
2352 |
+ local_bh_enable(); |
2353 |
++ if (frags) { |
2354 |
++ tfile->napi.skb = NULL; |
2355 |
++ mutex_unlock(&tfile->napi_mutex); |
2356 |
++ } |
2357 |
+ return total_len; |
2358 |
+ } |
2359 |
+ } |
2360 |
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c |
2361 |
+index 7d708aeb4576..92548887df2f 100644 |
2362 |
+--- a/drivers/net/usb/lan78xx.c |
2363 |
++++ b/drivers/net/usb/lan78xx.c |
2364 |
+@@ -31,6 +31,7 @@ |
2365 |
+ #include <linux/mdio.h> |
2366 |
+ #include <linux/phy.h> |
2367 |
+ #include <net/ip6_checksum.h> |
2368 |
++#include <net/vxlan.h> |
2369 |
+ #include <linux/interrupt.h> |
2370 |
+ #include <linux/irqdomain.h> |
2371 |
+ #include <linux/irq.h> |
2372 |
+@@ -3686,6 +3687,19 @@ static void lan78xx_tx_timeout(struct net_device *net) |
2373 |
+ tasklet_schedule(&dev->bh); |
2374 |
+ } |
2375 |
+ |
2376 |
++static netdev_features_t lan78xx_features_check(struct sk_buff *skb, |
2377 |
++ struct net_device *netdev, |
2378 |
++ netdev_features_t features) |
2379 |
++{ |
2380 |
++ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) |
2381 |
++ features &= ~NETIF_F_GSO_MASK; |
2382 |
++ |
2383 |
++ features = vlan_features_check(skb, features); |
2384 |
++ features = vxlan_features_check(skb, features); |
2385 |
++ |
2386 |
++ return features; |
2387 |
++} |
2388 |
++ |
2389 |
+ static const struct net_device_ops lan78xx_netdev_ops = { |
2390 |
+ .ndo_open = lan78xx_open, |
2391 |
+ .ndo_stop = lan78xx_stop, |
2392 |
+@@ -3699,6 +3713,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { |
2393 |
+ .ndo_set_features = lan78xx_set_features, |
2394 |
+ .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, |
2395 |
+ .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, |
2396 |
++ .ndo_features_check = lan78xx_features_check, |
2397 |
+ }; |
2398 |
+ |
2399 |
+ static void lan78xx_stat_monitor(struct timer_list *t) |
2400 |
+diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c |
2401 |
+index 57edfada0665..c9401c121a14 100644 |
2402 |
+--- a/drivers/net/wireless/marvell/libertas/cfg.c |
2403 |
++++ b/drivers/net/wireless/marvell/libertas/cfg.c |
2404 |
+@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) |
2405 |
+ int hw, ap, ap_max = ie[1]; |
2406 |
+ u8 hw_rate; |
2407 |
+ |
2408 |
++ if (ap_max > MAX_RATES) { |
2409 |
++ lbs_deb_assoc("invalid rates\n"); |
2410 |
++ return tlv; |
2411 |
++ } |
2412 |
+ /* Advance past IE header */ |
2413 |
+ ie += 2; |
2414 |
+ |
2415 |
+@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, |
2416 |
+ struct cmd_ds_802_11_ad_hoc_join cmd; |
2417 |
+ u8 preamble = RADIO_PREAMBLE_SHORT; |
2418 |
+ int ret = 0; |
2419 |
++ int hw, i; |
2420 |
++ u8 rates_max; |
2421 |
++ u8 *rates; |
2422 |
+ |
2423 |
+ /* TODO: set preamble based on scan result */ |
2424 |
+ ret = lbs_set_radio(priv, preamble, 1); |
2425 |
+@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, |
2426 |
+ if (!rates_eid) { |
2427 |
+ lbs_add_rates(cmd.bss.rates); |
2428 |
+ } else { |
2429 |
+- int hw, i; |
2430 |
+- u8 rates_max = rates_eid[1]; |
2431 |
+- u8 *rates = cmd.bss.rates; |
2432 |
++ rates_max = rates_eid[1]; |
2433 |
++ if (rates_max > MAX_RATES) { |
2434 |
++ lbs_deb_join("invalid rates"); |
2435 |
++ goto out; |
2436 |
++ } |
2437 |
++ rates = cmd.bss.rates; |
2438 |
+ for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { |
2439 |
+ u8 hw_rate = lbs_rates[hw].bitrate / 5; |
2440 |
+ for (i = 0; i < rates_max; i++) { |
2441 |
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
2442 |
+index 20a57a48ae1e..36f8eb9f24a7 100644 |
2443 |
+--- a/drivers/pci/quirks.c |
2444 |
++++ b/drivers/pci/quirks.c |
2445 |
+@@ -4891,18 +4891,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); |
2446 |
+ |
2447 |
+ #ifdef CONFIG_PCI_ATS |
2448 |
+ /* |
2449 |
+- * Some devices have a broken ATS implementation causing IOMMU stalls. |
2450 |
+- * Don't use ATS for those devices. |
2451 |
++ * Some devices require additional driver setup to enable ATS. Don't use |
2452 |
++ * ATS for those devices as ATS will be enabled before the driver has had a |
2453 |
++ * chance to load and configure the device. |
2454 |
+ */ |
2455 |
+-static void quirk_no_ats(struct pci_dev *pdev) |
2456 |
++static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) |
2457 |
+ { |
2458 |
+- pci_info(pdev, "disabling ATS (broken on this device)\n"); |
2459 |
++ if (pdev->device == 0x7340 && pdev->revision != 0xc5) |
2460 |
++ return; |
2461 |
++ |
2462 |
++ pci_info(pdev, "disabling ATS\n"); |
2463 |
+ pdev->ats_cap = 0; |
2464 |
+ } |
2465 |
+ |
2466 |
+ /* AMD Stoney platform GPU */ |
2467 |
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); |
2468 |
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); |
2469 |
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); |
2470 |
++/* AMD Iceland dGPU */ |
2471 |
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); |
2472 |
++/* AMD Navi14 dGPU */ |
2473 |
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); |
2474 |
+ #endif /* CONFIG_PCI_ATS */ |
2475 |
+ |
2476 |
+ /* Freescale PCIe doesn't support MSI in RC mode */ |
2477 |
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c |
2478 |
+index 4d0fc6b01fa0..4c4781e5974f 100644 |
2479 |
+--- a/drivers/scsi/scsi_transport_iscsi.c |
2480 |
++++ b/drivers/scsi/scsi_transport_iscsi.c |
2481 |
+@@ -37,6 +37,8 @@ |
2482 |
+ |
2483 |
+ #define ISCSI_TRANSPORT_VERSION "2.0-870" |
2484 |
+ |
2485 |
++#define ISCSI_SEND_MAX_ALLOWED 10 |
2486 |
++ |
2487 |
+ static int dbg_session; |
2488 |
+ module_param_named(debug_session, dbg_session, int, |
2489 |
+ S_IRUGO | S_IWUSR); |
2490 |
+@@ -3680,6 +3682,7 @@ iscsi_if_rx(struct sk_buff *skb) |
2491 |
+ struct nlmsghdr *nlh; |
2492 |
+ struct iscsi_uevent *ev; |
2493 |
+ uint32_t group; |
2494 |
++ int retries = ISCSI_SEND_MAX_ALLOWED; |
2495 |
+ |
2496 |
+ nlh = nlmsg_hdr(skb); |
2497 |
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || |
2498 |
+@@ -3710,6 +3713,10 @@ iscsi_if_rx(struct sk_buff *skb) |
2499 |
+ break; |
2500 |
+ err = iscsi_if_send_reply(portid, nlh->nlmsg_type, |
2501 |
+ ev, sizeof(*ev)); |
2502 |
++ if (err == -EAGAIN && --retries < 0) { |
2503 |
++ printk(KERN_WARNING "Send reply failed, error %d\n", err); |
2504 |
++ break; |
2505 |
++ } |
2506 |
+ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); |
2507 |
+ skb_pull(skb, rlen); |
2508 |
+ } |
2509 |
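
iscsi_if_send_reply() can keep returning -EAGAIN while the receiving netlink socket is backed up, and the old loop would spin on that forever; the fix bounds the retries at ISCSI_SEND_MAX_ALLOWED before giving up. The general shape of such a bounded retry, as a sketch with hypothetical helpers:

	int retries = MAX_ATTEMPTS;	/* hypothetical bound, e.g. 10 */
	int err;

	do {
		err = try_send();
		if (err == -EAGAIN && --retries < 0) {
			pr_warn("send failed, error %d\n", err);
			break;		/* receiver too slow: give up */
		}
	} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
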
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
2510 |
+index 7d868d37ab5f..345b18d52ec6 100644 |
2511 |
+--- a/drivers/scsi/sd.c |
2512 |
++++ b/drivers/scsi/sd.c |
2513 |
+@@ -1969,9 +1969,13 @@ static int sd_done(struct scsi_cmnd *SCpnt) |
2514 |
+ } |
2515 |
+ break; |
2516 |
+ case REQ_OP_ZONE_REPORT: |
2517 |
++ /* To avoid that the block layer performs an incorrect |
2518 |
++ * bio_advance() call and restart of the remainder of |
2519 |
++ * incomplete report zone BIOs, always indicate a full |
2520 |
++ * completion of REQ_OP_ZONE_REPORT. |
2521 |
++ */ |
2522 |
+ if (!result) { |
2523 |
+- good_bytes = scsi_bufflen(SCpnt) |
2524 |
+- - scsi_get_resid(SCpnt); |
2525 |
++ good_bytes = scsi_bufflen(SCpnt); |
2526 |
+ scsi_set_resid(SCpnt, 0); |
2527 |
+ } else { |
2528 |
+ good_bytes = 0; |
2529 |
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
2530 |
+index 317d0f3f7a14..14bd54d0e79d 100644 |
2531 |
+--- a/drivers/target/iscsi/iscsi_target.c |
2532 |
++++ b/drivers/target/iscsi/iscsi_target.c |
2533 |
+@@ -4123,9 +4123,6 @@ int iscsit_close_connection( |
2534 |
+ iscsit_stop_nopin_response_timer(conn); |
2535 |
+ iscsit_stop_nopin_timer(conn); |
2536 |
+ |
2537 |
+- if (conn->conn_transport->iscsit_wait_conn) |
2538 |
+- conn->conn_transport->iscsit_wait_conn(conn); |
2539 |
+- |
2540 |
+ /* |
2541 |
+ * During Connection recovery drop unacknowledged out of order |
2542 |
+ * commands for this connection, and prepare the other commands |
2543 |
+@@ -4211,6 +4208,9 @@ int iscsit_close_connection( |
2544 |
+ target_sess_cmd_list_set_waiting(sess->se_sess); |
2545 |
+ target_wait_for_sess_cmds(sess->se_sess); |
2546 |
+ |
2547 |
++ if (conn->conn_transport->iscsit_wait_conn) |
2548 |
++ conn->conn_transport->iscsit_wait_conn(conn); |
2549 |
++ |
2550 |
+ ahash_request_free(conn->conn_tx_hash); |
2551 |
+ if (conn->conn_rx_hash) { |
2552 |
+ struct crypto_ahash *tfm; |
2553 |
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c |
2554 |
+index ee07162d35c7..cce0e23b2454 100644 |
2555 |
+--- a/fs/afs/cell.c |
2556 |
++++ b/fs/afs/cell.c |
2557 |
+@@ -135,8 +135,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, |
2558 |
+ _leave(" = -ENAMETOOLONG"); |
2559 |
+ return ERR_PTR(-ENAMETOOLONG); |
2560 |
+ } |
2561 |
+- if (namelen == 5 && memcmp(name, "@cell", 5) == 0) |
2562 |
++ |
2563 |
++ /* Prohibit cell names that contain unprintable chars, '/' and '@' or |
2564 |
++ * that begin with a dot. This also precludes "@cell". |
2565 |
++ */ |
2566 |
++ if (name[0] == '.') |
2567 |
+ return ERR_PTR(-EINVAL); |
2568 |
++ for (i = 0; i < namelen; i++) { |
2569 |
++ char ch = name[i]; |
2570 |
++ if (!isprint(ch) || ch == '/' || ch == '@') |
2571 |
++ return ERR_PTR(-EINVAL); |
2572 |
++ } |
2573 |
+ |
2574 |
+ _enter("%*.*s,%s", namelen, namelen, name, vllist); |
2575 |
+ |
2576 |
+diff --git a/fs/namei.c b/fs/namei.c |
2577 |
+index 914178cdbe94..2aad8042a05b 100644 |
2578 |
+--- a/fs/namei.c |
2579 |
++++ b/fs/namei.c |
2580 |
+@@ -1009,7 +1009,8 @@ static int may_linkat(struct path *link) |
2581 |
+ * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory |
2582 |
+ * should be allowed, or not, on files that already |
2583 |
+ * exist. |
2584 |
+- * @dir: the sticky parent directory |
2585 |
++ * @dir_mode: mode bits of directory |
2586 |
++ * @dir_uid: owner of directory |
2587 |
+ * @inode: the inode of the file to open |
2588 |
+ * |
2589 |
+ * Block an O_CREAT open of a FIFO (or a regular file) when: |
2590 |
+@@ -1025,18 +1026,18 @@ static int may_linkat(struct path *link) |
2591 |
+ * |
2592 |
+ * Returns 0 if the open is allowed, -ve on error. |
2593 |
+ */ |
2594 |
+-static int may_create_in_sticky(struct dentry * const dir, |
2595 |
++static int may_create_in_sticky(umode_t dir_mode, kuid_t dir_uid, |
2596 |
+ struct inode * const inode) |
2597 |
+ { |
2598 |
+ if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) || |
2599 |
+ (!sysctl_protected_regular && S_ISREG(inode->i_mode)) || |
2600 |
+- likely(!(dir->d_inode->i_mode & S_ISVTX)) || |
2601 |
+- uid_eq(inode->i_uid, dir->d_inode->i_uid) || |
2602 |
++ likely(!(dir_mode & S_ISVTX)) || |
2603 |
++ uid_eq(inode->i_uid, dir_uid) || |
2604 |
+ uid_eq(current_fsuid(), inode->i_uid)) |
2605 |
+ return 0; |
2606 |
+ |
2607 |
+- if (likely(dir->d_inode->i_mode & 0002) || |
2608 |
+- (dir->d_inode->i_mode & 0020 && |
2609 |
++ if (likely(dir_mode & 0002) || |
2610 |
++ (dir_mode & 0020 && |
2611 |
+ ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) || |
2612 |
+ (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) { |
2613 |
+ return -EACCES; |
2614 |
+@@ -3258,6 +3259,8 @@ static int do_last(struct nameidata *nd, |
2615 |
+ struct file *file, const struct open_flags *op) |
2616 |
+ { |
2617 |
+ struct dentry *dir = nd->path.dentry; |
2618 |
++ kuid_t dir_uid = dir->d_inode->i_uid; |
2619 |
++ umode_t dir_mode = dir->d_inode->i_mode; |
2620 |
+ int open_flag = op->open_flag; |
2621 |
+ bool will_truncate = (open_flag & O_TRUNC) != 0; |
2622 |
+ bool got_write = false; |
2623 |
+@@ -3393,7 +3396,7 @@ finish_open: |
2624 |
+ error = -EISDIR; |
2625 |
+ if (d_is_dir(nd->path.dentry)) |
2626 |
+ goto out; |
2627 |
+- error = may_create_in_sticky(dir, |
2628 |
++ error = may_create_in_sticky(dir_mode, dir_uid, |
2629 |
+ d_backing_inode(nd->path.dentry)); |
2630 |
+ if (unlikely(error)) |
2631 |
+ goto out; |
2632 |
+diff --git a/include/linux/memory.h b/include/linux/memory.h |
2633 |
+index a6ddefc60517..5c411365cdbe 100644 |
2634 |
+--- a/include/linux/memory.h |
2635 |
++++ b/include/linux/memory.h
+@@ -111,16 +111,16 @@ extern int register_memory_notifier(struct notifier_block *nb);
+ extern void unregister_memory_notifier(struct notifier_block *nb);
+ extern int register_memory_isolate_notifier(struct notifier_block *nb);
+ extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
+-int hotplug_memory_register(int nid, struct mem_section *section);
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-extern int unregister_memory_section(struct mem_section *);
+-#endif
++int create_memory_block_devices(unsigned long start, unsigned long size);
++void remove_memory_block_devices(unsigned long start, unsigned long size);
+ extern int memory_dev_init(void);
+ extern int memory_notify(unsigned long val, void *v);
+ extern int memory_isolate_notify(unsigned long val, void *v);
+ extern struct memory_block *find_memory_block_hinted(struct mem_section *,
+ struct memory_block *);
+ extern struct memory_block *find_memory_block(struct mem_section *);
++typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
++extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
+ #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
+ #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index 4915e6cd7fd5..d17d45c41a0b 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -108,12 +108,10 @@ static inline bool movable_node_is_enabled(void)
+ return movable_node_enabled;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-extern int arch_remove_memory(u64 start, u64 size,
+- struct vmem_altmap *altmap);
+-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+- unsigned long nr_pages, struct vmem_altmap *altmap);
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
++extern void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap);
++extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
++ struct vmem_altmap *altmap);
+
+ /* reasonably generic interface to expand the physical pages */
+ extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+@@ -303,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+ extern void try_offline_node(int nid);
+ extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+ extern void remove_memory(int nid, u64 start, u64 size);
++extern void __remove_memory(int nid, u64 start, u64 size);
+
+ #else
+ static inline bool is_mem_section_removable(unsigned long pfn,
+@@ -319,6 +318,7 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+ }
+
+ static inline void remove_memory(int nid, u64 start, u64 size) {}
++static inline void __remove_memory(int nid, u64 start, u64 size) {}
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+
+ extern void __ref free_area_init_core_hotplug(int nid);
+@@ -331,12 +331,14 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap, bool want_memblock);
+ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap);
++extern void remove_pfn_range_from_zone(struct zone *zone,
++ unsigned long start_pfn,
++ unsigned long nr_pages);
+ extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+ extern bool is_memblock_offlined(struct memory_block *mem);
+-extern void remove_memory(int nid, u64 start, u64 size);
+-extern int sparse_add_one_section(struct pglist_data *pgdat,
+- unsigned long start_pfn, struct vmem_altmap *altmap);
+-extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
++extern int sparse_add_one_section(int nid, unsigned long start_pfn,
++ struct vmem_altmap *altmap);
++extern void sparse_remove_one_section(struct mem_section *ms,
+ unsigned long map_offset, struct vmem_altmap *altmap);
+ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index d4b0c79d2924..d6791e2df30a 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -637,8 +637,7 @@ typedef struct pglist_data {
+ #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+ /*
+ * Must be held any time you expect node_start_pfn, node_present_pages
+- * or node_spanned_pages stay constant. Holding this will also
+- * guarantee that any pfn_valid() stays that way.
++ * or node_spanned_pages stay constant.
+ *
+ * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d5527e3828d1..84bbdcbb199a 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3579,6 +3579,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
+ int dev_get_alias(const struct net_device *, char *, size_t);
+ int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+ int __dev_set_mtu(struct net_device *, int);
++int dev_validate_mtu(struct net_device *dev, int mtu,
++ struct netlink_ext_ack *extack);
+ int dev_set_mtu_ext(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
+ int dev_set_mtu(struct net_device *, int);
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 1d100efe74ec..7e39049d2ce6 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -451,13 +451,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+ sizeof(*addr));
+ }
+
+-/* Calculate the bytes required to store the inclusive range of a-b */
+-static inline int
+-bitmap_bytes(u32 a, u32 b)
+-{
+- return 4 * ((((b - a + 8) / 8) + 3) / 4);
+-}
+-
+ #include <linux/netfilter/ipset/ip_set_timeout.h>
+ #include <linux/netfilter/ipset/ip_set_comment.h>
+ #include <linux/netfilter/ipset/ip_set_counter.h>
+diff --git a/include/linux/node.h b/include/linux/node.h
+index 257bb3d6d014..708939bae9aa 100644
+--- a/include/linux/node.h
++++ b/include/linux/node.h
+@@ -72,8 +72,7 @@ extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
+ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
+ extern int register_mem_sect_under_node(struct memory_block *mem_blk,
+ void *arg);
+-extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+- unsigned long phys_index);
++extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
+
+ #ifdef CONFIG_HUGETLBFS
+ extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+@@ -105,10 +104,8 @@ static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
+ {
+ return 0;
+ }
+-static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+- unsigned long phys_index)
++static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+ {
+- return 0;
+ }
+
+ static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
+index fdcf88bcf0ea..bb76c03898cc 100644
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
+ TP_PROTO(xen_mc_callback_fn_t fn, void *data),
+ TP_ARGS(fn, data),
+ TP_STRUCT__entry(
+- __field(xen_mc_callback_fn_t, fn)
++ /*
++ * Use field_struct to avoid is_signed_type()
++ * comparison of a function pointer.
++ */
++ __field_struct(xen_mc_callback_fn_t, fn)
+ __field(void *, data)
+ ),
+ TP_fast_assign(
+diff --git a/kernel/memremap.c b/kernel/memremap.c
+index 7c5fb8a208ac..331baad8efec 100644
+--- a/kernel/memremap.c
++++ b/kernel/memremap.c
+@@ -120,7 +120,9 @@ static void devm_memremap_pages_release(void *data)
+ struct device *dev = pgmap->dev;
+ struct resource *res = &pgmap->res;
+ resource_size_t align_start, align_size;
++ struct page *first_page;
+ unsigned long pfn;
++ int nid;
+
+ pgmap->kill(pgmap->ref);
+ for_each_device_pfn(pfn, pgmap)
+@@ -131,13 +133,17 @@ static void devm_memremap_pages_release(void *data)
+ align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
+ - align_start;
+
++ /* make sure to access a memmap that was actually initialized */
++ first_page = pfn_to_page(pfn_first(pgmap));
++
++ nid = page_to_nid(first_page);
++
+ mem_hotplug_begin();
+ if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+ pfn = align_start >> PAGE_SHIFT;
+- __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+- align_size >> PAGE_SHIFT, NULL);
++ __remove_pages(pfn, align_size >> PAGE_SHIFT, NULL);
+ } else {
+- arch_remove_memory(align_start, align_size,
++ arch_remove_memory(nid, align_start, align_size,
+ pgmap->altmap_valid ? &pgmap->altmap : NULL);
+ kasan_remove_zero_shadow(__va(align_start), align_size);
+ }
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 0fb92d0c7b20..dbd3c97d1501 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -49,6 +49,7 @@ struct hist_field {
+ struct ftrace_event_field *field;
+ unsigned long flags;
+ hist_field_fn_t fn;
++ unsigned int ref;
+ unsigned int size;
+ unsigned int offset;
+ unsigned int is_signed;
+@@ -1274,6 +1275,17 @@ static u64 hist_field_cpu(struct hist_field *hist_field,
+ return cpu;
+ }
+
++/**
++ * check_field_for_var_ref - Check if a VAR_REF field references a variable
++ * @hist_field: The VAR_REF field to check
++ * @var_data: The hist trigger that owns the variable
++ * @var_idx: The trigger variable identifier
++ *
++ * Check the given VAR_REF field to see whether or not it references
++ * the given variable associated with the given trigger.
++ *
++ * Return: The VAR_REF field if it does reference the variable, NULL if not
++ */
+ static struct hist_field *
+ check_field_for_var_ref(struct hist_field *hist_field,
+ struct hist_trigger_data *var_data,
+@@ -1324,6 +1336,18 @@ check_field_for_var_refs(struct hist_trigger_data *hist_data,
+ return found;
+ }
+
++/**
++ * find_var_ref - Check if a trigger has a reference to a trigger variable
++ * @hist_data: The hist trigger that might have a reference to the variable
++ * @var_data: The hist trigger that owns the variable
++ * @var_idx: The trigger variable identifier
++ *
++ * Check the list of var_refs[] on the first hist trigger to see
++ * whether any of them are references to the variable on the second
++ * trigger.
++ *
++ * Return: The VAR_REF field referencing the variable if so, NULL if not
++ */
+ static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
+ struct hist_trigger_data *var_data,
+ unsigned int var_idx)
+@@ -1350,6 +1374,20 @@ static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
+ return found;
+ }
+
++/**
++ * find_any_var_ref - Check if there is a reference to a given trigger variable
++ * @hist_data: The hist trigger
++ * @var_idx: The trigger variable identifier
++ *
++ * Check to see whether the given variable is currently referenced by
++ * any other trigger.
++ *
++ * The trigger the variable is defined on is explicitly excluded - the
++ * assumption being that a self-reference doesn't prevent a trigger
++ * from being removed.
++ *
++ * Return: The VAR_REF field referencing the variable if so, NULL if not
++ */
+ static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
+ unsigned int var_idx)
+ {
+@@ -1368,6 +1406,19 @@ static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
+ return found;
+ }
+
++/**
++ * check_var_refs - Check if there is a reference to any of trigger's variables
++ * @hist_data: The hist trigger
++ *
++ * A trigger can define one or more variables. If any one of them is
++ * currently referenced by any other trigger, this function will
++ * determine that.
++
++ * Typically used to determine whether or not a trigger can be removed
++ * - if there are any references to a trigger's variables, it cannot.
++ *
++ * Return: True if there is a reference to any of trigger's variables
++ */
+ static bool check_var_refs(struct hist_trigger_data *hist_data)
+ {
+ struct hist_field *field;
+@@ -1511,11 +1562,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
+ struct event_trigger_data *test;
+ struct hist_field *hist_field;
+
++ lockdep_assert_held(&event_mutex);
++
+ hist_field = find_var_field(hist_data, var_name);
+ if (hist_field)
+ return hist_field;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ test_data = test->private_data;
+ hist_field = find_var_field(test_data, var_name);
+@@ -1565,7 +1618,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
+ struct event_trigger_data *test;
+ struct hist_field *hist_field;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ test_data = test->private_data;
+ hist_field = find_var_field(test_data, var_name);
+@@ -2171,6 +2226,23 @@ static int contains_operator(char *str)
+ return field_op;
+ }
+
++static void get_hist_field(struct hist_field *hist_field)
++{
++ hist_field->ref++;
++}
++
++static void __destroy_hist_field(struct hist_field *hist_field)
++{
++ if (--hist_field->ref > 1)
++ return;
++
++ kfree(hist_field->var.name);
++ kfree(hist_field->name);
++ kfree(hist_field->type);
++
++ kfree(hist_field);
++}
++
+ static void destroy_hist_field(struct hist_field *hist_field,
+ unsigned int level)
+ {
+@@ -2182,14 +2254,13 @@ static void destroy_hist_field(struct hist_field *hist_field,
+ if (!hist_field)
+ return;
+
++ if (hist_field->flags & HIST_FIELD_FL_VAR_REF)
++ return; /* var refs will be destroyed separately */
++
+ for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
+ destroy_hist_field(hist_field->operands[i], level + 1);
+
+- kfree(hist_field->var.name);
+- kfree(hist_field->name);
+- kfree(hist_field->type);
+-
+- kfree(hist_field);
++ __destroy_hist_field(hist_field);
+ }
+
+ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+@@ -2206,6 +2277,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
+ if (!hist_field)
+ return NULL;
+
++ hist_field->ref = 1;
++
+ hist_field->hist_data = hist_data;
+
+ if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
+@@ -2316,6 +2389,12 @@ static void destroy_hist_fields(struct hist_trigger_data *hist_data)
+ hist_data->fields[i] = NULL;
+ }
+ }
++
++ for (i = 0; i < hist_data->n_var_refs; i++) {
++ WARN_ON(!(hist_data->var_refs[i]->flags & HIST_FIELD_FL_VAR_REF));
++ __destroy_hist_field(hist_data->var_refs[i]);
++ hist_data->var_refs[i] = NULL;
++ }
+ }
+
+ static int init_var_ref(struct hist_field *ref_field,
+@@ -2374,11 +2453,38 @@ static int init_var_ref(struct hist_field *ref_field,
+ goto out;
+ }
+
+-static struct hist_field *create_var_ref(struct hist_field *var_field,
++/**
++ * create_var_ref - Create a variable reference and attach it to trigger
++ * @hist_data: The trigger that will be referencing the variable
++ * @var_field: The VAR field to create a reference to
++ * @system: The optional system string
++ * @event_name: The optional event_name string
++ *
++ * Given a variable hist_field, create a VAR_REF hist_field that
++ * represents a reference to it.
++ *
++ * This function also adds the reference to the trigger that
++ * now references the variable.
++ *
++ * Return: The VAR_REF field if successful, NULL if not
++ */
++static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
++ struct hist_field *var_field,
+ char *system, char *event_name)
+ {
+ unsigned long flags = HIST_FIELD_FL_VAR_REF;
+ struct hist_field *ref_field;
++ int i;
++
++ /* Check if the variable already exists */
++ for (i = 0; i < hist_data->n_var_refs; i++) {
++ ref_field = hist_data->var_refs[i];
++ if (ref_field->var.idx == var_field->var.idx &&
++ ref_field->var.hist_data == var_field->hist_data) {
++ get_hist_field(ref_field);
++ return ref_field;
++ }
++ }
+
+ ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
+ if (ref_field) {
+@@ -2386,6 +2492,9 @@ static struct hist_field *create_var_ref(struct hist_field *var_field,
+ destroy_hist_field(ref_field, 0);
+ return NULL;
+ }
++
++ hist_data->var_refs[hist_data->n_var_refs] = ref_field;
++ ref_field->var_ref_idx = hist_data->n_var_refs++;
+ }
+
+ return ref_field;
+@@ -2459,7 +2568,8 @@ static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
+
+ var_field = find_event_var(hist_data, system, event_name, var_name);
+ if (var_field)
+- ref_field = create_var_ref(var_field, system, event_name);
++ ref_field = create_var_ref(hist_data, var_field,
++ system, event_name);
+
+ if (!ref_field)
+ hist_err_event("Couldn't find variable: $",
+@@ -2579,8 +2689,6 @@ static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
+ if (!s) {
+ hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var);
+ if (hist_field) {
+- hist_data->var_refs[hist_data->n_var_refs] = hist_field;
+- hist_field->var_ref_idx = hist_data->n_var_refs++;
+ if (var_name) {
+ hist_field = create_alias(hist_data, hist_field, var_name);
+ if (!hist_field) {
+@@ -2828,7 +2936,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
+ {
+ struct event_trigger_data *test;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (test->private_data == hist_data)
+ return test->filter_str;
+@@ -2879,9 +2989,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
+ struct event_trigger_data *test;
+ unsigned int n_keys;
+
++ lockdep_assert_held(&event_mutex);
++
+ n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ hist_data = test->private_data;
+
+@@ -3354,7 +3466,6 @@ static int onmax_create(struct hist_trigger_data *hist_data,
+ unsigned int var_ref_idx = hist_data->n_var_refs;
+ struct field_var *field_var;
+ char *onmax_var_str, *param;
+- unsigned long flags;
+ unsigned int i;
+ int ret = 0;
+
+@@ -3371,18 +3482,10 @@ static int onmax_create(struct hist_trigger_data *hist_data,
+ return -EINVAL;
+ }
+
+- flags = HIST_FIELD_FL_VAR_REF;
+- ref_field = create_hist_field(hist_data, NULL, flags, NULL);
++ ref_field = create_var_ref(hist_data, var_field, NULL, NULL);
+ if (!ref_field)
+ return -ENOMEM;
+
+- if (init_var_ref(ref_field, var_field, NULL, NULL)) {
+- destroy_hist_field(ref_field, 0);
+- ret = -ENOMEM;
+- goto out;
+- }
+- hist_data->var_refs[hist_data->n_var_refs] = ref_field;
+- ref_field->var_ref_idx = hist_data->n_var_refs++;
+ data->onmax.var = ref_field;
+
+ data->fn = onmax_save;
+@@ -3573,9 +3676,6 @@ static void save_synth_var_ref(struct hist_trigger_data *hist_data,
+ struct hist_field *var_ref)
+ {
+ hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;
+-
+- hist_data->var_refs[hist_data->n_var_refs] = var_ref;
+- var_ref->var_ref_idx = hist_data->n_var_refs++;
+ }
+
+ static int check_synth_field(struct synth_event *event,
+@@ -3730,7 +3830,8 @@ static int onmatch_create(struct hist_trigger_data *hist_data,
+ }
+
+ if (check_synth_field(event, hist_field, field_pos) == 0) {
+- var_ref = create_var_ref(hist_field, system, event_name);
++ var_ref = create_var_ref(hist_data, hist_field,
++ system, event_name);
+ if (!var_ref) {
+ kfree(p);
+ ret = -ENOMEM;
+@@ -4905,7 +5006,7 @@ static int hist_show(struct seq_file *m, void *v)
+ goto out_unlock;
+ }
+
+- list_for_each_entry_rcu(data, &event_file->triggers, list) {
++ list_for_each_entry(data, &event_file->triggers, list) {
+ if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+ hist_trigger_show(m, data, n++);
+ }
+@@ -5296,7 +5397,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
+ if (hist_data->attrs->name && !named_data)
+ goto new;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -5380,10 +5483,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
+ struct event_trigger_data *test, *named_data = NULL;
+ bool match = false;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (hist_trigger_match(data, test, named_data, false)) {
+ match = true;
+@@ -5401,10 +5506,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
+ struct hist_trigger_data *hist_data = data->private_data;
+ struct event_trigger_data *test, *named_data = NULL;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -5426,10 +5533,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *test, *named_data = NULL;
+ bool unregistered = false;
+
++ lockdep_assert_held(&event_mutex);
++
+ if (hist_data->attrs->name)
+ named_data = find_named_trigger(hist_data->attrs->name);
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ if (!hist_trigger_match(data, test, named_data, false))
+ continue;
+@@ -5455,7 +5564,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
+ struct hist_trigger_data *hist_data;
+ struct event_trigger_data *test;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
+ hist_data = test->private_data;
+ if (check_var_refs(hist_data))
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index cd12ecb66eb9..b05d1b6a6291 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -495,7 +495,9 @@ void update_cond_flag(struct trace_event_file *file)
+ struct event_trigger_data *data;
+ bool set_cond = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ if (data->filter || event_command_post_trigger(data->cmd_ops) ||
+ event_command_needs_rec(data->cmd_ops)) {
+ set_cond = true;
+@@ -530,7 +532,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *test;
+ int ret = 0;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
+ ret = -EEXIST;
+ goto out;
+@@ -575,7 +579,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
+ struct event_trigger_data *data;
+ bool unregistered = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
+ unregistered = true;
+ list_del_rcu(&data->list);
+@@ -1490,7 +1496,9 @@ int event_enable_register_trigger(char *glob,
+ struct event_trigger_data *test;
+ int ret = 0;
+
+- list_for_each_entry_rcu(test, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(test, &file->triggers, list) {
+ test_enable_data = test->private_data;
+ if (test_enable_data &&
+ (test->cmd_ops->trigger_type ==
+@@ -1530,7 +1538,9 @@ void event_enable_unregister_trigger(char *glob,
+ struct event_trigger_data *data;
+ bool unregistered = false;
+
+- list_for_each_entry_rcu(data, &file->triggers, list) {
++ lockdep_assert_held(&event_mutex);
++
++ list_for_each_entry(data, &file->triggers, list) {
+ enable_data = data->private_data;
+ if (enable_data &&
+ (data->cmd_ops->trigger_type ==
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 57f0d2a4ff34..c482c07bbab7 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -997,21 +997,21 @@ static void hmm_devmem_release(void *data)
+ struct hmm_devmem *devmem = data;
+ struct resource *resource = devmem->resource;
+ unsigned long start_pfn, npages;
+- struct zone *zone;
+ struct page *page;
++ int nid;
+
+ /* pages are dead and unused, undo the arch mapping */
+ start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
+ npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
+
+ page = pfn_to_page(start_pfn);
+- zone = page_zone(page);
++ nid = page_to_nid(page);
+
+ mem_hotplug_begin();
+ if (resource->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY)
+- __remove_pages(zone, start_pfn, npages, NULL);
++ __remove_pages(start_pfn, npages, NULL);
+ else
+- arch_remove_memory(start_pfn << PAGE_SHIFT,
++ arch_remove_memory(nid, start_pfn << PAGE_SHIFT,
+ npages << PAGE_SHIFT, NULL);
+ mem_hotplug_done();
+
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 413f6709039a..abc10dcbc9d5 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -255,14 +255,8 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
+ if (pfn_valid(phys_start_pfn))
+ return -EEXIST;
+
+- ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
+- if (ret < 0)
+- return ret;
+-
+- if (!want_memblock)
+- return 0;
+-
+- return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
++ ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
++ return ret < 0 ? ret : 0;
+ }
+
+ /*
+@@ -315,7 +309,6 @@ out:
+ return err;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
+ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
+ unsigned long start_pfn,
+@@ -456,10 +449,11 @@ static void update_pgdat_span(struct pglist_data *pgdat)
+ pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
+ }
+
+-static void __remove_zone(struct zone *zone, unsigned long start_pfn)
++void __ref remove_pfn_range_from_zone(struct zone *zone,
++ unsigned long start_pfn,
++ unsigned long nr_pages)
+ {
+ struct pglist_data *pgdat = zone->zone_pgdat;
+- int nr_pages = PAGES_PER_SECTION;
+ unsigned long flags;
+
+ #ifdef CONFIG_ZONE_DEVICE
+@@ -472,37 +466,33 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+ return;
+ #endif
+
++ clear_zone_contiguous(zone);
++
+ pgdat_resize_lock(zone->zone_pgdat, &flags);
+ shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
+ update_pgdat_span(pgdat);
+ pgdat_resize_unlock(zone->zone_pgdat, &flags);
++
++ set_zone_contiguous(zone);
+ }
+
+-static int __remove_section(struct zone *zone, struct mem_section *ms,
+- unsigned long map_offset, struct vmem_altmap *altmap)
++static void __remove_section(struct mem_section *ms, unsigned long map_offset,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn;
+ int scn_nr;
+- int ret = -EINVAL;
+
+- if (!valid_section(ms))
+- return ret;
+-
+- ret = unregister_memory_section(ms);
+- if (ret)
+- return ret;
++ if (WARN_ON_ONCE(!valid_section(ms)))
++ return;
+
+ scn_nr = __section_nr(ms);
+ start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
+- __remove_zone(zone, start_pfn);
+
+- sparse_remove_one_section(zone, ms, map_offset, altmap);
+- return 0;
++ sparse_remove_one_section(ms, map_offset, altmap);
+ }
+
+ /**
+- * __remove_pages() - remove sections of pages from a zone
+- * @zone: zone from which pages need to be removed
++ * __remove_pages() - remove sections of pages
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ * @altmap: alternative device page map or %NULL if default memmap is used
+@@ -512,34 +502,15 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
+ * sure that pages are marked reserved and zones are adjust properly by
+ * calling offline_pages().
+ */
+-int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+- unsigned long nr_pages, struct vmem_altmap *altmap)
++void __remove_pages(unsigned long phys_start_pfn, unsigned long nr_pages,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long i;
+ unsigned long map_offset = 0;
+- int sections_to_remove, ret = 0;
+-
+- /* In the ZONE_DEVICE case device driver owns the memory region */
+- if (is_dev_zone(zone)) {
+- if (altmap)
+- map_offset = vmem_altmap_offset(altmap);
+- } else {
+- resource_size_t start, size;
++ int sections_to_remove;
+
+- start = phys_start_pfn << PAGE_SHIFT;
+- size = nr_pages * PAGE_SIZE;
+-
+- ret = release_mem_region_adjustable(&iomem_resource, start,
+- size);
+- if (ret) {
+- resource_size_t endres = start + size - 1;
+-
+- pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
+- &start, &endres, ret);
+- }
+- }
+-
+- clear_zone_contiguous(zone);
++ if (altmap)
++ map_offset = vmem_altmap_offset(altmap);
+
+ /*
+ * We can only remove entire sections
+@@ -552,18 +523,10 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+ unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+
+ cond_resched();
+- ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
+- altmap);
++ __remove_section(__pfn_to_section(pfn), map_offset, altmap);
+ map_offset = 0;
+- if (ret)
+- break;
+ }
+-
+- set_zone_contiguous(zone);
+-
+- return ret;
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+ int set_online_page_callback(online_page_callback_t callback)
+ {
+@@ -932,6 +895,7 @@ failed_addition:
+ (unsigned long long) pfn << PAGE_SHIFT,
+ (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
+ memory_notify(MEM_CANCEL_ONLINE, &arg);
++ remove_pfn_range_from_zone(zone, pfn, nr_pages);
+ mem_hotplug_done();
+ return ret;
+ }
+@@ -1119,6 +1083,13 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
+ if (ret < 0)
+ goto error;
+
++ /* create memory block devices after memory was added */
++ ret = create_memory_block_devices(start, size);
++ if (ret) {
++ arch_remove_memory(nid, start, size, NULL);
++ goto error;
++ }
++
+ if (new_node) {
+ /* If sysfs file of new node can't be created, cpu on the node
+ * can't be hot-added. There is no rollback way now.
+@@ -1709,6 +1680,7 @@ repeat:
+ writeback_set_ratelimit();
+
+ memory_notify(MEM_OFFLINE, &arg);
++ remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
+ mem_hotplug_done();
+ return 0;
+
+@@ -1839,6 +1811,18 @@ static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
+ return 0;
+ }
+
++static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
++{
++ int nid = *(int *)arg;
++
++ /*
++ * If a memory block belongs to multiple nodes, the stored nid is not
++ * reliable. However, such blocks are always online (e.g., cannot get
++ * offlined) and, therefore, are still spanned by the node.
++ */
++ return mem->nid == nid ? -EEXIST : 0;
++}
++
+ /**
+ * try_offline_node
+ * @nid: the node ID
+@@ -1851,25 +1835,24 @@ static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
+ void try_offline_node(int nid)
+ {
+ pg_data_t *pgdat = NODE_DATA(nid);
+- unsigned long start_pfn = pgdat->node_start_pfn;
+- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+- unsigned long pfn;
+-
+- for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+- unsigned long section_nr = pfn_to_section_nr(pfn);
+-
+- if (!present_section_nr(section_nr))
+- continue;
++ int rc;
+
+- if (pfn_to_nid(pfn) != nid)
+- continue;
++ /*
++ * If the node still spans pages (especially ZONE_DEVICE), don't
++ * offline it. A node spans memory after move_pfn_range_to_zone(),
++ * e.g., after the memory block was onlined.
++ */
++ if (pgdat->node_spanned_pages)
++ return;
+
+- /*
+- * some memory sections of this node are not removed, and we
+- * can't offline node now.
+- */
++ /*
++ * Especially offline memory blocks might not be spanned by the
++ * node. They will get spanned by the node once they get onlined.
++ * However, they link to the node in sysfs and can get onlined later.
++ */
++ rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
++ if (rc)
+ return;
+- }
+
+ if (check_and_unmap_cpu_on_node(pgdat))
+ return;
+@@ -1883,6 +1866,26 @@ void try_offline_node(int nid)
+ }
+ EXPORT_SYMBOL(try_offline_node);
+
++static void __release_memory_resource(resource_size_t start,
++ resource_size_t size)
++{
++ int ret;
++
++ /*
++ * When removing memory in the same granularity as it was added,
++ * this function never fails. It might only fail if resources
++ * have to be adjusted or split. We'll ignore the error, as
++ * removing of memory cannot fail.
++ */
++ ret = release_mem_region_adjustable(&iomem_resource, start, size);
++ if (ret) {
++ resource_size_t endres = start + size - 1;
++
++ pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
++ &start, &endres, ret);
++ }
++}
++
+ /**
+ * remove_memory
+ * @nid: the node ID
+@@ -1893,7 +1896,7 @@ EXPORT_SYMBOL(try_offline_node);
+ * and online/offline operations before this call, as required by
+ * try_offline_node().
+ */
+-void __ref remove_memory(int nid, u64 start, u64 size)
++void __ref __remove_memory(int nid, u64 start, u64 size)
+ {
+ int ret;
+
+@@ -1916,11 +1919,22 @@ void __ref remove_memory(int nid, u64 start, u64 size)
+ memblock_free(start, size);
+ memblock_remove(start, size);
+
+- arch_remove_memory(start, size, NULL);
++ /* remove memory block devices before removing memory */
++ remove_memory_block_devices(start, size);
++
++ arch_remove_memory(nid, start, size, NULL);
++ __release_memory_resource(start, size);
+
+ try_offline_node(nid);
+
+ mem_hotplug_done();
+ }
++
++void remove_memory(int nid, u64 start, u64 size)
++{
++ lock_device_hotplug();
++ __remove_memory(nid, start, size);
++ unlock_device_hotplug();
++}
+ EXPORT_SYMBOL_GPL(remove_memory);
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 45950a074bdb..3b24ba903d9e 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -576,7 +576,6 @@ static void __kfree_section_memmap(struct page *memmap,
+
+ vmemmap_free(start, end, altmap);
+ }
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ static void free_map_bootmem(struct page *memmap)
+ {
+ unsigned long start = (unsigned long)memmap;
+@@ -584,7 +583,6 @@ static void free_map_bootmem(struct page *memmap)
+
+ vmemmap_free(start, end, NULL);
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+ #else
+ static struct page *__kmalloc_section_memmap(void)
+ {
+@@ -623,7 +621,6 @@ static void __kfree_section_memmap(struct page *memmap,
+ get_order(sizeof(struct page) * PAGES_PER_SECTION));
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ static void free_map_bootmem(struct page *memmap)
+ {
+ unsigned long maps_section_nr, removing_section_nr, i;
+@@ -653,7 +650,6 @@ static void free_map_bootmem(struct page *memmap)
+ put_page_bootmem(page);
+ }
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ /*
+@@ -661,25 +657,24 @@ static void free_map_bootmem(struct page *memmap)
+ * set. If this is <=0, then that means that the passed-in
+ * map was not consumed and must be freed.
+ */
+-int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+- unsigned long start_pfn, struct vmem_altmap *altmap)
++int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long section_nr = pfn_to_section_nr(start_pfn);
+ struct mem_section *ms;
+ struct page *memmap;
+ unsigned long *usemap;
+- unsigned long flags;
+ int ret;
+
+ /*
+ * no locking for this, because it does its own
+ * plus, it does a kmalloc
+ */
+- ret = sparse_index_init(section_nr, pgdat->node_id);
++ ret = sparse_index_init(section_nr, nid);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
+ ret = 0;
+- memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
++ memmap = kmalloc_section_memmap(section_nr, nid, altmap);
+ if (!memmap)
+ return -ENOMEM;
+ usemap = __kmalloc_section_usemap();
+@@ -688,8 +683,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+ return -ENOMEM;
+ }
+
+- pgdat_resize_lock(pgdat, &flags);
+-
+ ms = __pfn_to_section(start_pfn);
+ if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
+ ret = -EEXIST;
+@@ -708,7 +701,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+ sparse_init_one_section(ms, section_nr, memmap, usemap);
+
+ out:
+- pgdat_resize_unlock(pgdat, &flags);
+ if (ret < 0) {
+ kfree(usemap);
+ __kfree_section_memmap(memmap, altmap);
+@@ -716,7 +708,6 @@ out:
+ return ret;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ #ifdef CONFIG_MEMORY_FAILURE
+ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
+ {
+@@ -766,14 +757,12 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+ free_map_bootmem(memmap);
+ }
+
+-void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
+- unsigned long map_offset, struct vmem_altmap *altmap)
++void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
++ struct vmem_altmap *altmap)
+ {
+ struct page *memmap = NULL;
+- unsigned long *usemap = NULL, flags;
+- struct pglist_data *pgdat = zone->zone_pgdat;
++ unsigned long *usemap = NULL;
+
+- pgdat_resize_lock(pgdat, &flags);
+ if (ms->section_mem_map) {
+ usemap = ms->pageblock_flags;
+ memmap = sparse_decode_mem_map(ms->section_mem_map,
+@@ -781,11 +770,9 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
+ ms->section_mem_map = 0;
+ ms->pageblock_flags = NULL;
+ }
+- pgdat_resize_unlock(pgdat, &flags);
+
+ clear_hwpoisoned_pages(memmap + map_offset,
+ PAGES_PER_SECTION - map_offset);
+ free_section_usemap(memmap, usemap, altmap);
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 73ebacabfde8..1c0224e8fc78 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -7752,6 +7752,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
+ }
+ EXPORT_SYMBOL(__dev_set_mtu);
+
++int dev_validate_mtu(struct net_device *dev, int new_mtu,
++ struct netlink_ext_ack *extack)
++{
++ /* MTU must be positive, and in range */
++ if (new_mtu < 0 || new_mtu < dev->min_mtu) {
++ NL_SET_ERR_MSG(extack, "mtu less than device minimum");
++ return -EINVAL;
++ }
++
++ if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
++ NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
++ return -EINVAL;
++ }
++ return 0;
++}
++
+ /**
+ * dev_set_mtu_ext - Change maximum transfer unit
+ * @dev: device
+@@ -7768,16 +7784,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
+ if (new_mtu == dev->mtu)
+ return 0;
+
+- /* MTU must be positive, and in range */
+- if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+- NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+- return -EINVAL;
+- }
+-
+- if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+- NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+- return -EINVAL;
+- }
++ err = dev_validate_mtu(dev, new_mtu, extack);
++ if (err)
++ return err;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+@@ -8696,8 +8705,10 @@ int register_netdevice(struct net_device *dev)
+ goto err_uninit;
+
+ ret = netdev_register_kobject(dev);
+- if (ret)
++ if (ret) {
++ dev->reg_state = NETREG_UNREGISTERED;
+ goto err_uninit;
++ }
+ dev->reg_state = NETREG_REGISTERED;
+
+ __netdev_update_features(dev);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index bf9a3b6ac885..7614a4f42bfc 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -928,25 +928,30 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
++ /* Kobject_put later will trigger rx_queue_release call which
++ * decreases dev refcount: Take that reference here
++ */
++ dev_hold(queue->dev);
++
+ kobj->kset = dev->queues_kset;
+ error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
+ "rx-%u", index);
+ if (error)
+- return error;
+-
+- dev_hold(queue->dev);
++ goto err;
+
+ if (dev->sysfs_rx_queue_group) {
+ error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
+- if (error) {
+- kobject_put(kobj);
+- return error;
+- }
++ if (error)
++ goto err;
+ }
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return error;
++
++err:
++ kobject_put(kobj);
++ return error;
+ }
+ #endif /* CONFIG_SYSFS */
+
+@@ -1467,25 +1472,29 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
+ struct kobject *kobj = &queue->kobj;
+ int error = 0;
+
++ /* Kobject_put later will trigger netdev_queue_release call
++ * which decreases dev refcount: Take that reference here
++ */
++ dev_hold(queue->dev);
++
+ kobj->kset = dev->queues_kset;
+ error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+ "tx-%u", index);
+ if (error)
+- return error;
+-
+- dev_hold(queue->dev);
++ goto err;
+
+ #ifdef CONFIG_BQL
+ error = sysfs_create_group(kobj, &dql_group);
+- if (error) {
+- kobject_put(kobj);
+- return error;
+- }
++ if (error)
++ goto err;
+ #endif
+
+ kobject_uevent(kobj, KOBJ_ADD);
+-
+ return 0;
++
++err:
++ kobject_put(kobj);
++ return error;
+ }
+ #endif /* CONFIG_SYSFS */
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index dbb3c0c7c132..f51973f458e4 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2875,8 +2875,17 @@ struct net_device *rtnl_create_link(struct net *net,
+ dev->rtnl_link_ops = ops;
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
+
+- if (tb[IFLA_MTU])
+- dev->mtu = nla_get_u32(tb[IFLA_MTU]);
++ if (tb[IFLA_MTU]) {
++ u32 mtu = nla_get_u32(tb[IFLA_MTU]);
++ int err;
++
++ err = dev_validate_mtu(dev, mtu, NULL);
++ if (err) {
++ free_netdev(dev);
++ return ERR_PTR(err);
++ }
++ dev->mtu = mtu;
++ }
+ if (tb[IFLA_ADDRESS]) {
+ memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
+ nla_len(tb[IFLA_ADDRESS]));
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index f03a1b68e70f..14fd8a37a729 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -1203,10 +1203,8 @@ int ip_tunnel_init(struct net_device *dev)
+ iph->version = 4;
+ iph->ihl = 5;
+
+- if (tunnel->collect_md) {
+- dev->features |= NETIF_F_NETNS_LOCAL;
++ if (tunnel->collect_md)
+ netif_keep_dst(dev);
+- }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(ip_tunnel_init);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index af9361eba64a..e80eb1788f80 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2507,6 +2507,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
+ {
+ struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
+
++ tcp_sk(sk)->highest_sack = NULL;
+ while (p) {
+ struct sk_buff *skb = rb_to_skb(p);
+
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 02ff2dde9609..b371e66502c3 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -680,8 +680,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
+ * bandwidth sample. Delivered is in packets and interval_us in uS and
+ * ratio will be <<1 for most connections. So delivered is first scaled.
+ */
+- bw = (u64)rs->delivered * BW_UNIT;
+- do_div(bw, rs->interval_us);
++ bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
+
+ /* If this sample is application-limited, it is likely to have a very
+ * low delivered count that represents application behavior rather than
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e286a9647a73..38b6d8f90a44 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3149,6 +3149,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
+ tp->retransmit_skb_hint = NULL;
+ if (unlikely(skb == tp->lost_skb_hint))
+ tp->lost_skb_hint = NULL;
++ tcp_highest_sack_replace(sk, skb, next);
+ tcp_rtx_queue_unlink_and_free(skb, sk);
+ }
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 1cc20edf4762..cc4ba42052c2 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3165,6 +3165,7 @@ int tcp_send_synack(struct sock *sk)
+ if (!nskb)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
++ tcp_highest_sack_replace(sk, skb, nskb);
+ tcp_rtx_queue_unlink_and_free(skb, sk);
+ __skb_header_release(nskb);
+ tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 2eeae0455b14..0ef04cda1b27 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1305,7 +1305,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
+ if (likely(partial)) {
+ up->forward_deficit += size;
+ size = up->forward_deficit;
+- if (size < (sk->sk_rcvbuf >> 2))
++ if (size < (sk->sk_rcvbuf >> 2) &&
++ !skb_queue_empty(&up->reader_queue))
+ return;
+ } else {
+ size += up->forward_deficit;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 1f2d0022ba6f..90621d498fd1 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1486,7 +1486,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ dev->mtu -= 8;
+
+ if (tunnel->parms.collect_md) {
+- dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
+ ip6gre_tnl_init_features(dev);
+@@ -1914,7 +1913,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
+
+- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
+@@ -2223,7 +2221,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
+
+- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ netif_keep_dst(dev);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index e3b4237b2832..8e70a015c792 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1882,10 +1882,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
+ if (err)
+ return err;
+ ip6_tnl_link_config(t);
+- if (t->parms.collect_md) {
+- dev->features |= NETIF_F_NETNS_LOCAL;
++ if (t->parms.collect_md)
+ netif_keep_dst(dev);
+- }
+ return 0;
+ }
+
+diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
+index 60325dbfe88b..607709a8847c 100644
+--- a/net/ipv6/seg6_local.c
++++ b/net/ipv6/seg6_local.c
+@@ -28,6 +28,7 @@
+ #include <net/addrconf.h>
+ #include <net/ip6_route.h>
+ #include <net/dst_cache.h>
++#include <net/ip_tunnels.h>
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+ #include <net/seg6_hmac.h>
+ #endif
+@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+- skb->encapsulation = 0;
++ if (iptunnel_pull_offloads(skb))
++ return false;
+
+ return true;
+ }
+diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
+index af480ffefaf3..37f68062be41 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
++++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
+@@ -79,7 +79,7 @@ mtype_flush(struct ip_set *set)
+
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set);
+- memset(map->members, 0, map->memsize);
++ bitmap_zero(map->members, map->elements);
+ set->elements = 0;
+ set->ext_size = 0;
+ }
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
+index 488d6d05c65c..e3257077158f 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
+@@ -40,7 +40,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
+
+ /* Type structure */
+ struct bitmap_ip {
+- void *members; /* the set members */
++ unsigned long *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 elements; /* number of max elements in the set */
+@@ -223,7 +223,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+ u32 first_ip, u32 last_ip,
+ u32 elements, u32 hosts, u8 netmask)
+ {
+- map->members = ip_set_alloc(map->memsize);
++ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+@@ -313,7 +313,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ if (!map)
+ return -ENOMEM;
+
+- map->memsize = bitmap_bytes(0, elements - 1);
++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
+ set->variant = &bitmap_ip;
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+index 794e0335a864..9669cace4522 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+@@ -46,7 +46,7 @@ enum {
+
+ /* Type structure */
+ struct bitmap_ipmac {
+- void *members; /* the set members */
++ unsigned long *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 elements; /* number of max elements in the set */
+@@ -303,7 +303,7 @@ static bool
+ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+ u32 first_ip, u32 last_ip, u32 elements)
+ {
+- map->members = ip_set_alloc(map->memsize);
++ map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+@@ -364,7 +364,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+ if (!map) |
4119 |
+ return -ENOMEM; |
4120 |
+ |
4121 |
+- map->memsize = bitmap_bytes(0, elements - 1); |
4122 |
++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); |
4123 |
+ set->variant = &bitmap_ipmac; |
4124 |
+ if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { |
4125 |
+ kfree(map); |
4126 |
+diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c |
4127 |
+index b561ca8b3659..ae09f2af6a19 100644 |
4128 |
+--- a/net/netfilter/ipset/ip_set_bitmap_port.c |
4129 |
++++ b/net/netfilter/ipset/ip_set_bitmap_port.c |
4130 |
+@@ -34,7 +34,7 @@ MODULE_ALIAS("ip_set_bitmap:port"); |
4131 |
+ |
4132 |
+ /* Type structure */ |
4133 |
+ struct bitmap_port { |
4134 |
+- void *members; /* the set members */ |
4135 |
++ unsigned long *members; /* the set members */ |
4136 |
+ u16 first_port; /* host byte order, included in range */ |
4137 |
+ u16 last_port; /* host byte order, included in range */ |
4138 |
+ u32 elements; /* number of max elements in the set */ |
4139 |
+@@ -208,7 +208,7 @@ static bool |
4140 |
+ init_map_port(struct ip_set *set, struct bitmap_port *map, |
4141 |
+ u16 first_port, u16 last_port) |
4142 |
+ { |
4143 |
+- map->members = ip_set_alloc(map->memsize); |
4144 |
++ map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN); |
4145 |
+ if (!map->members) |
4146 |
+ return false; |
4147 |
+ map->first_port = first_port; |
4148 |
+@@ -248,7 +248,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], |
4149 |
+ return -ENOMEM; |
4150 |
+ |
4151 |
+ map->elements = elements; |
4152 |
+- map->memsize = bitmap_bytes(0, map->elements); |
4153 |
++ map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); |
4154 |
+ set->variant = &bitmap_port; |
4155 |
+ if (!init_map_port(set, map, first_port, last_port)) { |
4156 |
+ kfree(map); |
4157 |
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c |
4158 |
+index 7f0d3ffd5469..5881f6668817 100644 |
4159 |
+--- a/net/netfilter/nf_tables_api.c |
4160 |
++++ b/net/netfilter/nf_tables_api.c |
4161 |
+@@ -471,15 +471,28 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table) |
4162 |
+ |
4163 |
+ static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX]; |
4164 |
+ |
4165 |
++static const struct nft_chain_type * |
4166 |
++__nft_chain_type_get(u8 family, enum nft_chain_types type) |
4167 |
++{ |
4168 |
++ if (family >= NFPROTO_NUMPROTO || |
4169 |
++ type >= NFT_CHAIN_T_MAX) |
4170 |
++ return NULL; |
4171 |
++ |
4172 |
++ return chain_type[family][type]; |
4173 |
++} |
4174 |
++ |
4175 |
+ static const struct nft_chain_type * |
4176 |
+ __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family) |
4177 |
+ { |
4178 |
++ const struct nft_chain_type *type; |
4179 |
+ int i; |
4180 |
+ |
4181 |
+ for (i = 0; i < NFT_CHAIN_T_MAX; i++) { |
4182 |
+- if (chain_type[family][i] != NULL && |
4183 |
+- !nla_strcmp(nla, chain_type[family][i]->name)) |
4184 |
+- return chain_type[family][i]; |
4185 |
++ type = __nft_chain_type_get(family, i); |
4186 |
++ if (!type) |
4187 |
++ continue; |
4188 |
++ if (!nla_strcmp(nla, type->name)) |
4189 |
++ return type; |
4190 |
+ } |
4191 |
+ return NULL; |
4192 |
+ } |
4193 |
+@@ -1050,11 +1063,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx) |
4194 |
+ |
4195 |
+ void nft_register_chain_type(const struct nft_chain_type *ctype) |
4196 |
+ { |
4197 |
+- if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO)) |
4198 |
+- return; |
4199 |
+- |
4200 |
+ nfnl_lock(NFNL_SUBSYS_NFTABLES); |
4201 |
+- if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) { |
4202 |
++ if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) { |
4203 |
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES); |
4204 |
+ return; |
4205 |
+ } |
4206 |
+@@ -1511,7 +1521,10 @@ static int nft_chain_parse_hook(struct net *net, |
4207 |
+ hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); |
4208 |
+ hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); |
4209 |
+ |
4210 |
+- type = chain_type[family][NFT_CHAIN_T_DEFAULT]; |
4211 |
++ type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT); |
4212 |
++ if (!type) |
4213 |
++ return -EOPNOTSUPP; |
4214 |
++ |
4215 |
+ if (nla[NFTA_CHAIN_TYPE]) { |
4216 |
+ type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], |
4217 |
+ family, autoload); |
4218 |
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c |
4219 |
+index df4e3e0412ed..a003533ff4d9 100644 |
4220 |
+--- a/net/netfilter/nft_osf.c |
4221 |
++++ b/net/netfilter/nft_osf.c |
4222 |
+@@ -47,6 +47,9 @@ static int nft_osf_init(const struct nft_ctx *ctx, |
4223 |
+ struct nft_osf *priv = nft_expr_priv(expr); |
4224 |
+ int err; |
4225 |
+ |
4226 |
++ if (!tb[NFTA_OSF_DREG]) |
4227 |
++ return -EINVAL; |
4228 |
++ |
4229 |
+ priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]); |
4230 |
+ err = nft_validate_register_store(ctx, priv->dreg, NULL, |
4231 |
+ NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN); |
4232 |
+diff --git a/net/sched/ematch.c b/net/sched/ematch.c |
4233 |
+index 1331a4c2d8ff..750d88d0cfd9 100644 |
4234 |
+--- a/net/sched/ematch.c |
4235 |
++++ b/net/sched/ematch.c |
4236 |
+@@ -267,12 +267,12 @@ static int tcf_em_validate(struct tcf_proto *tp, |
4237 |
+ } |
4238 |
+ em->data = (unsigned long) v; |
4239 |
+ } |
4240 |
++ em->datalen = data_len; |
4241 |
+ } |
4242 |
+ } |
4243 |
+ |
4244 |
+ em->matchid = em_hdr->matchid; |
4245 |
+ em->flags = em_hdr->flags; |
4246 |
+- em->datalen = data_len; |
4247 |
+ em->net = net; |
4248 |
+ |
4249 |
+ err = 0; |
4250 |
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c |
4251 |
+index 20a511398389..bd1cbbfe5924 100644 |
4252 |
+--- a/net/x25/af_x25.c |
4253 |
++++ b/net/x25/af_x25.c |
4254 |
+@@ -765,6 +765,10 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, |
4255 |
+ if (sk->sk_state == TCP_ESTABLISHED) |
4256 |
+ goto out; |
4257 |
+ |
4258 |
++ rc = -EALREADY; /* Do nothing if call is already in progress */ |
4259 |
++ if (sk->sk_state == TCP_SYN_SENT) |
4260 |
++ goto out; |
4261 |
++ |
4262 |
+ sk->sk_state = TCP_CLOSE; |
4263 |
+ sock->state = SS_UNCONNECTED; |
4264 |
+ |
4265 |
+@@ -811,7 +815,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr, |
4266 |
+ /* Now the loop */ |
4267 |
+ rc = -EINPROGRESS; |
4268 |
+ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
4269 |
+- goto out_put_neigh; |
4270 |
++ goto out; |
4271 |
+ |
4272 |
+ rc = x25_wait_for_connection_establishment(sk); |
4273 |
+ if (rc) |
4274 |
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c |
4275 |
+index 895c40e8679f..3b0dcf38fd8b 100644 |
4276 |
+--- a/scripts/recordmcount.c |
4277 |
++++ b/scripts/recordmcount.c |
4278 |
+@@ -39,6 +39,10 @@ |
4279 |
+ #define R_AARCH64_ABS64 257 |
4280 |
+ #endif |
4281 |
+ |
4282 |
++#define R_ARM_PC24 1 |
4283 |
++#define R_ARM_THM_CALL 10 |
4284 |
++#define R_ARM_CALL 28 |
4285 |
++ |
4286 |
+ static int fd_map; /* File descriptor for file being modified. */ |
4287 |
+ static int mmap_failed; /* Boolean flag. */ |
4288 |
+ static char gpfx; /* prefix for global symbol name (sometimes '_') */ |
4289 |
+@@ -414,6 +418,18 @@ is_mcounted_section_name(char const *const txtname) |
4290 |
+ #define RECORD_MCOUNT_64 |
4291 |
+ #include "recordmcount.h" |
4292 |
+ |
4293 |
++static int arm_is_fake_mcount(Elf32_Rel const *rp) |
4294 |
++{ |
4295 |
++ switch (ELF32_R_TYPE(w(rp->r_info))) { |
4296 |
++ case R_ARM_THM_CALL: |
4297 |
++ case R_ARM_CALL: |
4298 |
++ case R_ARM_PC24: |
4299 |
++ return 0; |
4300 |
++ } |
4301 |
++ |
4302 |
++ return 1; |
4303 |
++} |
4304 |
++ |
4305 |
+ /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. |
4306 |
+ * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf |
4307 |
+ * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] |
4308 |
+@@ -515,6 +531,7 @@ do_file(char const *const fname) |
4309 |
+ altmcount = "__gnu_mcount_nc"; |
4310 |
+ make_nop = make_nop_arm; |
4311 |
+ rel_type_nop = R_ARM_NONE; |
4312 |
++ is_fake_mcount32 = arm_is_fake_mcount; |
4313 |
+ break; |
4314 |
+ case EM_AARCH64: |
4315 |
+ reltype = R_AARCH64_ABS64; |