Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 29 Jan 2020 16:16:09
Message-Id: 1580314547.a237553df3d9872194b04bb1688fb0ec658cf944.mpagano@gentoo
commit: a237553df3d9872194b04bb1688fb0ec658cf944
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 29 16:15:47 2020 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 29 16:15:47 2020 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a237553d

Linux patch 4.19.100

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1099_linux-4.19.100.patch | 4278 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4282 insertions(+)
diff --git a/0000_README b/0000_README
index cae3438..7c99cc6 100644
--- a/0000_README
+++ b/0000_README
@@ -435,6 +435,10 @@ Patch: 1098_linux-4.19.99.patch
From: https://www.kernel.org
Desc: Linux 4.19.99

+Patch: 1099_linux-4.19.100.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.100
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1099_linux-4.19.100.patch b/1099_linux-4.19.100.patch
new file mode 100644
index 0000000..1e9d910
--- /dev/null
+++ b/1099_linux-4.19.100.patch
@@ -0,0 +1,4278 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index e6b6ec974eeb..8bf0c0532046 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1946,6 +1946,12 @@
+ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
+ the default is off.
+
++ kpti= [ARM64] Control page table isolation of user
++ and kernel address spaces.
++ Default: enabled on cores which need mitigation.
++ 0: force disabled
++ 1: force enabled
++
+ kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
+ Default is 0 (don't ignore, but inject #GP)
+
+diff --git a/Makefile b/Makefile
+index a2be0c79eeb8..f1e428271abf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 99
++SUBLEVEL = 100
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 3b85c3ecac38..79e5cc70f1fd 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -661,21 +661,12 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return ret;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+- int ret;
+-
+- zone = page_zone(pfn_to_page(start_pfn));
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- if (ret)
+- pr_warn("%s: Problem encountered in __remove_pages() as"
+- " ret=%d\n", __func__, ret);
+
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+ #endif
+-#endif
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 9a6afd9f3f9b..84a012e42a7e 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -118,8 +118,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
+ return -ENODEV;
+ }
+
+-int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+- bool want_memblock)
++int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
++ bool want_memblock)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+@@ -139,30 +139,20 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
+ return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void __ref arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct page *page;
+ int ret;
+
+- /*
+- * If we have an altmap then we need to skip over any reserved PFNs
+- * when querying the zone.
+- */
+- page = pfn_to_page(start_pfn);
+- if (altmap)
+- page += vmem_altmap_offset(altmap);
+-
+- ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+- if (ret)
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+
+ /* Remove htab bolted mappings for this section of memory */
+ start = (unsigned long)__va(start);
+ flush_inval_dcache_range(start, start + size);
+ ret = remove_section_mapping(start, start + size);
++ WARN_ON_ONCE(ret);
+
+ /* Ensure all vmalloc mappings are flushed in case they also
+ * hit that section of memory
+@@ -170,11 +160,8 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
+ vm_unmap_aliases();
+
+ resize_hpt_for_hotplug(memblock_phys_mem_size());
+-
+- return ret;
+ }
+ #endif
+-#endif /* CONFIG_MEMORY_HOTPLUG */
+
+ /*
+ * walk_memory_resource() needs to make sure there is no holes in a given
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index dd3cc4632b9a..84d038ed3882 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -122,7 +122,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
+ */
+ end_pfn = base_pfn + nr_pages;
+ for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
+- remove_memory(nid, pfn << PAGE_SHIFT, bytes);
++ __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+ }
+ unlock_device_hotplug();
+ return base_pfn << PAGE_SHIFT;
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 62d3c72cd931..c2c6f32848e1 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -301,7 +301,7 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
+ nid = memory_add_physaddr_to_nid(base);
+
+ for (i = 0; i < sections_per_block; i++) {
+- remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
++ __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ base += MIN_MEMORY_BLOCK_SIZE;
+ }
+
+@@ -393,7 +393,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
+ block_sz = pseries_memory_block_size();
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+- remove_memory(nid, lmb->base_addr, block_sz);
++ __remove_memory(nid, lmb->base_addr, block_sz);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+@@ -680,7 +680,7 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
+
+ rc = dlpar_online_lmb(lmb);
+ if (rc) {
+- remove_memory(nid, lmb->base_addr, block_sz);
++ __remove_memory(nid, lmb->base_addr, block_sz);
+ invalidate_lmb_associativity_index(lmb);
+ } else {
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index 3fa3e5323612..379a925d9e82 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -239,15 +239,13 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return rc;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+- /*
+- * There is no hardware or firmware interface which could trigger a
+- * hot memory remove on s390. So there is nothing that needs to be
+- * implemented.
+- */
+- return -EBUSY;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ __remove_pages(start_pfn, nr_pages, altmap);
++ vmem_remove_mapping(start, size);
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
+index 7713c084d040..47882be91121 100644
+--- a/arch/sh/mm/init.c
++++ b/arch/sh/mm/init.c
+@@ -443,21 +443,12 @@ int memory_add_physaddr_to_nid(u64 addr)
+ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+ #endif
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = PFN_DOWN(start);
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+- int ret;
+-
+- zone = page_zone(pfn_to_page(start_pfn));
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- if (unlikely(ret))
+- pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
+- ret);
+
+- return ret;
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index 979e0a02cbe1..79b95910fd9f 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -860,18 +860,15 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct zone *zone;
+
+- zone = page_zone(pfn_to_page(start_pfn));
+- return __remove_pages(zone, start_pfn, nr_pages, altmap);
++ __remove_pages(start_pfn, nr_pages, altmap);
+ }
+ #endif
+-#endif
+
+ int kernel_set_to_readonly __read_mostly;
+
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index a3e9c6ee3cf2..81e85a8dd300 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1132,7 +1132,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
+ remove_pagetable(start, end, false, altmap);
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+ static void __meminit
+ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
+ {
+@@ -1142,25 +1141,15 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
+ remove_pagetable(start, end, true, NULL);
+ }
+
+-int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
++void __ref arch_remove_memory(int nid, u64 start, u64 size,
++ struct vmem_altmap *altmap)
+ {
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+- struct page *page = pfn_to_page(start_pfn);
+- struct zone *zone;
+- int ret;
+
+- /* With altmap the first mapped page is offset from @start */
+- if (altmap)
+- page += vmem_altmap_offset(altmap);
+- zone = page_zone(page);
+- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
+- WARN_ON_ONCE(ret);
++ __remove_pages(start_pfn, nr_pages, altmap);
+ kernel_physical_mapping_remove(start, start + size);
+-
+- return ret;
+ }
+-#endif
+ #endif /* CONFIG_MEMORY_HOTPLUG */
+
+ static struct kcore_list kcore_vsyscall;
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index 2ccfbb61ca89..8fe0960ea572 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -282,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
+ nid = memory_add_physaddr_to_nid(info->start_addr);
+
+ acpi_unbind_memory_blocks(info);
+- remove_memory(nid, info->start_addr, info->length);
++ __remove_memory(nid, info->start_addr, info->length);
+ list_del(&info->list);
+ kfree(info);
+ }
+diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
+index 4e46dc9e41ad..112b1001c269 100644
+--- a/drivers/atm/firestream.c
++++ b/drivers/atm/firestream.c
+@@ -927,6 +927,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ }
+ if (!to) {
+ printk ("No more free channels for FS50..\n");
++ kfree(vcc);
+ return -EBUSY;
+ }
+ vcc->channo = dev->channo;
+@@ -937,6 +938,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
+ ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
+ printk ("Channel is in use for FS155.\n");
++ kfree(vcc);
+ return -EBUSY;
+ }
+ }
+@@ -950,6 +952,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
+ tc, sizeof (struct fs_transmit_config));
+ if (!tc) {
+ fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
++ kfree(vcc);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index ac1574a69610..e270abc86d46 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -39,6 +39,11 @@ static inline int base_memory_block_id(int section_nr)
+ return section_nr / sections_per_block;
+ }
+
++static inline int pfn_to_block_id(unsigned long pfn)
++{
++ return base_memory_block_id(pfn_to_section_nr(pfn));
++}
++
+ static int memory_subsys_online(struct device *dev);
+ static int memory_subsys_offline(struct device *dev);
+
+@@ -230,13 +235,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
+ * OK to have direct references to sparsemem variables in here.
+ */
+ static int
+-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
++memory_block_action(unsigned long start_section_nr, unsigned long action,
++ int online_type)
+ {
+ unsigned long start_pfn;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ int ret;
+
+- start_pfn = section_nr_to_pfn(phys_index);
++ start_pfn = section_nr_to_pfn(start_section_nr);
+
+ switch (action) {
+ case MEM_ONLINE:
+@@ -250,7 +256,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
+ break;
+ default:
+ WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
+- "%ld\n", __func__, phys_index, action, action);
++ "%ld\n", __func__, start_section_nr, action, action);
+ ret = -EINVAL;
+ }
+
+@@ -590,10 +596,9 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
+-struct memory_block *find_memory_block_hinted(struct mem_section *section,
+- struct memory_block *hint)
++static struct memory_block *find_memory_block_by_id(int block_id,
++ struct memory_block *hint)
+ {
+- int block_id = base_memory_block_id(__section_nr(section));
+ struct device *hintdev = hint ? &hint->dev : NULL;
+ struct device *dev;
+
+@@ -605,6 +610,14 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
+ return to_memory_block(dev);
+ }
+
++struct memory_block *find_memory_block_hinted(struct mem_section *section,
++ struct memory_block *hint)
++{
++ int block_id = base_memory_block_id(__section_nr(section));
++
++ return find_memory_block_by_id(block_id, hint);
++}
++
+ /*
+ * For now, we have a linear search to go find the appropriate
+ * memory_block corresponding to a particular phys_index. If
+@@ -659,25 +672,28 @@ int register_memory(struct memory_block *memory)
+ return ret;
+ }
+
+-static int init_memory_block(struct memory_block **memory,
+- struct mem_section *section, unsigned long state)
++static int init_memory_block(struct memory_block **memory, int block_id,
++ unsigned long state)
+ {
+ struct memory_block *mem;
+ unsigned long start_pfn;
+- int scn_nr;
+ int ret = 0;
+
++ mem = find_memory_block_by_id(block_id, NULL);
++ if (mem) {
++ put_device(&mem->dev);
++ return -EEXIST;
++ }
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+- scn_nr = __section_nr(section);
+- mem->start_section_nr =
+- base_memory_block_id(scn_nr) * sections_per_block;
++ mem->start_section_nr = block_id * sections_per_block;
+ mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
+ mem->state = state;
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ mem->phys_device = arch_get_memory_phys_device(start_pfn);
++ mem->nid = NUMA_NO_NODE;
+
+ ret = register_memory(mem);
+
+@@ -688,101 +704,98 @@ static int init_memory_block(struct memory_block **memory,
+ static int add_memory_block(int base_section_nr)
+ {
+ struct memory_block *mem;
+- int i, ret, section_count = 0, section_nr;
++ int i, ret, section_count = 0;
+
+ for (i = base_section_nr;
+- (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+- i++) {
+- if (!present_section_nr(i))
+- continue;
+- if (section_count == 0)
+- section_nr = i;
+- section_count++;
+- }
++ i < base_section_nr + sections_per_block;
++ i++)
++ if (present_section_nr(i))
++ section_count++;
+
+ if (section_count == 0)
+ return 0;
+- ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
++ ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
++ MEM_ONLINE);
+ if (ret)
+ return ret;
+ mem->section_count = section_count;
+ return 0;
+ }
+
++static void unregister_memory(struct memory_block *memory)
++{
++ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
++ return;
++
++ /* drop the ref. we got via find_memory_block() */
++ put_device(&memory->dev);
++ device_unregister(&memory->dev);
++}
++
+ /*
+- * need an interface for the VM to add new memory regions,
+- * but without onlining it.
++ * Create memory block devices for the given memory area. Start and size
++ * have to be aligned to memory block granularity. Memory block devices
++ * will be initialized as offline.
+ */
+-int hotplug_memory_register(int nid, struct mem_section *section)
++int create_memory_block_devices(unsigned long start, unsigned long size)
+ {
+- int ret = 0;
++ const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
++ int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
++ unsigned long block_id;
++ int ret = 0;
+
+- mutex_lock(&mem_sysfs_mutex);
++ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
++ !IS_ALIGNED(size, memory_block_size_bytes())))
++ return -EINVAL;
+
+- mem = find_memory_block(section);
+- if (mem) {
+- mem->section_count++;
+- put_device(&mem->dev);
+- } else {
+- ret = init_memory_block(&mem, section, MEM_OFFLINE);
++ mutex_lock(&mem_sysfs_mutex);
++ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
++ ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
+ if (ret)
+- goto out;
+- mem->section_count++;
++ break;
++ mem->section_count = sections_per_block;
++ }
++ if (ret) {
++ end_block_id = block_id;
++ for (block_id = start_block_id; block_id != end_block_id;
++ block_id++) {
++ mem = find_memory_block_by_id(block_id, NULL);
++ mem->section_count = 0;
++ unregister_memory(mem);
++ }
+ }
+-
+-out:
+ mutex_unlock(&mem_sysfs_mutex);
+ return ret;
+ }
+
+-#ifdef CONFIG_MEMORY_HOTREMOVE
+-static void
+-unregister_memory(struct memory_block *memory)
+-{
+- BUG_ON(memory->dev.bus != &memory_subsys);
+-
+- /* drop the ref. we got in remove_memory_block() */
+- put_device(&memory->dev);
+- device_unregister(&memory->dev);
+-}
+-
+-static int remove_memory_section(unsigned long node_id,
+- struct mem_section *section, int phys_device)
++/*
++ * Remove memory block devices for the given memory area. Start and size
++ * have to be aligned to memory block granularity. Memory block devices
++ * have to be offline.
++ */
++void remove_memory_block_devices(unsigned long start, unsigned long size)
+ {
++ const int start_block_id = pfn_to_block_id(PFN_DOWN(start));
++ const int end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
++ int block_id;
+
+- mutex_lock(&mem_sysfs_mutex);
+-
+- /*
+- * Some users of the memory hotplug do not want/need memblock to
+- * track all sections. Skip over those.
+- */
+- mem = find_memory_block(section);
+- if (!mem)
+- goto out_unlock;
++ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
++ !IS_ALIGNED(size, memory_block_size_bytes())))
++ return;
+
+- unregister_mem_sect_under_nodes(mem, __section_nr(section));
+-
+- mem->section_count--;
+- if (mem->section_count == 0)
++ mutex_lock(&mem_sysfs_mutex);
++ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
++ mem = find_memory_block_by_id(block_id, NULL);
++ if (WARN_ON_ONCE(!mem))
++ continue;
++ mem->section_count = 0;
++ unregister_memory_block_under_nodes(mem);
+ unregister_memory(mem);
+- else
+- put_device(&mem->dev);
+-
+-out_unlock:
++ }
+ mutex_unlock(&mem_sysfs_mutex);
+- return 0;
+-}
+-
+-int unregister_memory_section(struct mem_section *section)
+-{
+- if (!present_section(section))
+- return -EINVAL;
+-
+- return remove_memory_section(0, section, 0);
+ }
+-#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+ /* return true if the memory block is offlined, otherwise, return false */
+ bool is_memblock_offlined(struct memory_block *mem)
+@@ -849,3 +862,39 @@ out:
+ printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
+ return ret;
+ }
++
++struct for_each_memory_block_cb_data {
++ walk_memory_blocks_func_t func;
++ void *arg;
++};
++
++static int for_each_memory_block_cb(struct device *dev, void *data)
++{
++ struct memory_block *mem = to_memory_block(dev);
++ struct for_each_memory_block_cb_data *cb_data = data;
++
++ return cb_data->func(mem, cb_data->arg);
++}
++
++/**
++ * for_each_memory_block - walk through all present memory blocks
++ *
++ * @arg: argument passed to func
++ * @func: callback for each memory block walked
++ *
++ * This function walks through all present memory blocks, calling func on
++ * each memory block.
++ *
++ * In case func() returns an error, walking is aborted and the error is
++ * returned.
++ */
++int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
++{
++ struct for_each_memory_block_cb_data cb_data = {
++ .func = func,
++ .arg = arg,
++ };
++
++ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
++ for_each_memory_block_cb);
++}
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index c3968e2d0a98..f3565c2dbc52 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -409,8 +409,6 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ int ret, nid = *(int *)arg;
+ unsigned long pfn, sect_start_pfn, sect_end_pfn;
+
+- mem_blk->nid = nid;
+-
+ sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
+ sect_end_pfn += PAGES_PER_SECTION - 1;
+@@ -439,6 +437,13 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ if (page_nid != nid)
+ continue;
+ }
++
++ /*
++ * If this memory block spans multiple nodes, we only indicate
++ * the last processed node.
++ */
++ mem_blk->nid = nid;
++
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+@@ -453,40 +458,19 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg)
+ return 0;
+ }
+
+-/* unregister memory section under all nodes that it spans */
+-int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+- unsigned long phys_index)
++/*
++ * Unregister a memory block device under the node it spans. Memory blocks
++ * with multiple nodes cannot be offlined and therefore also never be removed.
++ */
++void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+ {
+- NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
+- unsigned long pfn, sect_start_pfn, sect_end_pfn;
+-
+- if (!mem_blk) {
+- NODEMASK_FREE(unlinked_nodes);
+- return -EFAULT;
+- }
+- if (!unlinked_nodes)
+- return -ENOMEM;
+- nodes_clear(*unlinked_nodes);
+-
+- sect_start_pfn = section_nr_to_pfn(phys_index);
+- sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
+- for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+- int nid;
++ if (mem_blk->nid == NUMA_NO_NODE)
++ return;
+
+- nid = get_nid_for_pfn(pfn);
+- if (nid < 0)
+- continue;
+- if (!node_online(nid))
+- continue;
+- if (node_test_and_set(nid, *unlinked_nodes))
+- continue;
+- sysfs_remove_link(&node_devices[nid]->dev.kobj,
+- kobject_name(&mem_blk->dev.kobj));
+- sysfs_remove_link(&mem_blk->dev.kobj,
+- kobject_name(&node_devices[nid]->dev.kobj));
+- }
+- NODEMASK_FREE(unlinked_nodes);
+- return 0;
++ sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
++ kobject_name(&mem_blk->dev.kobj));
++ sysfs_remove_link(&mem_blk->dev.kobj,
++ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
+ }
+
+ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
+diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
+index eb2a0a73cbed..d670f7000cbb 100644
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -14,6 +14,7 @@
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
++#include <crypto/skcipher.h>
+
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -170,13 +171,15 @@ static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(op->fallback.blk,
++ tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
++ ret = crypto_skcipher_setkey(op->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++ tfm->crt_flags |= crypto_skcipher_get_flags(op->fallback.blk) &
++ CRYPTO_TFM_RES_MASK;
+ }
+ return ret;
+ }
+@@ -185,33 +188,28 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_decrypt(req);
+ }
++
+ static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_encrypt(req);
+ }
+
+ static void
833 + struct blkcipher_walk walk;
834 + int err, ret;
835 +
836 ++ if (nbytes % AES_BLOCK_SIZE)
837 ++ return -EINVAL;
838 ++
839 + if (unlikely(op->keylen != AES_KEYSIZE_128))
840 + return fallback_blk_dec(desc, dst, src, nbytes);
841 +
842 +@@ -343,6 +344,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
843 + struct blkcipher_walk walk;
844 + int err, ret;
845 +
846 ++ if (nbytes % AES_BLOCK_SIZE)
847 ++ return -EINVAL;
848 ++
849 + if (unlikely(op->keylen != AES_KEYSIZE_128))
850 + return fallback_blk_enc(desc, dst, src, nbytes);
851 +
852 +@@ -370,8 +374,9 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
853 + const char *name = crypto_tfm_alg_name(tfm);
854 + struct geode_aes_op *op = crypto_tfm_ctx(tfm);
855 +
856 +- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
857 +- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
858 ++ op->fallback.blk = crypto_alloc_skcipher(name, 0,
859 ++ CRYPTO_ALG_ASYNC |
860 ++ CRYPTO_ALG_NEED_FALLBACK);
861 +
862 + if (IS_ERR(op->fallback.blk)) {
863 + printk(KERN_ERR "Error allocating fallback algo %s\n", name);
864 +@@ -385,7 +390,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
865 + {
866 + struct geode_aes_op *op = crypto_tfm_ctx(tfm);
867 +
868 +- crypto_free_blkcipher(op->fallback.blk);
869 ++ crypto_free_skcipher(op->fallback.blk);
870 + op->fallback.blk = NULL;
871 + }
872 +
873 +@@ -424,6 +429,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
874 + struct blkcipher_walk walk;
875 + int err, ret;
876 +
877 ++ if (nbytes % AES_BLOCK_SIZE)
878 ++ return -EINVAL;
879 ++
880 + if (unlikely(op->keylen != AES_KEYSIZE_128))
881 + return fallback_blk_dec(desc, dst, src, nbytes);
882 +
883 +@@ -454,6 +462,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
884 + struct blkcipher_walk walk;
885 + int err, ret;
886 +
887 ++ if (nbytes % AES_BLOCK_SIZE)
888 ++ return -EINVAL;
889 ++
890 + if (unlikely(op->keylen != AES_KEYSIZE_128))
891 + return fallback_blk_enc(desc, dst, src, nbytes);
892 +
893 +diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
894 +index f442ca972e3c..c5763a041bb8 100644
895 +--- a/drivers/crypto/geode-aes.h
896 ++++ b/drivers/crypto/geode-aes.h
897 +@@ -64,7 +64,7 @@ struct geode_aes_op {
898 + u8 *iv;
899 +
900 + union {
901 +- struct crypto_blkcipher *blk;
902 ++ struct crypto_skcipher *blk;
903 + struct crypto_cipher *cip;
904 + } fallback;
905 + u32 keylen;
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index f4c7516eb989..0a87c5b51286 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -296,9 +296,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
+ long reg;
+
+ if (bypass_attn & (1 << channel))
+- reg = (volt * 1024) / 2250;
++ reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
+ else
+- reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
++ reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
++ (r[0] + r[1]) * 2250);
+ return clamp_val(reg, 0, 1023) & (0xff << 2);
+ }
+
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index 6b3559f58b67..d34de21d43ad 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
+
+ #define to_hwmon_attr(d) \
+ container_of(d, struct hwmon_device_attribute, dev_attr)
++#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
+
+ /*
+ * Thermal zone information
+@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
+ * also provides the sensor index.
+ */
+ struct hwmon_thermal_data {
+- struct hwmon_device *hwdev; /* Reference to hwmon device */
++ struct device *dev; /* Reference to hwmon device */
+ int index; /* sensor index */
+ };
+
+@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
+ NULL
+ };
+
++static void hwmon_free_attrs(struct attribute **attrs)
++{
++ int i;
++
++ for (i = 0; attrs[i]; i++) {
++ struct device_attribute *dattr = to_dev_attr(attrs[i]);
++ struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
++
++ kfree(hattr);
++ }
++ kfree(attrs);
++}
++
+ static void hwmon_dev_release(struct device *dev)
+ {
+- kfree(to_hwmon_device(dev));
++ struct hwmon_device *hwdev = to_hwmon_device(dev);
++
++ if (hwdev->group.attrs)
++ hwmon_free_attrs(hwdev->group.attrs);
++ kfree(hwdev->groups);
++ kfree(hwdev);
+ }
+
+ static struct class hwmon_class = {
+@@ -121,11 +140,11 @@ static DEFINE_IDA(hwmon_ida);
+ static int hwmon_thermal_get_temp(void *data, int *temp)
+ {
+ struct hwmon_thermal_data *tdata = data;
+- struct hwmon_device *hwdev = tdata->hwdev;
++ struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
+ int ret;
+ long t;
+
+- ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
++ ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
+ tdata->index, &t);
+ if (ret < 0)
+ return ret;
+@@ -139,8 +158,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+ .get_temp = hwmon_thermal_get_temp,
+ };
+
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ struct hwmon_thermal_data *tdata;
+ struct thermal_zone_device *tzd;
+@@ -149,10 +167,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
+ if (!tdata)
+ return -ENOMEM;
+
+- tdata->hwdev = hwdev;
++ tdata->dev = dev;
+ tdata->index = index;
+
+- tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
++ tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
+ &hwmon_thermal_ops);
+ /*
+ * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+@@ -164,8 +182,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
+ return 0;
+ }
+ #else
+-static int hwmon_thermal_add_sensor(struct device *dev,
+- struct hwmon_device *hwdev, int index)
++static int hwmon_thermal_add_sensor(struct device *dev, int index)
+ {
+ return 0;
+ }
+@@ -242,8 +259,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+ (type == hwmon_fan && attr == hwmon_fan_label);
+ }
+
+-static struct attribute *hwmon_genattr(struct device *dev,
+- const void *drvdata,
++static struct attribute *hwmon_genattr(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr,
+ int index,
+@@ -271,7 +287,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
+ if ((mode & S_IWUGO) && !ops->write)
+ return ERR_PTR(-EINVAL);
+
+- hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
++ hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
+ if (!hattr)
+ return ERR_PTR(-ENOMEM);
+
+@@ -478,8 +494,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
+ return n;
+ }
+
+-static int hwmon_genattrs(struct device *dev,
+- const void *drvdata,
++static int hwmon_genattrs(const void *drvdata,
+ struct attribute **attrs,
+ const struct hwmon_ops *ops,
+ const struct hwmon_channel_info *info)
+@@ -505,7 +520,7 @@ static int hwmon_genattrs(struct device *dev,
+ attr_mask &= ~BIT(attr);
+ if (attr >= template_size)
+ return -EINVAL;
+- a = hwmon_genattr(dev, drvdata, info->type, attr, i,
++ a = hwmon_genattr(drvdata, info->type, attr, i,
+ templates[attr], ops);
+ if (IS_ERR(a)) {
+ if (PTR_ERR(a) != -ENOENT)
+@@ -519,8 +534,7 @@ static int hwmon_genattrs(struct device *dev,
+ }
+
+ static struct attribute **
+-__hwmon_create_attrs(struct device *dev, const void *drvdata,
+- const struct hwmon_chip_info *chip)
++__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
+ {
+ int ret, i, aindex = 0, nattrs = 0;
+ struct attribute **attrs;
+@@ -531,15 +545,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
+ if (nattrs == 0)
+ return ERR_PTR(-EINVAL);
+
+- attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
++ attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; chip->info[i]; i++) {
+- ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
++ ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
+ chip->info[i]);
+- if (ret < 0)
++ if (ret < 0) {
++ hwmon_free_attrs(attrs);
+ return ERR_PTR(ret);
++ }
+ aindex += ret;
+ }
+
+@@ -581,14 +597,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ for (i = 0; groups[i]; i++)
+ ngroups++;
+
+- hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
+- GFP_KERNEL);
++ hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
+ if (!hwdev->groups) {
+ err = -ENOMEM;
+ goto free_hwmon;
+ }
+
+- attrs = __hwmon_create_attrs(dev, drvdata, chip);
++ attrs = __hwmon_create_attrs(drvdata, chip);
+ if (IS_ERR(attrs)) {
+ err = PTR_ERR(attrs);
+ goto free_hwmon;
+@@ -633,8 +648,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ hwmon_temp_input, j))
+ continue;
+ if (info[i]->config[j] & HWMON_T_INPUT) {
+- err = hwmon_thermal_add_sensor(dev,
+- hwdev, j);
++ err = hwmon_thermal_add_sensor(hdev, j);
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+@@ -647,7 +661,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ return hdev;
+
+ free_hwmon:
+- kfree(hwdev);
++ hwmon_dev_release(hdev);
+ ida_remove:
+ ida_simple_remove(&hwmon_ida, id);
+ return ERR_PTR(err);
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 38ffbdb0a85f..779ec8fdfae0 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -32,8 +32,8 @@
+ static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
+
+ static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
+- { 0x40, 0x00, 0x42, 0x44, 0x46 },
+- { 0x3f, 0x00, 0x41, 0x43, 0x45 },
++ { 0x46, 0x00, 0x40, 0x42, 0x44 },
++ { 0x45, 0x00, 0x3f, 0x41, 0x43 },
+ };
+
+ static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
+diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
+index 0dad8626bcfb..6cf28b049635 100644
+--- a/drivers/hwtracing/coresight/coresight-etb10.c
++++ b/drivers/hwtracing/coresight/coresight-etb10.c
+@@ -275,9 +275,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index e31061308e19..e90af39283b1 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -304,9 +304,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
+ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
+
+ /* Allocate memory structure for interaction with Perf */
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index f39670c5c25c..9899f7e155a5 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2584,17 +2584,6 @@ isert_wait4logout(struct isert_conn *isert_conn)
+ }
+ }
+
+-static void
+-isert_wait4cmds(struct iscsi_conn *conn)
+-{
+- isert_info("iscsi_conn %p\n", conn);
+-
+- if (conn->sess) {
+- target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+- target_wait_for_sess_cmds(conn->sess->se_sess);
+- }
+-}
+-
+ /**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ * unsolicitate dataout
+@@ -2642,7 +2631,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+
+ ib_drain_qp(isert_conn->qp);
+ isert_put_unsol_pending_cmds(conn);
+- isert_wait4cmds(conn);
+ isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
+diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
+index a8937ceac66a..af4db1350915 100644
+--- a/drivers/input/misc/keyspan_remote.c
++++ b/drivers/input/misc/keyspan_remote.c
+@@ -339,7 +339,8 @@ static int keyspan_setup(struct usb_device* dev)
+ int retval = 0;
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
++ 0x11, 0x40, 0x5601, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
+ __func__, retval);
+@@ -347,7 +348,8 @@ static int keyspan_setup(struct usb_device* dev)
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x44, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
+ __func__, retval);
+@@ -355,7 +357,8 @@ static int keyspan_setup(struct usb_device* dev)
+ }
+
+ retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- 0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
++ 0x22, 0x40, 0x0, 0x0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ if (retval) {
+ dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
+ __func__, retval);
+diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
+index 7dd1c1fbe42a..27b3db154a33 100644
+--- a/drivers/input/misc/pm8xxx-vibrator.c
++++ b/drivers/input/misc/pm8xxx-vibrator.c
+@@ -98,7 +98,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
+
+ if (regs->enable_mask)
+ rc = regmap_update_bits(vib->regmap, regs->enable_addr,
+- on ? regs->enable_mask : 0, val);
++ regs->enable_mask, on ? ~0 : 0);
+
+ return rc;
+ }
+diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
+index 4b2466cf2fb1..b6ccf39c6a7b 100644
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -166,6 +166,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+ exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+@@ -217,6 +218,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
++ rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
+diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
+index c82cd5079d0e..dc2ad1cc8fe1 100644
+--- a/drivers/input/tablet/aiptek.c
++++ b/drivers/input/tablet/aiptek.c
+@@ -1815,14 +1815,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
+
+ /* Verify that a device really has an endpoint */
+- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev,
+ "interface has %d endpoints, but must have minimum 1\n",
+- intf->altsetting[0].desc.bNumEndpoints);
++ intf->cur_altsetting->desc.bNumEndpoints);
+ err = -EINVAL;
+ goto fail3;
+ }
+- endpoint = &intf->altsetting[0].endpoint[0].desc;
++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+
+ /* Go set up our URB, which is called when the tablet receives
+ * input.
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 35031228a6d0..799c94dda651 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ }
+
+ /* Sanity check that a device has an endpoint */
+- if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++ if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&usbinterface->dev,
+ "Invalid number of endpoints\n");
+ error = -EINVAL;
+ goto err_free_urb;
+ }
+
+- /*
+- * The endpoint is always altsetting 0, we know this since we know
+- * this device only has one interrupt endpoint
+- */
+- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
+
+ /* Some debug */
+ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
+@@ -973,7 +969,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ input_dev->dev.parent = &usbinterface->dev;
+
+ /* Setup the URB, it will be posted later on open of input device */
+- endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
++ endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
+
+ usb_fill_int_urb(gtco->urbinfo,
+ udev,
+diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
+index ffd03cfe3131..570cdaef3558 100644
+--- a/drivers/input/tablet/pegasus_notetaker.c
++++ b/drivers/input/tablet/pegasus_notetaker.c
+@@ -274,7 +274,7 @@ static int pegasus_probe(struct usb_interface *intf,
+ return -ENODEV;
+
+ /* Sanity check that the device has an endpoint */
+- if (intf->altsetting[0].desc.bNumEndpoints < 1) {
++ if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev, "Invalid number of endpoints\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
+index d2e14d9e5975..ab44eb0352d0 100644
+--- a/drivers/input/touchscreen/sun4i-ts.c
++++ b/drivers/input/touchscreen/sun4i-ts.c
+@@ -246,6 +246,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device *hwmon;
++ struct thermal_zone_device *thermal;
+ int error;
+ u32 reg;
+ bool ts_attached;
+@@ -365,7 +366,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+- devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
++ thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
++ &sun4i_ts_tz_ops);
++ if (IS_ERR(thermal))
++ return PTR_ERR(thermal);
+
+ writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
+
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 894843a7ec7b..caa3aca2ea54 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -657,7 +657,7 @@ static int sur40_probe(struct usb_interface *interface,
+ int error;
+
+ /* Check if we really have the right interface. */
+- iface_desc = &interface->altsetting[0];
++ iface_desc = interface->cur_altsetting;
+ if (iface_desc->desc.bInterfaceClass != 0xFF)
+ return -ENODEV;
+
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index 7675b645db2e..f75d892b6f03 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -1548,12 +1548,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
+@@ -1576,22 +1576,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sdr_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_meta_cap))
+@@ -1635,12 +1635,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
+@@ -1663,22 +1663,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.vbi);
++ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sliced);
++ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sdr_out))
+ break;
+- CLEAR_AFTER_FIELD(p, fmt.sdr);
++ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
+ return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_meta_cap))
1461 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1462 +index 908b23e6a03c..14d749a0de95 100644
1463 +--- a/drivers/mmc/host/sdhci-tegra.c
1464 ++++ b/drivers/mmc/host/sdhci-tegra.c
1465 +@@ -177,7 +177,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
1466 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
1467 + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
1468 + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
1469 +- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
1470 ++ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
1471 + clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
1472 + }
1473 +
1474 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1475 +index 369817a29c22..5a7fd89a8f2b 100644
1476 +--- a/drivers/mmc/host/sdhci.c
1477 ++++ b/drivers/mmc/host/sdhci.c
1478 +@@ -3700,11 +3700,13 @@ int sdhci_setup_host(struct sdhci_host *host)
1479 + if (host->ops->get_min_clock)
1480 + mmc->f_min = host->ops->get_min_clock(host);
1481 + else if (host->version >= SDHCI_SPEC_300) {
1482 +- if (host->clk_mul) {
1483 +- mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
1484 ++ if (host->clk_mul)
1485 + max_clk = host->max_clk * host->clk_mul;
1486 +- } else
1487 +- mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1488 ++ /*
1489 ++ * Divided Clock Mode minimum clock rate is always less than
1490 ++ * Programmable Clock Mode minimum clock rate.
1491 ++ */
1492 ++ mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1493 + } else
1494 + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1495 +
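
[The sdhci hunk above drops the programmable-clock branch for mmc->f_min: with a clock multiplier the lowest reachable rate is (max_clk * clk_mul) / 1024, which is always above the divided-mode floor of max_clk / 2046, so advertising the divided-mode value is both simpler and correct. A small arithmetic check; the clock figures are invented for illustration.]

    #include <stdio.h>

    #define SDHCI_MAX_DIV_SPEC_300 2046  /* max divisor, SDHCI 3.0 */

    int main(void)
    {
        unsigned int max_clk = 200000000;  /* 200 MHz base, illustrative */
        unsigned int clk_mul = 8;          /* programmable multiplier */

        unsigned int div_min  = max_clk / SDHCI_MAX_DIV_SPEC_300;
        unsigned int prog_min = (max_clk * clk_mul) / 1024;

        /* The divided-mode minimum is the lower of the two, so it is
         * the safe value to advertise as mmc->f_min. */
        printf("divided %u Hz vs programmable %u Hz\n", div_min, prog_min);
        return 0;
    }
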
1496 +diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1497 +index cf0769ad39cd..b2e5bcae7fbe 100644
1498 +--- a/drivers/net/can/slcan.c
1499 ++++ b/drivers/net/can/slcan.c
1500 +@@ -343,9 +343,16 @@ static void slcan_transmit(struct work_struct *work)
1501 + */
1502 + static void slcan_write_wakeup(struct tty_struct *tty)
1503 + {
1504 +- struct slcan *sl = tty->disc_data;
1505 ++ struct slcan *sl;
1506 ++
1507 ++ rcu_read_lock();
1508 ++ sl = rcu_dereference(tty->disc_data);
1509 ++ if (!sl)
1510 ++ goto out;
1511 +
1512 + schedule_work(&sl->tx_work);
1513 ++out:
1514 ++ rcu_read_unlock();
1515 + }
1516 +
1517 + /* Send a can_frame to a TTY queue. */
1518 +@@ -640,10 +647,11 @@ static void slcan_close(struct tty_struct *tty)
1519 + return;
1520 +
1521 + spin_lock_bh(&sl->lock);
1522 +- tty->disc_data = NULL;
1523 ++ rcu_assign_pointer(tty->disc_data, NULL);
1524 + sl->tty = NULL;
1525 + spin_unlock_bh(&sl->lock);
1526 +
1527 ++ synchronize_rcu();
1528 + flush_work(&sl->tx_work);
1529 +
1530 + /* Flush network side */
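
[The slcan change above closes a use-after-free: slcan_write_wakeup() can run concurrently with slcan_close(), so disc_data is now read under rcu_read_lock() and the close path waits out a grace period before flushing the work. A condensed kernel-style sketch of the shape; it is not a buildable module, and names other than the RCU, tty and workqueue APIs are illustrative.]

    #include <linux/tty.h>
    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    struct chan {
        struct work_struct tx_work;
    };

    static void chan_wakeup(struct tty_struct *tty)
    {
        struct chan *c;

        rcu_read_lock();
        c = rcu_dereference(tty->disc_data);
        if (c)
            schedule_work(&c->tx_work);  /* c cannot be freed while the
                                          * read-side lock is held */
        rcu_read_unlock();
    }

    static void chan_close(struct tty_struct *tty, struct chan *c)
    {
        rcu_assign_pointer(tty->disc_data, NULL);
        synchronize_rcu();         /* wait for any in-flight wakeup */
        flush_work(&c->tx_work);   /* nothing can requeue it now */
    }
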
1531 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1532 +index b7d75011cede..736a6a5fbd98 100644
1533 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1534 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1535 +@@ -2166,8 +2166,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1536 + DMA_END_ADDR);
1537 +
1538 + /* Initialize Tx NAPI */
1539 +- netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1540 +- NAPI_POLL_WEIGHT);
1541 ++ netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
1542 ++ NAPI_POLL_WEIGHT);
1543 + }
1544 +
1545 + /* Initialize a RDMA ring */
1546 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1547 +index 6be6de0774b6..c82469ab7aba 100644
1548 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1549 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1550 +@@ -2449,6 +2449,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1551 +
1552 + if (!is_offload(adapter))
1553 + return -EOPNOTSUPP;
1554 ++ if (!capable(CAP_NET_ADMIN))
1555 ++ return -EPERM;
1556 + if (!(adapter->flags & FULL_INIT_DONE))
1557 + return -EIO; /* need the memory controllers */
1558 + if (copy_from_user(&t, useraddr, sizeof(t)))
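
[The added capable() test above closes an ioctl that let unprivileged users reach the memory controllers; note it sits before the FULL_INIT_DONE probe, so permission is settled before any device state is disclosed. The ordering, sketched in kernel style with the state checks reduced to booleans:]

    #include <linux/types.h>
    #include <linux/capability.h>
    #include <linux/errno.h>

    /* Hedged sketch: cheap capability check first, then device-state
     * checks, then the privileged work. Parameters are stand-ins. */
    static int guarded_ioctl(bool offload_capable, bool fully_initialized)
    {
        if (!offload_capable)
            return -EOPNOTSUPP;
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        if (!fully_initialized)
            return -EIO;    /* need the memory controllers */
        /* ... privileged work ... */
        return 0;
    }
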
1559 +diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
1560 +index 5f1875fe47cd..69282f31d519 100644
1561 +--- a/drivers/net/ethernet/natsemi/sonic.c
1562 ++++ b/drivers/net/ethernet/natsemi/sonic.c
1563 +@@ -63,6 +63,8 @@ static int sonic_open(struct net_device *dev)
1564 +
1565 + netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
1566 +
1567 ++ spin_lock_init(&lp->lock);
1568 ++
1569 + for (i = 0; i < SONIC_NUM_RRS; i++) {
1570 + struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
1571 + if (skb == NULL) {
1572 +@@ -113,6 +115,24 @@ static int sonic_open(struct net_device *dev)
1573 + return 0;
1574 + }
1575 +
1576 ++/* Wait for the SONIC to become idle. */
1577 ++static void sonic_quiesce(struct net_device *dev, u16 mask)
1578 ++{
1579 ++ struct sonic_local * __maybe_unused lp = netdev_priv(dev);
1580 ++ int i;
1581 ++ u16 bits;
1582 ++
1583 ++ for (i = 0; i < 1000; ++i) {
1584 ++ bits = SONIC_READ(SONIC_CMD) & mask;
1585 ++ if (!bits)
1586 ++ return;
1587 ++ if (irqs_disabled() || in_interrupt())
1588 ++ udelay(20);
1589 ++ else
1590 ++ usleep_range(100, 200);
1591 ++ }
1592 ++ WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
1593 ++}
1594 +
1595 + /*
1596 + * Close the SONIC device
1597 +@@ -129,6 +149,9 @@ static int sonic_close(struct net_device *dev)
1598 + /*
1599 + * stop the SONIC, disable interrupts
1600 + */
1601 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
1602 ++ sonic_quiesce(dev, SONIC_CR_ALL);
1603 ++
1604 + SONIC_WRITE(SONIC_IMR, 0);
1605 + SONIC_WRITE(SONIC_ISR, 0x7fff);
1606 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
1607 +@@ -168,6 +191,9 @@ static void sonic_tx_timeout(struct net_device *dev)
1608 + * put the Sonic into software-reset mode and
1609 + * disable all interrupts before releasing DMA buffers
1610 + */
1611 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
1612 ++ sonic_quiesce(dev, SONIC_CR_ALL);
1613 ++
1614 + SONIC_WRITE(SONIC_IMR, 0);
1615 + SONIC_WRITE(SONIC_ISR, 0x7fff);
1616 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
1617 +@@ -205,8 +231,6 @@ static void sonic_tx_timeout(struct net_device *dev)
1618 + * wake the tx queue
1619 + * Concurrently with all of this, the SONIC is potentially writing to
1620 + * the status flags of the TDs.
1621 +- * Until some mutual exclusion is added, this code will not work with SMP. However,
1622 +- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
1623 + */
1624 +
1625 + static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
1626 +@@ -214,7 +238,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
1627 + struct sonic_local *lp = netdev_priv(dev);
1628 + dma_addr_t laddr;
1629 + int length;
1630 +- int entry = lp->next_tx;
1631 ++ int entry;
1632 ++ unsigned long flags;
1633 +
1634 + netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
1635 +
1636 +@@ -236,6 +261,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
1637 + return NETDEV_TX_OK;
1638 + }
1639 +
1640 ++ spin_lock_irqsave(&lp->lock, flags);
1641 ++
1642 ++ entry = lp->next_tx;
1643 ++
1644 + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
1645 + sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */
1646 + sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
1647 +@@ -245,10 +274,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
1648 + sonic_tda_put(dev, entry, SONIC_TD_LINK,
1649 + sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
1650 +
1651 +- /*
1652 +- * Must set tx_skb[entry] only after clearing status, and
1653 +- * before clearing EOL and before stopping queue
1654 +- */
1655 + wmb();
1656 + lp->tx_len[entry] = length;
1657 + lp->tx_laddr[entry] = laddr;
1658 +@@ -271,6 +296,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
1659 +
1660 + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
1661 +
1662 ++ spin_unlock_irqrestore(&lp->lock, flags);
1663 ++
1664 + return NETDEV_TX_OK;
1665 + }
1666 +
1667 +@@ -283,15 +310,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1668 + struct net_device *dev = dev_id;
1669 + struct sonic_local *lp = netdev_priv(dev);
1670 + int status;
1671 ++ unsigned long flags;
1672 ++
1673 ++ /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
1674 ++ * with sonic_send_packet() so that the two functions can share state.
1675 ++ * Secondly, it makes sonic_interrupt() re-entrant, as that is required
1676 ++ * by macsonic which must use two IRQs with different priority levels.
1677 ++ */
1678 ++ spin_lock_irqsave(&lp->lock, flags);
1679 ++
1680 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
1681 ++ if (!status) {
1682 ++ spin_unlock_irqrestore(&lp->lock, flags);
1683 +
1684 +- if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
1685 + return IRQ_NONE;
1686 ++ }
1687 +
1688 + do {
1689 ++ SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
1690 ++
1691 + if (status & SONIC_INT_PKTRX) {
1692 + netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
1693 + sonic_rx(dev); /* got packet(s) */
1694 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
1695 + }
1696 +
1697 + if (status & SONIC_INT_TXDN) {
1698 +@@ -299,11 +339,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1699 + int td_status;
1700 + int freed_some = 0;
1701 +
1702 +- /* At this point, cur_tx is the index of a TD that is one of:
1703 +- * unallocated/freed (status set & tx_skb[entry] clear)
1704 +- * allocated and sent (status set & tx_skb[entry] set )
1705 +- * allocated and not yet sent (status clear & tx_skb[entry] set )
1706 +- * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
1707 ++ /* The state of a Transmit Descriptor may be inferred
1708 ++ * from { tx_skb[entry], td_status } as follows.
1709 ++ * { clear, clear } => the TD has never been used
1710 ++ * { set, clear } => the TD was handed to SONIC
1711 ++ * { set, set } => the TD was handed back
1712 ++ * { clear, set } => the TD is available for re-use
1713 + */
1714 +
1715 + netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
1716 +@@ -312,18 +353,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1717 + if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
1718 + break;
1719 +
1720 +- if (td_status & 0x0001) {
1721 ++ if (td_status & SONIC_TCR_PTX) {
1722 + lp->stats.tx_packets++;
1723 + lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
1724 + } else {
1725 +- lp->stats.tx_errors++;
1726 +- if (td_status & 0x0642)
1727 ++ if (td_status & (SONIC_TCR_EXD |
1728 ++ SONIC_TCR_EXC | SONIC_TCR_BCM))
1729 + lp->stats.tx_aborted_errors++;
1730 +- if (td_status & 0x0180)
1731 ++ if (td_status &
1732 ++ (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
1733 + lp->stats.tx_carrier_errors++;
1734 +- if (td_status & 0x0020)
1735 ++ if (td_status & SONIC_TCR_OWC)
1736 + lp->stats.tx_window_errors++;
1737 +- if (td_status & 0x0004)
1738 ++ if (td_status & SONIC_TCR_FU)
1739 + lp->stats.tx_fifo_errors++;
1740 + }
1741 +
1742 +@@ -345,7 +387,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1743 + if (freed_some || lp->tx_skb[entry] == NULL)
1744 + netif_wake_queue(dev); /* The ring is no longer full */
1745 + lp->cur_tx = entry;
1746 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
1747 + }
1748 +
1749 + /*
1750 +@@ -354,42 +395,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1751 + if (status & SONIC_INT_RFO) {
1752 + netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
1753 + __func__);
1754 +- lp->stats.rx_fifo_errors++;
1755 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
1756 + }
1757 + if (status & SONIC_INT_RDE) {
1758 + netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
1759 + __func__);
1760 +- lp->stats.rx_dropped++;
1761 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
1762 + }
1763 + if (status & SONIC_INT_RBAE) {
1764 + netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
1765 + __func__);
1766 +- lp->stats.rx_dropped++;
1767 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
1768 + }
1769 +
1770 + /* counter overruns; all counters are 16bit wide */
1771 +- if (status & SONIC_INT_FAE) {
1772 ++ if (status & SONIC_INT_FAE)
1773 + lp->stats.rx_frame_errors += 65536;
1774 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
1775 +- }
1776 +- if (status & SONIC_INT_CRC) {
1777 ++ if (status & SONIC_INT_CRC)
1778 + lp->stats.rx_crc_errors += 65536;
1779 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
1780 +- }
1781 +- if (status & SONIC_INT_MP) {
1782 ++ if (status & SONIC_INT_MP)
1783 + lp->stats.rx_missed_errors += 65536;
1784 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
1785 +- }
1786 +
1787 + /* transmit error */
1788 + if (status & SONIC_INT_TXER) {
1789 +- if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
1790 +- netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
1791 +- __func__);
1792 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
1793 ++ u16 tcr = SONIC_READ(SONIC_TCR);
1794 ++
1795 ++ netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
1796 ++ __func__, tcr);
1797 ++
1798 ++ if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
1799 ++ SONIC_TCR_FU | SONIC_TCR_BCM)) {
1800 ++ /* Aborted transmission. Try again. */
1801 ++ netif_stop_queue(dev);
1802 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
1803 ++ }
1804 + }
1805 +
1806 + /* bus retry */
1807 +@@ -399,107 +435,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
1808 + /* ... to help debug DMA problems causing endless interrupts. */
1809 + /* Bounce the eth interface to turn on the interrupt again. */
1810 + SONIC_WRITE(SONIC_IMR, 0);
1811 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
1812 + }
1813 +
1814 +- /* load CAM done */
1815 +- if (status & SONIC_INT_LCD)
1816 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
1817 +- } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
1818 ++ status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
1819 ++ } while (status);
1820 ++
1821 ++ spin_unlock_irqrestore(&lp->lock, flags);
1822 ++
1823 + return IRQ_HANDLED;
1824 + }
1825 +
1826 ++/* Return the array index corresponding to a given Receive Buffer pointer. */
1827 ++static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
1828 ++ unsigned int last)
1829 ++{
1830 ++ unsigned int i = last;
1831 ++
1832 ++ do {
1833 ++ i = (i + 1) & SONIC_RRS_MASK;
1834 ++ if (addr == lp->rx_laddr[i])
1835 ++ return i;
1836 ++ } while (i != last);
1837 ++
1838 ++ return -ENOENT;
1839 ++}
1840 ++
1841 ++/* Allocate and map a new skb to be used as a receive buffer. */
1842 ++static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
1843 ++ struct sk_buff **new_skb, dma_addr_t *new_addr)
1844 ++{
1845 ++ *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
1846 ++ if (!*new_skb)
1847 ++ return false;
1848 ++
1849 ++ if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
1850 ++ skb_reserve(*new_skb, 2);
1851 ++
1852 ++ *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
1853 ++ SONIC_RBSIZE, DMA_FROM_DEVICE);
1854 ++ if (!*new_addr) {
1855 ++ dev_kfree_skb(*new_skb);
1856 ++ *new_skb = NULL;
1857 ++ return false;
1858 ++ }
1859 ++
1860 ++ return true;
1861 ++}
1862 ++
1863 ++/* Place a new receive resource in the Receive Resource Area and update RWP. */
1864 ++static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
1865 ++ dma_addr_t old_addr, dma_addr_t new_addr)
1866 ++{
1867 ++ unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
1868 ++ unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
1869 ++ u32 buf;
1870 ++
1871 ++ /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
1872 ++ * scans the other resources in the RRA, those in the range [RWP, RRP).
1873 ++ */
1874 ++ do {
1875 ++ buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
1876 ++ sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
1877 ++
1878 ++ if (buf == old_addr)
1879 ++ break;
1880 ++
1881 ++ entry = (entry + 1) & SONIC_RRS_MASK;
1882 ++ } while (entry != end);
1883 ++
1884 ++ WARN_ONCE(buf != old_addr, "failed to find resource!\n");
1885 ++
1886 ++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
1887 ++ sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
1888 ++
1889 ++ entry = (entry + 1) & SONIC_RRS_MASK;
1890 ++
1891 ++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
1892 ++}
1893 ++
1894 + /*
1895 + * We have a good packet(s), pass it/them up the network stack.
1896 + */
1897 + static void sonic_rx(struct net_device *dev)
1898 + {
1899 + struct sonic_local *lp = netdev_priv(dev);
1900 +- int status;
1901 + int entry = lp->cur_rx;
1902 ++ int prev_entry = lp->eol_rx;
1903 ++ bool rbe = false;
1904 +
1905 + while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
1906 +- struct sk_buff *used_skb;
1907 +- struct sk_buff *new_skb;
1908 +- dma_addr_t new_laddr;
1909 +- u16 bufadr_l;
1910 +- u16 bufadr_h;
1911 +- int pkt_len;
1912 +-
1913 +- status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
1914 +- if (status & SONIC_RCR_PRX) {
1915 +- /* Malloc up new buffer. */
1916 +- new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
1917 +- if (new_skb == NULL) {
1918 +- lp->stats.rx_dropped++;
1919 ++ u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
1920 ++
1921 ++ /* If the RD has LPKT set, the chip has finished with the RB */
1922 ++ if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
1923 ++ struct sk_buff *new_skb;
1924 ++ dma_addr_t new_laddr;
1925 ++ u32 addr = (sonic_rda_get(dev, entry,
1926 ++ SONIC_RD_PKTPTR_H) << 16) |
1927 ++ sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
1928 ++ int i = index_from_addr(lp, addr, entry);
1929 ++
1930 ++ if (i < 0) {
1931 ++ WARN_ONCE(1, "failed to find buffer!\n");
1932 + break;
1933 + }
1934 +- /* provide 16 byte IP header alignment unless DMA requires otherwise */
1935 +- if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
1936 +- skb_reserve(new_skb, 2);
1937 +-
1938 +- new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
1939 +- SONIC_RBSIZE, DMA_FROM_DEVICE);
1940 +- if (!new_laddr) {
1941 +- dev_kfree_skb(new_skb);
1942 +- printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
1943 ++
1944 ++ if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
1945 ++ struct sk_buff *used_skb = lp->rx_skb[i];
1946 ++ int pkt_len;
1947 ++
1948 ++ /* Pass the used buffer up the stack */
1949 ++ dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
1950 ++ DMA_FROM_DEVICE);
1951 ++
1952 ++ pkt_len = sonic_rda_get(dev, entry,
1953 ++ SONIC_RD_PKTLEN);
1954 ++ skb_trim(used_skb, pkt_len);
1955 ++ used_skb->protocol = eth_type_trans(used_skb,
1956 ++ dev);
1957 ++ netif_rx(used_skb);
1958 ++ lp->stats.rx_packets++;
1959 ++ lp->stats.rx_bytes += pkt_len;
1960 ++
1961 ++ lp->rx_skb[i] = new_skb;
1962 ++ lp->rx_laddr[i] = new_laddr;
1963 ++ } else {
1964 ++ /* Failed to obtain a new buffer so re-use it */
1965 ++ new_laddr = addr;
1966 + lp->stats.rx_dropped++;
1967 +- break;
1968 + }
1969 +-
1970 +- /* now we have a new skb to replace it, pass the used one up the stack */
1971 +- dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
1972 +- used_skb = lp->rx_skb[entry];
1973 +- pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
1974 +- skb_trim(used_skb, pkt_len);
1975 +- used_skb->protocol = eth_type_trans(used_skb, dev);
1976 +- netif_rx(used_skb);
1977 +- lp->stats.rx_packets++;
1978 +- lp->stats.rx_bytes += pkt_len;
1979 +-
1980 +- /* and insert the new skb */
1981 +- lp->rx_laddr[entry] = new_laddr;
1982 +- lp->rx_skb[entry] = new_skb;
1983 +-
1984 +- bufadr_l = (unsigned long)new_laddr & 0xffff;
1985 +- bufadr_h = (unsigned long)new_laddr >> 16;
1986 +- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
1987 +- sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
1988 +- } else {
1989 +- /* This should only happen, if we enable accepting broken packets. */
1990 +- lp->stats.rx_errors++;
1991 +- if (status & SONIC_RCR_FAER)
1992 +- lp->stats.rx_frame_errors++;
1993 +- if (status & SONIC_RCR_CRCR)
1994 +- lp->stats.rx_crc_errors++;
1995 +- }
1996 +- if (status & SONIC_RCR_LPKT) {
1997 +- /*
1998 +- * this was the last packet out of the current receive buffer
1999 +- * give the buffer back to the SONIC
2000 ++ /* If RBE is already asserted when RWP advances then
2001 ++ * it's safe to clear RBE after processing this packet.
2002 + */
2003 +- lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2004 +- if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
2005 +- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2006 +- if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
2007 +- netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
2008 +- __func__);
2009 +- SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
2010 +- }
2011 +- } else
2012 +- printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
2013 +- dev->name);
2014 ++ rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
2015 ++ sonic_update_rra(dev, lp, addr, new_laddr);
2016 ++ }
2017 + /*
2018 + * give back the descriptor
2019 + */
2020 +- sonic_rda_put(dev, entry, SONIC_RD_LINK,
2021 +- sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
2022 ++ sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
2023 + sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
2024 +- sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
2025 +- sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
2026 +- lp->eol_rx = entry;
2027 +- lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
2028 ++
2029 ++ prev_entry = entry;
2030 ++ entry = (entry + 1) & SONIC_RDS_MASK;
2031 ++ }
2032 ++
2033 ++ lp->cur_rx = entry;
2034 ++
2035 ++ if (prev_entry != lp->eol_rx) {
2036 ++ /* Advance the EOL flag to put descriptors back into service */
2037 ++ sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
2038 ++ sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
2039 ++ sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
2040 ++ sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
2041 ++ lp->eol_rx = prev_entry;
2042 + }
2043 ++
2044 ++ if (rbe)
2045 ++ SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
2046 + /*
2047 + * If any worth-while packets have been received, netif_rx()
2048 + * has done a mark_bh(NET_BH) for us and will work on them
2049 +@@ -549,6 +642,8 @@ static void sonic_multicast_list(struct net_device *dev)
2050 + (netdev_mc_count(dev) > 15)) {
2051 + rcr |= SONIC_RCR_AMC;
2052 + } else {
2053 ++ unsigned long flags;
2054 ++
2055 + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
2056 + netdev_mc_count(dev));
2057 + sonic_set_cam_enable(dev, 1); /* always enable our own address */
2058 +@@ -562,9 +657,14 @@ static void sonic_multicast_list(struct net_device *dev)
2059 + i++;
2060 + }
2061 + SONIC_WRITE(SONIC_CDC, 16);
2062 +- /* issue Load CAM command */
2063 + SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
2064 ++
2065 ++ /* LCAM and TXP commands can't be used simultaneously */
2066 ++ spin_lock_irqsave(&lp->lock, flags);
2067 ++ sonic_quiesce(dev, SONIC_CR_TXP);
2068 + SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2069 ++ sonic_quiesce(dev, SONIC_CR_LCAM);
2070 ++ spin_unlock_irqrestore(&lp->lock, flags);
2071 + }
2072 + }
2073 +
2074 +@@ -579,7 +679,6 @@ static void sonic_multicast_list(struct net_device *dev)
2075 + */
2076 + static int sonic_init(struct net_device *dev)
2077 + {
2078 +- unsigned int cmd;
2079 + struct sonic_local *lp = netdev_priv(dev);
2080 + int i;
2081 +
2082 +@@ -591,12 +690,16 @@ static int sonic_init(struct net_device *dev)
2083 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2084 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
2085 +
2086 ++ /* While in reset mode, clear CAM Enable register */
2087 ++ SONIC_WRITE(SONIC_CE, 0);
2088 ++
2089 + /*
2090 + * clear software reset flag, disable receiver, clear and
2091 + * enable interrupts, then completely initialize the SONIC
2092 + */
2093 + SONIC_WRITE(SONIC_CMD, 0);
2094 +- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
2095 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
2096 ++ sonic_quiesce(dev, SONIC_CR_ALL);
2097 +
2098 + /*
2099 + * initialize the receive resource area
2100 +@@ -614,15 +717,10 @@ static int sonic_init(struct net_device *dev)
2101 + }
2102 +
2103 + /* initialize all RRA registers */
2104 +- lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
2105 +- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2106 +- lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
2107 +- SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
2108 +-
2109 +- SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
2110 +- SONIC_WRITE(SONIC_REA, lp->rra_end);
2111 +- SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
2112 +- SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
2113 ++ SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
2114 ++ SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
2115 ++ SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
2116 ++ SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
2117 + SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
2118 + SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
2119 +
2120 +@@ -630,14 +728,7 @@ static int sonic_init(struct net_device *dev)
2121 + netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
2122 +
2123 + SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
2124 +- i = 0;
2125 +- while (i++ < 100) {
2126 +- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
2127 +- break;
2128 +- }
2129 +-
2130 +- netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
2131 +- SONIC_READ(SONIC_CMD), i);
2132 ++ sonic_quiesce(dev, SONIC_CR_RRRA);
2133 +
2134 + /*
2135 + * Initialize the receive descriptors so that they
2136 +@@ -712,28 +803,17 @@ static int sonic_init(struct net_device *dev)
2137 + * load the CAM
2138 + */
2139 + SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
2140 +-
2141 +- i = 0;
2142 +- while (i++ < 100) {
2143 +- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
2144 +- break;
2145 +- }
2146 +- netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
2147 +- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
2148 ++ sonic_quiesce(dev, SONIC_CR_LCAM);
2149 +
2150 + /*
2151 + * enable receiver, disable loopback
2152 + * and enable all interrupts
2153 + */
2154 +- SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
2155 + SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
2156 + SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
2157 + SONIC_WRITE(SONIC_ISR, 0x7fff);
2158 + SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
2159 +-
2160 +- cmd = SONIC_READ(SONIC_CMD);
2161 +- if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
2162 +- printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
2163 ++ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
2164 +
2165 + netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
2166 + SONIC_READ(SONIC_CMD));
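
[Taken together, the sonic.c hunks above do four things: add lp->lock so sonic_send_packet() and a now re-entrant sonic_interrupt() can share the TX ring safely, replace the open-coded command polling with sonic_quiesce(), acknowledge interrupts once per loop pass instead of bit by bit, and rebuild the receive path around buffer DMA addresses rather than assuming descriptor and resource indices stay in lockstep. The quiesce helper is worth a standalone sketch; this is portable C with the hardware register read simulated, so the bounded-poll logic can be run as-is.]

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t fake_cmd_reg = 0x0002;  /* pretend TXP is pending */

    static uint16_t read_cmd(void)
    {
        uint16_t v = fake_cmd_reg;
        fake_cmd_reg = 0;  /* the "command" completes after one poll */
        return v;          /* real driver: SONIC_READ(SONIC_CMD) */
    }

    /* Poll until the given command bits clear, with a bounded number of
     * attempts, as sonic_quiesce() does. */
    static int quiesce(uint16_t mask, int max_polls)
    {
        for (int i = 0; i < max_polls; i++) {
            if (!(read_cmd() & mask))
                return 0;  /* idle */
            /* real driver: udelay() in atomic context,
             * usleep_range() otherwise */
        }
        return -1;         /* deadline expired; driver WARNs here */
    }

    int main(void)
    {
        printf("quiesce: %s\n",
               quiesce(0x0002, 1000) ? "timed out" : "ok");
        return 0;
    }
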
2167 +diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
2168 +index 2b27f7049acb..1df6d2f06cc4 100644
2169 +--- a/drivers/net/ethernet/natsemi/sonic.h
2170 ++++ b/drivers/net/ethernet/natsemi/sonic.h
2171 +@@ -110,6 +110,9 @@
2172 + #define SONIC_CR_TXP 0x0002
2173 + #define SONIC_CR_HTX 0x0001
2174 +
2175 ++#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
2176 ++ SONIC_CR_RXEN | SONIC_CR_TXP)
2177 ++
2178 + /*
2179 + * SONIC data configuration bits
2180 + */
2181 +@@ -175,6 +178,7 @@
2182 + #define SONIC_TCR_NCRS 0x0100
2183 + #define SONIC_TCR_CRLS 0x0080
2184 + #define SONIC_TCR_EXC 0x0040
2185 ++#define SONIC_TCR_OWC 0x0020
2186 + #define SONIC_TCR_PMB 0x0008
2187 + #define SONIC_TCR_FU 0x0004
2188 + #define SONIC_TCR_BCM 0x0002
2189 +@@ -274,8 +278,9 @@
2190 + #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
2191 + #define SONIC_NUM_TDS 16 /* number of transmit descriptors */
2192 +
2193 +-#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
2194 +-#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
2195 ++#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1)
2196 ++#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1)
2197 ++#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1)
2198 +
2199 + #define SONIC_RBSIZE 1520 /* size of one resource buffer */
2200 +
2201 +@@ -312,8 +317,6 @@ struct sonic_local {
2202 + u32 rda_laddr; /* logical DMA address of RDA */
2203 + dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
2204 + dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
2205 +- unsigned int rra_end;
2206 +- unsigned int cur_rwp;
2207 + unsigned int cur_rx;
2208 + unsigned int cur_tx; /* first unacked transmit packet */
2209 + unsigned int eol_rx;
2210 +@@ -322,6 +325,7 @@ struct sonic_local {
2211 + int msg_enable;
2212 + struct device *device; /* generic device */
2213 + struct net_device_stats stats;
2214 ++ spinlock_t lock;
2215 + };
2216 +
2217 + #define TX_TIMEOUT (3 * HZ)
2218 +@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
2219 + as far as we can tell. */
2220 + /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put()
2221 + is a much better name. */
2222 +-static inline void sonic_buf_put(void* base, int bitmode,
2223 ++static inline void sonic_buf_put(u16 *base, int bitmode,
2224 + int offset, __u16 val)
2225 + {
2226 + if (bitmode)
2227 + #ifdef __BIG_ENDIAN
2228 +- ((__u16 *) base + (offset*2))[1] = val;
2229 ++ __raw_writew(val, base + (offset * 2) + 1);
2230 + #else
2231 +- ((__u16 *) base + (offset*2))[0] = val;
2232 ++ __raw_writew(val, base + (offset * 2) + 0);
2233 + #endif
2234 + else
2235 +- ((__u16 *) base)[offset] = val;
2236 ++ __raw_writew(val, base + (offset * 1) + 0);
2237 + }
2238 +
2239 +-static inline __u16 sonic_buf_get(void* base, int bitmode,
2240 ++static inline __u16 sonic_buf_get(u16 *base, int bitmode,
2241 + int offset)
2242 + {
2243 + if (bitmode)
2244 + #ifdef __BIG_ENDIAN
2245 +- return ((volatile __u16 *) base + (offset*2))[1];
2246 ++ return __raw_readw(base + (offset * 2) + 1);
2247 + #else
2248 +- return ((volatile __u16 *) base + (offset*2))[0];
2249 ++ return __raw_readw(base + (offset * 2) + 0);
2250 + #endif
2251 + else
2252 +- return ((volatile __u16 *) base)[offset];
2253 ++ return __raw_readw(base + (offset * 1) + 0);
2254 + }
2255 +
2256 + /* Inlines that you should actually use for reading/writing DMA buffers */
2257 +@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
2258 + (entry * SIZEOF_SONIC_RR) + offset);
2259 + }
2260 +
2261 ++static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
2262 ++{
2263 ++ struct sonic_local *lp = netdev_priv(dev);
2264 ++
2265 ++ return lp->rra_laddr +
2266 ++ entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
2267 ++}
2268 ++
2269 ++static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
2270 ++{
2271 ++ struct sonic_local *lp = netdev_priv(dev);
2272 ++
2273 ++ return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
2274 ++ SONIC_BUS_SCALE(lp->dma_bitmode));
2275 ++}
2276 ++
2277 + static const char version[] =
2278 + "sonic.c:v0.92 20.9.98 tsbogend@×××××××××××××.de\n";
2279 +
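
[The accessor hunk above swaps plain (formerly volatile-cast) pointer loads and stores for __raw_writew()/__raw_readw(), making each descriptor access a real, correctly sized access the compiler will not merge, reorder or elide, and routing it through the platform's MMIO accessors. The portable half of that idea is a volatile access; a hedged, runnable sketch, where slot 1 is used purely for illustration while the kernel picks slot 0 or 1 from the build's endianness:]

    #include <stdint.h>
    #include <stdio.h>

    static inline void buf_put(uint16_t *base, int bitmode, int offset,
                               uint16_t val)
    {
        /* bitmode selects a 32-bit descriptor stride: two u16 per word */
        int idx = bitmode ? offset * 2 + 1 : offset;

        *(volatile uint16_t *)(base + idx) = val;
    }

    static inline uint16_t buf_get(const uint16_t *base, int bitmode,
                                   int offset)
    {
        int idx = bitmode ? offset * 2 + 1 : offset;

        return *(volatile const uint16_t *)(base + idx);
    }

    int main(void)
    {
        uint16_t desc[8] = { 0 };

        buf_put(desc, 1, 1, 0xabcd);
        printf("0x%04x\n", buf_get(desc, 1, 1));
        return 0;
    }
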
2280 +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
2281 +index 6571cac6e786..ee086441dcbe 100644
2282 +--- a/drivers/net/gtp.c
2283 ++++ b/drivers/net/gtp.c
2284 +@@ -809,19 +809,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
2285 + return NULL;
2286 + }
2287 +
2288 +- if (sock->sk->sk_protocol != IPPROTO_UDP) {
2289 ++ sk = sock->sk;
2290 ++ if (sk->sk_protocol != IPPROTO_UDP ||
2291 ++ sk->sk_type != SOCK_DGRAM ||
2292 ++ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
2293 + pr_debug("socket fd=%d not UDP\n", fd);
2294 + sk = ERR_PTR(-EINVAL);
2295 + goto out_sock;
2296 + }
2297 +
2298 +- lock_sock(sock->sk);
2299 +- if (sock->sk->sk_user_data) {
2300 ++ lock_sock(sk);
2301 ++ if (sk->sk_user_data) {
2302 + sk = ERR_PTR(-EBUSY);
2303 + goto out_rel_sock;
2304 + }
2305 +
2306 +- sk = sock->sk;
2307 + sock_hold(sk);
2308 +
2309 + tuncfg.sk_user_data = gtp;
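
[Beyond rejecting sockets that are not IPv4/IPv6 UDP datagram sockets, the gtp hunk above keeps the sk_user_data test under lock_sock(), which is what makes claiming a socket exclusive. The claim pattern, sketched in kernel style; the helper name is illustrative, the field and locking APIs are the kernel's.]

    #include <net/sock.h>
    #include <linux/errno.h>

    static int claim_sock(struct sock *sk, void *owner)
    {
        int err = 0;

        lock_sock(sk);
        if (sk->sk_user_data)
            err = -EBUSY;           /* already owned elsewhere */
        else
            sk->sk_user_data = owner;  /* test-and-set under the lock */
        release_sock(sk);

        return err;
    }
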
2310 +diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
2311 +index 77207f936871..93f303ec17e2 100644
2312 +--- a/drivers/net/slip/slip.c
2313 ++++ b/drivers/net/slip/slip.c
2314 +@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
2315 + */
2316 + static void slip_write_wakeup(struct tty_struct *tty)
2317 + {
2318 +- struct slip *sl = tty->disc_data;
2319 ++ struct slip *sl;
2320 ++
2321 ++ rcu_read_lock();
2322 ++ sl = rcu_dereference(tty->disc_data);
2323 ++ if (!sl)
2324 ++ goto out;
2325 +
2326 + schedule_work(&sl->tx_work);
2327 ++out:
2328 ++ rcu_read_unlock();
2329 + }
2330 +
2331 + static void sl_tx_timeout(struct net_device *dev)
2332 +@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
2333 + return;
2334 +
2335 + spin_lock_bh(&sl->lock);
2336 +- tty->disc_data = NULL;
2337 ++ rcu_assign_pointer(tty->disc_data, NULL);
2338 + sl->tty = NULL;
2339 + spin_unlock_bh(&sl->lock);
2340 +
2341 ++ synchronize_rcu();
2342 + flush_work(&sl->tx_work);
2343 +
2344 + /* VSV = very important to remove timers */
2345 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2346 +index bbd92221c6ca..09c444d3b496 100644
2347 +--- a/drivers/net/tun.c
2348 ++++ b/drivers/net/tun.c
2349 +@@ -1900,6 +1900,10 @@ drop:
2350 + if (ret != XDP_PASS) {
2351 + rcu_read_unlock();
2352 + local_bh_enable();
2353 ++ if (frags) {
2354 ++ tfile->napi.skb = NULL;
2355 ++ mutex_unlock(&tfile->napi_mutex);
2356 ++ }
2357 + return total_len;
2358 + }
2359 + }
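
[The tun fix above is a classic early-return leak: the XDP drop path returned without releasing the napi_mutex and skb state taken for the frags case. One way to keep such paths from diverging is a single exit label that owns the cleanup; a userspace-flavoured sketch, with pthread standing in for the kernel mutex:]

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t napi_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int receive(bool frags, bool drop)
    {
        int ret = 0;

        if (frags)
            pthread_mutex_lock(&napi_mutex);

        if (drop) {
            ret = -1;
            goto out;   /* every return funnels through out: */
        }

        /* ... normal processing ... */

    out:
        if (frags)
            pthread_mutex_unlock(&napi_mutex);
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", receive(true, true), receive(true, false));
        return 0;
    }
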
2360 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
2361 +index 7d708aeb4576..92548887df2f 100644
2362 +--- a/drivers/net/usb/lan78xx.c
2363 ++++ b/drivers/net/usb/lan78xx.c
2364 +@@ -31,6 +31,7 @@
2365 + #include <linux/mdio.h>
2366 + #include <linux/phy.h>
2367 + #include <net/ip6_checksum.h>
2368 ++#include <net/vxlan.h>
2369 + #include <linux/interrupt.h>
2370 + #include <linux/irqdomain.h>
2371 + #include <linux/irq.h>
2372 +@@ -3686,6 +3687,19 @@ static void lan78xx_tx_timeout(struct net_device *net)
2373 + tasklet_schedule(&dev->bh);
2374 + }
2375 +
2376 ++static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
2377 ++ struct net_device *netdev,
2378 ++ netdev_features_t features)
2379 ++{
2380 ++ if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
2381 ++ features &= ~NETIF_F_GSO_MASK;
2382 ++
2383 ++ features = vlan_features_check(skb, features);
2384 ++ features = vxlan_features_check(skb, features);
2385 ++
2386 ++ return features;
2387 ++}
2388 ++
2389 + static const struct net_device_ops lan78xx_netdev_ops = {
2390 + .ndo_open = lan78xx_open,
2391 + .ndo_stop = lan78xx_stop,
2392 +@@ -3699,6 +3713,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
2393 + .ndo_set_features = lan78xx_set_features,
2394 + .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
2395 + .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
2396 ++ .ndo_features_check = lan78xx_features_check,
2397 + };
2398 +
2399 + static void lan78xx_stat_monitor(struct timer_list *t)
2400 +diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
2401 +index 57edfada0665..c9401c121a14 100644
2402 +--- a/drivers/net/wireless/marvell/libertas/cfg.c
2403 ++++ b/drivers/net/wireless/marvell/libertas/cfg.c
2404 +@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
2405 + int hw, ap, ap_max = ie[1];
2406 + u8 hw_rate;
2407 +
2408 ++ if (ap_max > MAX_RATES) {
2409 ++ lbs_deb_assoc("invalid rates\n");
2410 ++ return tlv;
2411 ++ }
2412 + /* Advance past IE header */
2413 + ie += 2;
2414 +
2415 +@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
2416 + struct cmd_ds_802_11_ad_hoc_join cmd;
2417 + u8 preamble = RADIO_PREAMBLE_SHORT;
2418 + int ret = 0;
2419 ++ int hw, i;
2420 ++ u8 rates_max;
2421 ++ u8 *rates;
2422 +
2423 + /* TODO: set preamble based on scan result */
2424 + ret = lbs_set_radio(priv, preamble, 1);
2425 +@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
2426 + if (!rates_eid) {
2427 + lbs_add_rates(cmd.bss.rates);
2428 + } else {
2429 +- int hw, i;
2430 +- u8 rates_max = rates_eid[1];
2431 +- u8 *rates = cmd.bss.rates;
2432 ++ rates_max = rates_eid[1];
2433 ++ if (rates_max > MAX_RATES) {
2434 ++ lbs_deb_join("invalid rates");
2435 ++ goto out;
2436 ++ }
2437 ++ rates = cmd.bss.rates;
2438 + for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
2439 + u8 hw_rate = lbs_rates[hw].bitrate / 5;
2440 + for (i = 0; i < rates_max; i++) {
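
[Both libertas hunks above add the same defence: the rate count arrives in an over-the-air information element, so it is bounded against MAX_RATES before it drives a loop or a copy. The generic shape of the check, runnable; the cap value mirrors the driver's limit but is illustrative here.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_RATES 14  /* illustrative cap */

    /* Copy a rates list whose length byte is untrusted; reject it
     * before it indexes anything, as the fix does. */
    static int copy_rates(uint8_t *dst, const uint8_t *ie)
    {
        uint8_t count = ie[1];  /* attacker-controlled length */

        if (count > MAX_RATES)
            return -1;

        memcpy(dst, ie + 2, count);
        return count;
    }

    int main(void)
    {
        uint8_t rates[MAX_RATES];
        const uint8_t good[] = { 0x01, 4, 0x82, 0x84, 0x8b, 0x96 };
        const uint8_t evil[] = { 0x01, 200 };

        printf("%d %d\n", copy_rates(rates, good),
               copy_rates(rates, evil));
        return 0;
    }
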
2441 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2442 +index 20a57a48ae1e..36f8eb9f24a7 100644
2443 +--- a/drivers/pci/quirks.c
2444 ++++ b/drivers/pci/quirks.c
2445 +@@ -4891,18 +4891,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
2446 +
2447 + #ifdef CONFIG_PCI_ATS
2448 + /*
2449 +- * Some devices have a broken ATS implementation causing IOMMU stalls.
2450 +- * Don't use ATS for those devices.
2451 ++ * Some devices require additional driver setup to enable ATS. Don't use
2452 ++ * ATS for those devices as ATS will be enabled before the driver has had a
2453 ++ * chance to load and configure the device.
2454 + */
2455 +-static void quirk_no_ats(struct pci_dev *pdev)
2456 ++static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2457 + {
2458 +- pci_info(pdev, "disabling ATS (broken on this device)\n");
2459 ++ if (pdev->device == 0x7340 && pdev->revision != 0xc5)
2460 ++ return;
2461 ++
2462 ++ pci_info(pdev, "disabling ATS\n");
2463 + pdev->ats_cap = 0;
2464 + }
2465 +
2466 + /* AMD Stoney platform GPU */
2467 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
2468 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
2469 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
2470 ++/* AMD Iceland dGPU */
2471 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
2472 ++/* AMD Navi14 dGPU */
2473 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
2474 + #endif /* CONFIG_PCI_ATS */
2475 +
2476 + /* Freescale PCIe doesn't support MSI in RC mode */
2477 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
2478 +index 4d0fc6b01fa0..4c4781e5974f 100644
2479 +--- a/drivers/scsi/scsi_transport_iscsi.c
2480 ++++ b/drivers/scsi/scsi_transport_iscsi.c
2481 +@@ -37,6 +37,8 @@
2482 +
2483 + #define ISCSI_TRANSPORT_VERSION "2.0-870"
2484 +
2485 ++#define ISCSI_SEND_MAX_ALLOWED 10
2486 ++
2487 + static int dbg_session;
2488 + module_param_named(debug_session, dbg_session, int,
2489 + S_IRUGO | S_IWUSR);
2490 +@@ -3680,6 +3682,7 @@ iscsi_if_rx(struct sk_buff *skb)
2491 + struct nlmsghdr *nlh;
2492 + struct iscsi_uevent *ev;
2493 + uint32_t group;
2494 ++ int retries = ISCSI_SEND_MAX_ALLOWED;
2495 +
2496 + nlh = nlmsg_hdr(skb);
2497 + if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
2498 +@@ -3710,6 +3713,10 @@ iscsi_if_rx(struct sk_buff *skb)
2499 + break;
2500 + err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
2501 + ev, sizeof(*ev));
2502 ++ if (err == -EAGAIN && --retries < 0) {
2503 ++ printk(KERN_WARNING "Send reply failed, error %d\n", err);
2504 ++ break;
2505 ++ }
2506 + } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
2507 + skb_pull(skb, rlen);
2508 + }
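
[Previously a persistently full netlink socket made iscsi_if_rx() retry -EAGAIN forever; the hunk above bounds that to ISCSI_SEND_MAX_ALLOWED attempts while still looping on other transient errors. A runnable sketch of the loop's termination logic, where send_once() simulates a receiver that never drains:]

    #include <errno.h>
    #include <stdio.h>

    #define SEND_MAX_ALLOWED 10  /* mirrors ISCSI_SEND_MAX_ALLOWED */

    static int send_once(void)
    {
        return -EAGAIN;  /* stand-in for iscsi_if_send_reply() */
    }

    int main(void)
    {
        int retries = SEND_MAX_ALLOWED;
        int err;

        do {
            err = send_once();
            if (err == -EAGAIN && --retries < 0) {
                fprintf(stderr, "send reply failed: %d\n", err);
                break;   /* give up after the retry budget */
            }
        } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);

        return 0;
    }
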
2509 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2510 +index 7d868d37ab5f..345b18d52ec6 100644
2511 +--- a/drivers/scsi/sd.c
2512 ++++ b/drivers/scsi/sd.c
2513 +@@ -1969,9 +1969,13 @@ static int sd_done(struct scsi_cmnd *SCpnt)
2514 + }
2515 + break;
2516 + case REQ_OP_ZONE_REPORT:
2517 ++ /* To avoid that the block layer performs an incorrect
2518 ++ * bio_advance() call and restart of the remainder of
2519 ++ * incomplete report zone BIOs, always indicate a full
2520 ++ * completion of REQ_OP_ZONE_REPORT.
2521 ++ */
2522 + if (!result) {
2523 +- good_bytes = scsi_bufflen(SCpnt)
2524 +- - scsi_get_resid(SCpnt);
2525 ++ good_bytes = scsi_bufflen(SCpnt);
2526 + scsi_set_resid(SCpnt, 0);
2527 + } else {
2528 + good_bytes = 0;
2529 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2530 +index 317d0f3f7a14..14bd54d0e79d 100644
2531 +--- a/drivers/target/iscsi/iscsi_target.c
2532 ++++ b/drivers/target/iscsi/iscsi_target.c
2533 +@@ -4123,9 +4123,6 @@ int iscsit_close_connection(
2534 + iscsit_stop_nopin_response_timer(conn);
2535 + iscsit_stop_nopin_timer(conn);
2536 +
2537 +- if (conn->conn_transport->iscsit_wait_conn)
2538 +- conn->conn_transport->iscsit_wait_conn(conn);
2539 +-
2540 + /*
2541 + * During Connection recovery drop unacknowledged out of order
2542 + * commands for this connection, and prepare the other commands
2543 +@@ -4211,6 +4208,9 @@ int iscsit_close_connection(
2544 + target_sess_cmd_list_set_waiting(sess->se_sess);
2545 + target_wait_for_sess_cmds(sess->se_sess);
2546 +
2547 ++ if (conn->conn_transport->iscsit_wait_conn)
2548 ++ conn->conn_transport->iscsit_wait_conn(conn);
2549 ++
2550 + ahash_request_free(conn->conn_tx_hash);
2551 + if (conn->conn_rx_hash) {
2552 + struct crypto_ahash *tfm;
2553 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
2554 +index ee07162d35c7..cce0e23b2454 100644