Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 30 Sep 2021 10:49:52 +0000
Message-Id: 1632998976.c1eec10c568d0a28f11c2ed5f9141dbae3e66fa2.mpagano@gentoo
1 commit: c1eec10c568d0a28f11c2ed5f9141dbae3e66fa2
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Thu Sep 30 10:49:36 2021 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Thu Sep 30 10:49:36 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c1eec10c
7
8 Linux patch 5.4.150
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1149_linux-5.4.150.patch | 2559 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2563 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 734dc7b..a878738 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -639,6 +639,10 @@ Patch: 1148_linux-5.4.149.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.149
23
24 +Patch: 1149_linux-5.4.150.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.150
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1149_linux-5.4.150.patch b/1149_linux-5.4.150.patch
33 new file mode 100644
34 index 0000000..a01e3d7
35 --- /dev/null
36 +++ b/1149_linux-5.4.150.patch
37 @@ -0,0 +1,2559 @@
38 +diff --git a/Makefile b/Makefile
39 +index 1834f47fbaf61..c6b3a3d62f6ca 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 149
47 ++SUBLEVEL = 150
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
52 +index 103270d5a9fc6..66a384a4ddbad 100644
53 +--- a/arch/alpha/include/asm/io.h
54 ++++ b/arch/alpha/include/asm/io.h
55 +@@ -61,7 +61,7 @@ extern inline void set_hae(unsigned long new_hae)
56 + * Change virtual addresses to physical addresses and vv.
57 + */
58 + #ifdef USE_48_BIT_KSEG
59 +-static inline unsigned long virt_to_phys(void *address)
60 ++static inline unsigned long virt_to_phys(volatile void *address)
61 + {
62 + return (unsigned long)address - IDENT_ADDR;
63 + }
64 +@@ -71,7 +71,7 @@ static inline void * phys_to_virt(unsigned long address)
65 + return (void *) (address + IDENT_ADDR);
66 + }
67 + #else
68 +-static inline unsigned long virt_to_phys(void *address)
69 ++static inline unsigned long virt_to_phys(volatile void *address)
70 + {
71 + unsigned long phys = (unsigned long)address;
72 +
73 +@@ -107,7 +107,7 @@ static inline void * phys_to_virt(unsigned long address)
74 + extern unsigned long __direct_map_base;
75 + extern unsigned long __direct_map_size;
76 +
77 +-static inline unsigned long __deprecated virt_to_bus(void *address)
78 ++static inline unsigned long __deprecated virt_to_bus(volatile void *address)
79 + {
80 + unsigned long phys = virt_to_phys(address);
81 + unsigned long bus = phys + __direct_map_base;
82 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
83 +index 025e02d23da9b..de0eabff29353 100644
84 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
85 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
86 +@@ -138,6 +138,23 @@
87 + max-link-speed = <2>;
88 + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
89 + phys = <&comphy1 0>;
90 ++ /*
91 ++ * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
92 ++ * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
93 ++ * 2 size cells and also expects that the second range starts at 16 MB offset. If these
94 ++ * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
95 ++ * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
96 ++ * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
97 ++ * This bug is not present in U-Boot ports for other Armada 3700 devices and is fixed in
98 ++ * U-Boot version 2021.07. See relevant U-Boot commits (the last one contains fix):
99 ++ * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
100 ++ * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
101 ++ * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
102 ++ */
103 ++ #address-cells = <3>;
104 ++ #size-cells = <2>;
105 ++ ranges = <0x81000000 0 0xe8000000 0 0xe8000000 0 0x01000000 /* Port 0 IO */
106 ++ 0x82000000 0 0xe9000000 0 0xe9000000 0 0x07000000>; /* Port 0 MEM */
107 +
108 + /* enabled by U-Boot if PCIe module is present */
109 + status = "disabled";
110 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
111 +index 52767037e0494..c28611c1c251a 100644
112 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
113 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
114 +@@ -487,8 +487,15 @@
115 + #interrupt-cells = <1>;
116 + msi-parent = <&pcie0>;
117 + msi-controller;
118 +- ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
119 +- 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
120 ++ /*
121 ++ * The 128 MiB address range [0xe8000000-0xf0000000] is
122 ++ * dedicated for PCIe and can be assigned to 8 windows
123 ++ * with size a power of two. Use one 64 KiB window for
124 ++ * IO at the end and the remaining seven windows
125 ++ * (totaling 127 MiB) for MEM.
126 ++ */
127 ++ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
128 ++ 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
129 + interrupt-map-mask = <0 0 0 7>;
130 + interrupt-map = <0 0 0 1 &pcie_intc 0>,
131 + <0 0 0 2 &pcie_intc 1>,
132 +diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
133 +index 7d7cfa128b71b..f61ef46ebff74 100644
134 +--- a/arch/arm64/kernel/process.c
135 ++++ b/arch/arm64/kernel/process.c
136 +@@ -56,7 +56,7 @@
137 +
138 + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
139 + #include <linux/stackprotector.h>
140 +-unsigned long __stack_chk_guard __read_mostly;
141 ++unsigned long __stack_chk_guard __ro_after_init;
142 + EXPORT_SYMBOL(__stack_chk_guard);
143 + #endif
144 +
145 +diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
146 +index 8a6dc6e5a279c..8ab3c350bd530 100644
147 +--- a/arch/m68k/include/asm/raw_io.h
148 ++++ b/arch/m68k/include/asm/raw_io.h
149 +@@ -17,21 +17,21 @@
150 + * two accesses to memory, which may be undesirable for some devices.
151 + */
152 + #define in_8(addr) \
153 +- ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
154 ++ ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
155 + #define in_be16(addr) \
156 +- ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
157 ++ ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
158 + #define in_be32(addr) \
159 +- ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
160 ++ ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
161 + #define in_le16(addr) \
162 +- ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
163 ++ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
164 + #define in_le32(addr) \
165 +- ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
166 ++ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
167 +
168 +-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
169 +-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
170 +-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
171 +-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
172 +-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
173 ++#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
174 ++#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
175 ++#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
176 ++#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
177 ++#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
178 +
179 + #define raw_inb in_8
180 + #define raw_inw in_be16
181 +diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
182 +index 93caf17ac5e2f..9ebf3b0413d5f 100644
183 +--- a/arch/parisc/include/asm/page.h
184 ++++ b/arch/parisc/include/asm/page.h
185 +@@ -181,7 +181,7 @@ extern int npmem_ranges;
186 + #include <asm-generic/getorder.h>
187 + #include <asm/pdc.h>
188 +
189 +-#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
190 ++#define PAGE0 ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
191 +
192 + /* DEFINITION OF THE ZERO-PAGE (PAG0) */
193 + /* based on work by Jason Eckhardt (jason@×××××××.com) */
194 +diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
195 +index f89603855f1ec..b87e0002131dd 100644
196 +--- a/arch/sparc/kernel/ioport.c
197 ++++ b/arch/sparc/kernel/ioport.c
198 +@@ -356,7 +356,9 @@ err_nomem:
199 + void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
200 + dma_addr_t dma_addr, unsigned long attrs)
201 + {
202 +- if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
203 ++ size = PAGE_ALIGN(size);
204 ++
205 ++ if (!sparc_dma_free_resource(cpu_addr, size))
206 + return;
207 +
208 + dma_make_coherent(dma_addr, size);
209 +diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
210 +index 8e645ddac58e2..30f171b7b00c2 100644
211 +--- a/arch/sparc/kernel/mdesc.c
212 ++++ b/arch/sparc/kernel/mdesc.c
213 +@@ -39,6 +39,7 @@ struct mdesc_hdr {
214 + u32 node_sz; /* node block size */
215 + u32 name_sz; /* name block size */
216 + u32 data_sz; /* data block size */
217 ++ char data[];
218 + } __attribute__((aligned(16)));
219 +
220 + struct mdesc_elem {
221 +@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
222 +
223 + static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
224 + {
225 +- return (struct mdesc_elem *) (mdesc + 1);
226 ++ return (struct mdesc_elem *) mdesc->data;
227 + }
228 +
229 + static void *name_block(struct mdesc_hdr *mdesc)
230 +diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
231 +index b99074ca5e686..65cf405cd9753 100644
232 +--- a/arch/x86/xen/enlighten_pv.c
233 ++++ b/arch/x86/xen/enlighten_pv.c
234 +@@ -727,8 +727,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
235 + preempt_enable();
236 + }
237 +
238 +-static void xen_convert_trap_info(const struct desc_ptr *desc,
239 +- struct trap_info *traps)
240 ++static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
241 ++ struct trap_info *traps, bool full)
242 + {
243 + unsigned in, out, count;
244 +
245 +@@ -738,17 +738,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
246 + for (in = out = 0; in < count; in++) {
247 + gate_desc *entry = (gate_desc *)(desc->address) + in;
248 +
249 +- if (cvt_gate_to_trap(in, entry, &traps[out]))
250 ++ if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
251 + out++;
252 + }
253 +- traps[out].address = 0;
254 ++
255 ++ return out;
256 + }
257 +
258 + void xen_copy_trap_info(struct trap_info *traps)
259 + {
260 + const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
261 +
262 +- xen_convert_trap_info(desc, traps);
263 ++ xen_convert_trap_info(desc, traps, true);
264 + }
265 +
266 + /* Load a new IDT into Xen. In principle this can be per-CPU, so we
267 +@@ -758,6 +759,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
268 + {
269 + static DEFINE_SPINLOCK(lock);
270 + static struct trap_info traps[257];
271 ++ unsigned out;
272 +
273 + trace_xen_cpu_load_idt(desc);
274 +
275 +@@ -765,7 +767,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
276 +
277 + memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
278 +
279 +- xen_convert_trap_info(desc, traps);
280 ++ out = xen_convert_trap_info(desc, traps, false);
281 ++ memset(&traps[out], 0, sizeof(traps[0]));
282 +
283 + xen_mc_flush();
284 + if (HYPERVISOR_set_trap_table(traps))
285 +diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
286 +index cb3d44d200055..dde8d0acfb34f 100644
287 +--- a/block/blk-cgroup.c
288 ++++ b/block/blk-cgroup.c
289 +@@ -1462,10 +1462,14 @@ enomem:
290 + /* alloc failed, nothing's initialized yet, free everything */
291 + spin_lock_irq(&q->queue_lock);
292 + list_for_each_entry(blkg, &q->blkg_list, q_node) {
293 ++ struct blkcg *blkcg = blkg->blkcg;
294 ++
295 ++ spin_lock(&blkcg->lock);
296 + if (blkg->pd[pol->plid]) {
297 + pol->pd_free_fn(blkg->pd[pol->plid]);
298 + blkg->pd[pol->plid] = NULL;
299 + }
300 ++ spin_unlock(&blkcg->lock);
301 + }
302 + spin_unlock_irq(&q->queue_lock);
303 + ret = -ENOMEM;
304 +@@ -1497,12 +1501,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
305 + __clear_bit(pol->plid, q->blkcg_pols);
306 +
307 + list_for_each_entry(blkg, &q->blkg_list, q_node) {
308 ++ struct blkcg *blkcg = blkg->blkcg;
309 ++
310 ++ spin_lock(&blkcg->lock);
311 + if (blkg->pd[pol->plid]) {
312 + if (pol->pd_offline_fn)
313 + pol->pd_offline_fn(blkg->pd[pol->plid]);
314 + pol->pd_free_fn(blkg->pd[pol->plid]);
315 + blkg->pd[pol->plid] = NULL;
316 + }
317 ++ spin_unlock(&blkcg->lock);
318 + }
319 +
320 + spin_unlock_irq(&q->queue_lock);
321 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
322 +index 89b590c9573ff..4eaef780844ea 100644
323 +--- a/drivers/android/binder.c
324 ++++ b/drivers/android/binder.c
325 +@@ -2239,6 +2239,7 @@ static void binder_deferred_fd_close(int fd)
326 + }
327 +
328 + static void binder_transaction_buffer_release(struct binder_proc *proc,
329 ++ struct binder_thread *thread,
330 + struct binder_buffer *buffer,
331 + binder_size_t failed_at,
332 + bool is_failure)
333 +@@ -2398,8 +2399,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
334 + &proc->alloc, &fd, buffer,
335 + offset, sizeof(fd));
336 + WARN_ON(err);
337 +- if (!err)
338 ++ if (!err) {
339 + binder_deferred_fd_close(fd);
340 ++ /*
341 ++ * Need to make sure the thread goes
342 ++ * back to userspace to complete the
343 ++ * deferred close
344 ++ */
345 ++ if (thread)
346 ++ thread->looper_need_return = true;
347 ++ }
348 + }
349 + } break;
350 + default:
351 +@@ -3469,7 +3478,7 @@ err_bad_parent:
352 + err_copy_data_failed:
353 + binder_free_txn_fixups(t);
354 + trace_binder_transaction_failed_buffer_release(t->buffer);
355 +- binder_transaction_buffer_release(target_proc, t->buffer,
356 ++ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
357 + buffer_offset, true);
358 + if (target_node)
359 + binder_dec_node_tmpref(target_node);
360 +@@ -3546,7 +3555,9 @@ err_invalid_target_handle:
361 + * Cleanup buffer and free it.
362 + */
363 + static void
364 +-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
365 ++binder_free_buf(struct binder_proc *proc,
366 ++ struct binder_thread *thread,
367 ++ struct binder_buffer *buffer)
368 + {
369 + binder_inner_proc_lock(proc);
370 + if (buffer->transaction) {
371 +@@ -3574,7 +3585,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
372 + binder_node_inner_unlock(buf_node);
373 + }
374 + trace_binder_transaction_buffer_release(buffer);
375 +- binder_transaction_buffer_release(proc, buffer, 0, false);
376 ++ binder_transaction_buffer_release(proc, thread, buffer, 0, false);
377 + binder_alloc_free_buf(&proc->alloc, buffer);
378 + }
379 +
380 +@@ -3775,7 +3786,7 @@ static int binder_thread_write(struct binder_proc *proc,
381 + proc->pid, thread->pid, (u64)data_ptr,
382 + buffer->debug_id,
383 + buffer->transaction ? "active" : "finished");
384 +- binder_free_buf(proc, buffer);
385 ++ binder_free_buf(proc, thread, buffer);
386 + break;
387 + }
388 +
389 +@@ -4463,7 +4474,7 @@ retry:
390 + buffer->transaction = NULL;
391 + binder_cleanup_transaction(t, "fd fixups failed",
392 + BR_FAILED_REPLY);
393 +- binder_free_buf(proc, buffer);
394 ++ binder_free_buf(proc, thread, buffer);
395 + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
396 + "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
397 + proc->pid, thread->pid,
398 +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
399 +index 880ffd8337187..6becf3363ad57 100644
400 +--- a/drivers/edac/synopsys_edac.c
401 ++++ b/drivers/edac/synopsys_edac.c
402 +@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
403 +
404 + for (j = 0; j < csi->nr_channels; j++) {
405 + dimm = csi->channels[j]->dimm;
406 +- dimm->edac_mode = EDAC_FLAG_SECDED;
407 ++ dimm->edac_mode = EDAC_SECDED;
408 + dimm->mtype = p_data->get_mtype(priv->baseaddr);
409 + dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
410 + dimm->grain = SYNPS_EDAC_ERR_GRAIN;
411 +diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
412 +index 4d8a876415874..37e54e375528e 100644
413 +--- a/drivers/fpga/machxo2-spi.c
414 ++++ b/drivers/fpga/machxo2-spi.c
415 +@@ -223,8 +223,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
416 + goto fail;
417 +
418 + get_status(spi, &status);
419 +- if (test_bit(FAIL, &status))
420 ++ if (test_bit(FAIL, &status)) {
421 ++ ret = -EINVAL;
422 + goto fail;
423 ++ }
424 + dump_status_reg(&status);
425 +
426 + spi_message_init(&msg);
427 +@@ -310,6 +312,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
428 + dump_status_reg(&status);
429 + if (!test_bit(DONE, &status)) {
430 + machxo2_cleanup(mgr);
431 ++ ret = -EINVAL;
432 + goto fail;
433 + }
434 +
435 +@@ -331,6 +334,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
436 + break;
437 + if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
438 + machxo2_cleanup(mgr);
439 ++ ret = -EINVAL;
440 + goto fail;
441 + }
442 + } while (1);
443 +diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
444 +index 93cdcc41e9fbc..0f1cf50b4dcea 100644
445 +--- a/drivers/gpio/gpio-uniphier.c
446 ++++ b/drivers/gpio/gpio-uniphier.c
447 +@@ -188,7 +188,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
448 +
449 + uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
450 +
451 +- return irq_chip_mask_parent(data);
452 ++ irq_chip_mask_parent(data);
453 + }
454 +
455 + static void uniphier_gpio_irq_unmask(struct irq_data *data)
456 +@@ -198,7 +198,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
457 +
458 + uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
459 +
460 +- return irq_chip_unmask_parent(data);
461 ++ irq_chip_unmask_parent(data);
462 + }
463 +
464 + static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
465 +diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
466 +index 97f9c001d8ff7..20f44ef9c4c9b 100644
467 +--- a/drivers/irqchip/Kconfig
468 ++++ b/drivers/irqchip/Kconfig
469 +@@ -415,6 +415,7 @@ config MESON_IRQ_GPIO
470 + config GOLDFISH_PIC
471 + bool "Goldfish programmable interrupt controller"
472 + depends on MIPS && (GOLDFISH || COMPILE_TEST)
473 ++ select GENERIC_IRQ_CHIP
474 + select IRQ_DOMAIN
475 + help
476 + Say yes here to enable Goldfish interrupt controller driver used
477 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
478 +index f298313b87ac7..398c54387988a 100644
479 +--- a/drivers/irqchip/irq-gic-v3-its.c
480 ++++ b/drivers/irqchip/irq-gic-v3-its.c
481 +@@ -3123,7 +3123,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
482 +
483 + if (err) {
484 + if (i > 0)
485 +- its_vpe_irq_domain_free(domain, virq, i - 1);
486 ++ its_vpe_irq_domain_free(domain, virq, i);
487 +
488 + its_lpi_free(bitmap, base, nr_ids);
489 + its_free_prop_table(vprop_page);
490 +diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
491 +index b72e82efaee52..c799bb81ab03d 100644
492 +--- a/drivers/mcb/mcb-core.c
493 ++++ b/drivers/mcb/mcb-core.c
494 +@@ -277,8 +277,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
495 +
496 + bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
497 + if (bus_nr < 0) {
498 +- rc = bus_nr;
499 +- goto err_free;
500 ++ kfree(bus);
501 ++ return ERR_PTR(bus_nr);
502 + }
503 +
504 + bus->bus_nr = bus_nr;
505 +@@ -293,12 +293,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
506 + dev_set_name(&bus->dev, "mcb:%d", bus_nr);
507 + rc = device_add(&bus->dev);
508 + if (rc)
509 +- goto err_free;
510 ++ goto err_put;
511 +
512 + return bus;
513 +-err_free:
514 +- put_device(carrier);
515 +- kfree(bus);
516 ++
517 ++err_put:
518 ++ put_device(&bus->dev);
519 + return ERR_PTR(rc);
520 + }
521 + EXPORT_SYMBOL_GPL(mcb_alloc_bus);
522 +diff --git a/drivers/md/md.c b/drivers/md/md.c
523 +index 761d43829b2b7..c178b2f406de3 100644
524 +--- a/drivers/md/md.c
525 ++++ b/drivers/md/md.c
526 +@@ -5535,10 +5535,6 @@ static int md_alloc(dev_t dev, char *name)
527 + */
528 + disk->flags |= GENHD_FL_EXT_DEVT;
529 + mddev->gendisk = disk;
530 +- /* As soon as we call add_disk(), another thread could get
531 +- * through to md_open, so make sure it doesn't get too far
532 +- */
533 +- mutex_lock(&mddev->open_mutex);
534 + add_disk(disk);
535 +
536 + error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
537 +@@ -5553,7 +5549,6 @@ static int md_alloc(dev_t dev, char *name)
538 + if (mddev->kobj.sd &&
539 + sysfs_create_group(&mddev->kobj, &md_bitmap_group))
540 + pr_debug("pointless warning\n");
541 +- mutex_unlock(&mddev->open_mutex);
542 + abort:
543 + mutex_unlock(&disks_mutex);
544 + if (!error && mddev->kobj.sd) {
545 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
546 +index e67f07faca789..7f590a9e3af79 100644
547 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
548 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
549 +@@ -372,7 +372,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
550 + * netif_tx_queue_stopped().
551 + */
552 + smp_mb();
553 +- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
554 ++ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
555 + netif_tx_wake_queue(txq);
556 + return false;
557 + }
558 +@@ -701,7 +701,7 @@ next_tx_int:
559 + smp_mb();
560 +
561 + if (unlikely(netif_tx_queue_stopped(txq)) &&
562 +- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
563 ++ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
564 + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
565 + netif_tx_wake_queue(txq);
566 + }
567 +@@ -2206,7 +2206,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
568 + if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
569 + tx_pkts++;
570 + /* return full budget so NAPI will complete. */
571 +- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
572 ++ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
573 + rx_pkts = budget;
574 + raw_cons = NEXT_RAW_CMP(raw_cons);
575 + if (budget)
576 +@@ -3329,7 +3329,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
577 + u16 i;
578 +
579 + bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
580 +- MAX_SKB_FRAGS + 1);
581 ++ BNXT_MIN_TX_DESC_CNT);
582 +
583 + for (i = 0; i < bp->tx_nr_rings; i++) {
584 + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
585 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
586 +index 510ff01bdad8c..8ba369c0100b4 100644
587 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
588 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
589 +@@ -601,6 +601,11 @@ struct nqe_cn {
590 + #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
591 + #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
592 +
593 ++/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
594 ++ * BD because the first TX BD is always a long BD.
595 ++ */
596 ++#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
597 ++
598 + #define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
599 + #define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
600 +
601 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
602 +index 2118523782246..97aff84fd1d17 100644
603 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
604 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
605 +@@ -744,7 +744,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
606 +
607 + if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
608 + (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
609 +- (ering->tx_pending <= MAX_SKB_FRAGS))
610 ++ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
611 + return -EINVAL;
612 +
613 + if (netif_running(dev))
614 +diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
615 +index 617b3b728dd04..94f3babfad309 100644
616 +--- a/drivers/net/ethernet/cadence/macb_pci.c
617 ++++ b/drivers/net/ethernet/cadence/macb_pci.c
618 +@@ -112,9 +112,9 @@ static void macb_remove(struct pci_dev *pdev)
619 + struct platform_device *plat_dev = pci_get_drvdata(pdev);
620 + struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
621 +
622 +- platform_device_unregister(plat_dev);
623 + clk_unregister(plat_data->pclk);
624 + clk_unregister(plat_data->hclk);
625 ++ platform_device_unregister(plat_dev);
626 + }
627 +
628 + static const struct pci_device_id dev_id_table[] = {
629 +diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
630 +index b77eaf31bd4ed..cee77326e7e85 100644
631 +--- a/drivers/net/ethernet/freescale/enetc/enetc.c
632 ++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
633 +@@ -1222,7 +1222,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
634 + static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
635 + {
636 + struct pci_dev *pdev = priv->si->pdev;
637 +- cpumask_t cpu_mask;
638 + int i, j, err;
639 +
640 + for (i = 0; i < priv->bdr_int_num; i++) {
641 +@@ -1249,9 +1248,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
642 +
643 + enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
644 + }
645 +- cpumask_clear(&cpu_mask);
646 +- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
647 +- irq_set_affinity_hint(irq, &cpu_mask);
648 ++ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
649 + }
650 +
651 + return 0;
652 +diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
653 +index 92929750f8325..54d5b402b0e8d 100644
654 +--- a/drivers/net/ethernet/i825xx/82596.c
655 ++++ b/drivers/net/ethernet/i825xx/82596.c
656 +@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
657 + err = -ENODEV;
658 + goto out;
659 + }
660 +- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
661 ++ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
662 + dev->base_addr = MVME_I596_BASE;
663 + dev->irq = (unsigned) MVME16x_IRQ_I596;
664 + goto found;
665 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
666 +index cd165e52ed33c..d029179a4804c 100644
667 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
668 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
669 +@@ -371,6 +371,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
670 + int nhoff = skb_network_offset(skb);
671 + int ret = 0;
672 +
673 ++ if (skb->encapsulation)
674 ++ return -EPROTONOSUPPORT;
675 ++
676 + if (skb->protocol != htons(ETH_P_IP))
677 + return -EPROTONOSUPPORT;
678 +
679 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
680 +index 9adbaccd0c5ed..934740d604709 100644
681 +--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
682 ++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
683 +@@ -1307,6 +1307,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
684 + prev_weight = weight;
685 +
686 + while (weight) {
687 ++ /* If the HW device is during recovery, all resources are
688 ++ * immediately reset without receiving a per-cid indication
689 ++ * from HW. In this case we don't expect the cid_map to be
690 ++ * cleared.
691 ++ */
692 ++ if (p_hwfn->cdev->recov_in_prog)
693 ++ return 0;
694 ++
695 + msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
696 +
697 + weight = bitmap_weight(bmap->bitmap, bmap->max_count);
698 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
699 +index 83817bb50e9fa..6e6563b51d688 100644
700 +--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
701 ++++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
702 +@@ -107,6 +107,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
703 + * Beyond the added delay we clear the bitmap anyway.
704 + */
705 + while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
706 ++ /* If the HW device is during recovery, all resources are
707 ++ * immediately reset without receiving a per-cid indication
708 ++ * from HW. In this case we don't expect the cid bitmap to be
709 ++ * cleared.
710 ++ */
711 ++ if (p_hwfn->cdev->recov_in_prog)
712 ++ return;
713 ++
714 + msleep(100);
715 + if (wait_count++ > 20) {
716 + DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
717 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
718 +index 4e7cfd3bfcd2e..e09851c7da9b8 100644
719 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
720 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
721 +@@ -225,7 +225,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
722 + priv->clk_csr = STMMAC_CSR_100_150M;
723 + else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
724 + priv->clk_csr = STMMAC_CSR_150_250M;
725 +- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
726 ++ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
727 + priv->clk_csr = STMMAC_CSR_250_300M;
728 + }
729 +
730 +diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
731 +index da13683d52d1a..bd0beb16d68a9 100644
732 +--- a/drivers/net/hamradio/6pack.c
733 ++++ b/drivers/net/hamradio/6pack.c
734 +@@ -68,9 +68,9 @@
735 + #define SIXP_DAMA_OFF 0
736 +
737 + /* default level 2 parameters */
738 +-#define SIXP_TXDELAY (HZ/4) /* in 1 s */
739 ++#define SIXP_TXDELAY 25 /* 250 ms */
740 + #define SIXP_PERSIST 50 /* in 256ths */
741 +-#define SIXP_SLOTTIME (HZ/10) /* in 1 s */
742 ++#define SIXP_SLOTTIME 10 /* 100 ms */
743 + #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */
744 + #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */
745 +
746 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
747 +index 22450c4a92251..9ad1f093c4ae1 100644
748 +--- a/drivers/net/usb/hso.c
749 ++++ b/drivers/net/usb/hso.c
750 +@@ -2704,14 +2704,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
751 +
752 + serial = kzalloc(sizeof(*serial), GFP_KERNEL);
753 + if (!serial)
754 +- goto exit;
755 ++ goto err_free_dev;
756 +
757 + hso_dev->port_data.dev_serial = serial;
758 + serial->parent = hso_dev;
759 +
760 + if (hso_serial_common_create
761 + (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
762 +- goto exit;
763 ++ goto err_free_serial;
764 +
765 + serial->tx_data_length--;
766 + serial->write_data = hso_mux_serial_write_data;
767 +@@ -2727,11 +2727,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
768 + /* done, return it */
769 + return hso_dev;
770 +
771 +-exit:
772 +- if (serial) {
773 +- tty_unregister_device(tty_drv, serial->minor);
774 +- kfree(serial);
775 +- }
776 ++err_free_serial:
777 ++ kfree(serial);
778 ++err_free_dev:
779 + kfree(hso_dev);
780 + return NULL;
781 +
782 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
783 +index 590b040e90a34..016a67fd41989 100644
784 +--- a/drivers/nvme/host/multipath.c
785 ++++ b/drivers/nvme/host/multipath.c
786 +@@ -522,14 +522,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
787 +
788 + down_read(&ctrl->namespaces_rwsem);
789 + list_for_each_entry(ns, &ctrl->namespaces, list) {
790 +- unsigned nsid = le32_to_cpu(desc->nsids[n]);
791 +-
792 ++ unsigned nsid;
793 ++again:
794 ++ nsid = le32_to_cpu(desc->nsids[n]);
795 + if (ns->head->ns_id < nsid)
796 + continue;
797 + if (ns->head->ns_id == nsid)
798 + nvme_update_ns_ana_state(desc, ns);
799 + if (++n == nr_nsids)
800 + break;
801 ++ if (ns->head->ns_id > nsid)
802 ++ goto again;
803 + }
804 + up_read(&ctrl->namespaces_rwsem);
805 + return 0;
806 +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
807 +index ccb44f2eb2407..af3a28623e869 100644
808 +--- a/drivers/platform/x86/intel_punit_ipc.c
809 ++++ b/drivers/platform/x86/intel_punit_ipc.c
810 +@@ -8,7 +8,6 @@
811 + * which provide mailbox interface for power management usage.
812 + */
813 +
814 +-#include <linux/acpi.h>
815 + #include <linux/bitops.h>
816 + #include <linux/delay.h>
817 + #include <linux/device.h>
818 +@@ -335,7 +334,7 @@ static struct platform_driver intel_punit_ipc_driver = {
819 + .remove = intel_punit_ipc_remove,
820 + .driver = {
821 + .name = "intel_punit_ipc",
822 +- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
823 ++ .acpi_match_table = punit_ipc_acpi_ids,
824 + },
825 + };
826 +
827 +diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
828 +index 45db19e31b348..f0ecfe565660a 100644
829 +--- a/drivers/scsi/lpfc/lpfc_attr.c
830 ++++ b/drivers/scsi/lpfc/lpfc_attr.c
831 +@@ -5881,7 +5881,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
832 + len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
833 + phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
834 +
835 +- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
836 ++ len += scnprintf(buf + len, PAGE_SIZE - len,
837 ++ "Cfg: %d SCSI: %d NVME: %d\n",
838 + phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
839 + phba->cfg_nvme_seg_cnt);
840 + return len;
841 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
842 +index 643b8ae36cbeb..5dae7ac0d3efe 100644
843 +--- a/drivers/scsi/qla2xxx/qla_init.c
844 ++++ b/drivers/scsi/qla2xxx/qla_init.c
845 +@@ -6803,7 +6803,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
846 + return 0;
847 + break;
848 + case QLA2XXX_INI_MODE_DUAL:
849 +- if (!qla_dual_mode_enabled(vha))
850 ++ if (!qla_dual_mode_enabled(vha) &&
851 ++ !qla_ini_mode_enabled(vha))
852 + return 0;
853 + break;
854 + case QLA2XXX_INI_MODE_ENABLED:
855 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
856 +index 77bba91b57147..6f21cb75d95fd 100644
857 +--- a/drivers/scsi/scsi_transport_iscsi.c
858 ++++ b/drivers/scsi/scsi_transport_iscsi.c
859 +@@ -434,9 +434,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
860 + struct iscsi_transport *t = iface->transport;
861 + int param = -1;
862 +
863 +- if (attr == &dev_attr_iface_enabled.attr)
864 +- param = ISCSI_NET_PARAM_IFACE_ENABLE;
865 +- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
866 ++ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
867 + param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
868 + else if (attr == &dev_attr_iface_header_digest.attr)
869 + param = ISCSI_IFACE_PARAM_HDRDGST_EN;
870 +@@ -476,7 +474,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
871 + if (param != -1)
872 + return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
873 +
874 +- if (attr == &dev_attr_iface_vlan_id.attr)
875 ++ if (attr == &dev_attr_iface_enabled.attr)
876 ++ param = ISCSI_NET_PARAM_IFACE_ENABLE;
877 ++ else if (attr == &dev_attr_iface_vlan_id.attr)
878 + param = ISCSI_NET_PARAM_VLAN_ID;
879 + else if (attr == &dev_attr_iface_vlan_priority.attr)
880 + param = ISCSI_NET_PARAM_VLAN_PRIORITY;
881 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
882 +index 2a1905c43a0b7..9b59539c87359 100644
883 +--- a/drivers/spi/spi-tegra20-slink.c
884 ++++ b/drivers/spi/spi-tegra20-slink.c
885 +@@ -1205,7 +1205,7 @@ static int tegra_slink_resume(struct device *dev)
886 + }
887 + #endif
888 +
889 +-static int tegra_slink_runtime_suspend(struct device *dev)
890 ++static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
891 + {
892 + struct spi_master *master = dev_get_drvdata(dev);
893 + struct tegra_slink_data *tspi = spi_master_get_devdata(master);
894 +@@ -1217,7 +1217,7 @@ static int tegra_slink_runtime_suspend(struct device *dev)
895 + return 0;
896 + }
897 +
898 +-static int tegra_slink_runtime_resume(struct device *dev)
899 ++static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
900 + {
901 + struct spi_master *master = dev_get_drvdata(dev);
902 + struct tegra_slink_data *tspi = spi_master_get_devdata(master);
903 +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
904 +index fc1bd68889c91..0e6bebc20695d 100644
905 +--- a/drivers/staging/greybus/uart.c
906 ++++ b/drivers/staging/greybus/uart.c
907 +@@ -789,6 +789,17 @@ out:
908 + gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
909 + }
910 +
911 ++static void gb_tty_port_destruct(struct tty_port *port)
912 ++{
913 ++ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
914 ++
915 ++ if (gb_tty->minor != GB_NUM_MINORS)
916 ++ release_minor(gb_tty);
917 ++ kfifo_free(&gb_tty->write_fifo);
918 ++ kfree(gb_tty->buffer);
919 ++ kfree(gb_tty);
920 ++}
921 ++
922 + static const struct tty_operations gb_ops = {
923 + .install = gb_tty_install,
924 + .open = gb_tty_open,
925 +@@ -814,6 +825,7 @@ static const struct tty_port_operations gb_port_ops = {
926 + .dtr_rts = gb_tty_dtr_rts,
927 + .activate = gb_tty_port_activate,
928 + .shutdown = gb_tty_port_shutdown,
929 ++ .destruct = gb_tty_port_destruct,
930 + };
931 +
932 + static int gb_uart_probe(struct gbphy_device *gbphy_dev,
933 +@@ -826,17 +838,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
934 + int retval;
935 + int minor;
936 +
937 +- gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
938 +- if (!gb_tty)
939 +- return -ENOMEM;
940 +-
941 + connection = gb_connection_create(gbphy_dev->bundle,
942 + le16_to_cpu(gbphy_dev->cport_desc->id),
943 + gb_uart_request_handler);
944 +- if (IS_ERR(connection)) {
945 +- retval = PTR_ERR(connection);
946 +- goto exit_tty_free;
947 +- }
948 ++ if (IS_ERR(connection))
949 ++ return PTR_ERR(connection);
950 +
951 + max_payload = gb_operation_get_payload_size_max(connection);
952 + if (max_payload < sizeof(struct gb_uart_send_data_request)) {
953 +@@ -844,13 +850,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
954 + goto exit_connection_destroy;
955 + }
956 +
957 ++ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
958 ++ if (!gb_tty) {
959 ++ retval = -ENOMEM;
960 ++ goto exit_connection_destroy;
961 ++ }
962 ++
963 ++ tty_port_init(&gb_tty->port);
964 ++ gb_tty->port.ops = &gb_port_ops;
965 ++ gb_tty->minor = GB_NUM_MINORS;
966 ++
967 + gb_tty->buffer_payload_max = max_payload -
968 + sizeof(struct gb_uart_send_data_request);
969 +
970 + gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
971 + if (!gb_tty->buffer) {
972 + retval = -ENOMEM;
973 +- goto exit_connection_destroy;
974 ++ goto exit_put_port;
975 + }
976 +
977 + INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
978 +@@ -858,7 +874,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
979 + retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
980 + GFP_KERNEL);
981 + if (retval)
982 +- goto exit_buf_free;
983 ++ goto exit_put_port;
984 +
985 + gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
986 + init_completion(&gb_tty->credits_complete);
987 +@@ -872,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
988 + } else {
989 + retval = minor;
990 + }
991 +- goto exit_kfifo_free;
992 ++ goto exit_put_port;
993 + }
994 +
995 + gb_tty->minor = minor;
996 +@@ -881,9 +897,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
997 + init_waitqueue_head(&gb_tty->wioctl);
998 + mutex_init(&gb_tty->mutex);
999 +
1000 +- tty_port_init(&gb_tty->port);
1001 +- gb_tty->port.ops = &gb_port_ops;
1002 +-
1003 + gb_tty->connection = connection;
1004 + gb_tty->gbphy_dev = gbphy_dev;
1005 + gb_connection_set_data(connection, gb_tty);
1006 +@@ -891,7 +904,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
1007 +
1008 + retval = gb_connection_enable_tx(connection);
1009 + if (retval)
1010 +- goto exit_release_minor;
1011 ++ goto exit_put_port;
1012 +
1013 + send_control(gb_tty, gb_tty->ctrlout);
1014 +
1015 +@@ -918,16 +931,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
1016 +
1017 + exit_connection_disable:
1018 + gb_connection_disable(connection);
1019 +-exit_release_minor:
1020 +- release_minor(gb_tty);
1021 +-exit_kfifo_free:
1022 +- kfifo_free(&gb_tty->write_fifo);
1023 +-exit_buf_free:
1024 +- kfree(gb_tty->buffer);
1025 ++exit_put_port:
1026 ++ tty_port_put(&gb_tty->port);
1027 + exit_connection_destroy:
1028 + gb_connection_destroy(connection);
1029 +-exit_tty_free:
1030 +- kfree(gb_tty);
1031 +
1032 + return retval;
1033 + }
1034 +@@ -958,15 +965,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
1035 + gb_connection_disable_rx(connection);
1036 + tty_unregister_device(gb_tty_driver, gb_tty->minor);
1037 +
1038 +- /* FIXME - free transmit / receive buffers */
1039 +-
1040 + gb_connection_disable(connection);
1041 +- tty_port_destroy(&gb_tty->port);
1042 + gb_connection_destroy(connection);
1043 +- release_minor(gb_tty);
1044 +- kfifo_free(&gb_tty->write_fifo);
1045 +- kfree(gb_tty->buffer);
1046 +- kfree(gb_tty);
1047 ++
1048 ++ tty_port_put(&gb_tty->port);
1049 + }
1050 +
1051 + static int gb_tty_init(void)
1052 +diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
1053 +index 576523d0326c8..a902e2a053ee3 100644
1054 +--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
1055 ++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
1056 +@@ -179,7 +179,7 @@ static int tcc_offset_update(unsigned int tcc)
1057 + return 0;
1058 + }
1059 +
1060 +-static unsigned int tcc_offset_save;
1061 ++static int tcc_offset_save = -1;
1062 +
1063 + static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
1064 + struct device_attribute *attr, const char *buf,
1065 +@@ -703,7 +703,8 @@ static int proc_thermal_resume(struct device *dev)
1066 + proc_dev = dev_get_drvdata(dev);
1067 + proc_thermal_read_ppcc(proc_dev);
1068 +
1069 +- tcc_offset_update(tcc_offset_save);
1070 ++ if (tcc_offset_save >= 0)
1071 ++ tcc_offset_update(tcc_offset_save);
1072 +
1073 + return 0;
1074 + }
1075 +diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
1076 +index f526ce31f5a2f..20eab56b02cb9 100644
1077 +--- a/drivers/thermal/thermal_core.c
1078 ++++ b/drivers/thermal/thermal_core.c
1079 +@@ -228,15 +228,14 @@ int thermal_build_list_of_policies(char *buf)
1080 + {
1081 + struct thermal_governor *pos;
1082 + ssize_t count = 0;
1083 +- ssize_t size = PAGE_SIZE;
1084 +
1085 + mutex_lock(&thermal_governor_lock);
1086 +
1087 + list_for_each_entry(pos, &thermal_governor_list, governor_list) {
1088 +- size = PAGE_SIZE - count;
1089 +- count += scnprintf(buf + count, size, "%s ", pos->name);
1090 ++ count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
1091 ++ pos->name);
1092 + }
1093 +- count += scnprintf(buf + count, size, "\n");
1094 ++ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
1095 +
1096 + mutex_unlock(&thermal_governor_lock);
1097 +
1098 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
1099 +index 51b4d8d1dcaca..13db15118cb94 100644
1100 +--- a/drivers/tty/serial/mvebu-uart.c
1101 ++++ b/drivers/tty/serial/mvebu-uart.c
1102 +@@ -164,7 +164,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
1103 + st = readl(port->membase + UART_STAT);
1104 + spin_unlock_irqrestore(&port->lock, flags);
1105 +
1106 +- return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
1107 ++ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
1108 + }
1109 +
1110 + static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
1111 +diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
1112 +index 36f1a4d870eb1..ff345a8e0fcc6 100644
1113 +--- a/drivers/tty/synclink_gt.c
1114 ++++ b/drivers/tty/synclink_gt.c
1115 +@@ -137,37 +137,14 @@ MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
1116 + */
1117 + static struct tty_driver *serial_driver;
1118 +
1119 +-static int open(struct tty_struct *tty, struct file * filp);
1120 +-static void close(struct tty_struct *tty, struct file * filp);
1121 +-static void hangup(struct tty_struct *tty);
1122 +-static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
1123 +-
1124 +-static int write(struct tty_struct *tty, const unsigned char *buf, int count);
1125 +-static int put_char(struct tty_struct *tty, unsigned char ch);
1126 +-static void send_xchar(struct tty_struct *tty, char ch);
1127 + static void wait_until_sent(struct tty_struct *tty, int timeout);
1128 +-static int write_room(struct tty_struct *tty);
1129 +-static void flush_chars(struct tty_struct *tty);
1130 + static void flush_buffer(struct tty_struct *tty);
1131 +-static void tx_hold(struct tty_struct *tty);
1132 + static void tx_release(struct tty_struct *tty);
1133 +
1134 +-static int ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
1135 +-static int chars_in_buffer(struct tty_struct *tty);
1136 +-static void throttle(struct tty_struct * tty);
1137 +-static void unthrottle(struct tty_struct * tty);
1138 +-static int set_break(struct tty_struct *tty, int break_state);
1139 +-
1140 + /*
1141 +- * generic HDLC support and callbacks
1142 ++ * generic HDLC support
1143 + */
1144 +-#if SYNCLINK_GENERIC_HDLC
1145 + #define dev_to_port(D) (dev_to_hdlc(D)->priv)
1146 +-static void hdlcdev_tx_done(struct slgt_info *info);
1147 +-static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
1148 +-static int hdlcdev_init(struct slgt_info *info);
1149 +-static void hdlcdev_exit(struct slgt_info *info);
1150 +-#endif
1151 +
1152 +
1153 + /*
1154 +@@ -186,9 +163,6 @@ struct cond_wait {
1155 + wait_queue_entry_t wait;
1156 + unsigned int data;
1157 + };
1158 +-static void init_cond_wait(struct cond_wait *w, unsigned int data);
1159 +-static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
1160 +-static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
1161 + static void flush_cond_wait(struct cond_wait **head);
1162 +
1163 + /*
1164 +@@ -443,12 +417,8 @@ static void shutdown(struct slgt_info *info);
1165 + static void program_hw(struct slgt_info *info);
1166 + static void change_params(struct slgt_info *info);
1167 +
1168 +-static int register_test(struct slgt_info *info);
1169 +-static int irq_test(struct slgt_info *info);
1170 +-static int loopback_test(struct slgt_info *info);
1171 + static int adapter_test(struct slgt_info *info);
1172 +
1173 +-static void reset_adapter(struct slgt_info *info);
1174 + static void reset_port(struct slgt_info *info);
1175 + static void async_mode(struct slgt_info *info);
1176 + static void sync_mode(struct slgt_info *info);
1177 +@@ -457,41 +427,23 @@ static void rx_stop(struct slgt_info *info);
1178 + static void rx_start(struct slgt_info *info);
1179 + static void reset_rbufs(struct slgt_info *info);
1180 + static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
1181 +-static void rdma_reset(struct slgt_info *info);
1182 + static bool rx_get_frame(struct slgt_info *info);
1183 + static bool rx_get_buf(struct slgt_info *info);
1184 +
1185 + static void tx_start(struct slgt_info *info);
1186 + static void tx_stop(struct slgt_info *info);
1187 + static void tx_set_idle(struct slgt_info *info);
1188 +-static unsigned int free_tbuf_count(struct slgt_info *info);
1189 + static unsigned int tbuf_bytes(struct slgt_info *info);
1190 + static void reset_tbufs(struct slgt_info *info);
1191 + static void tdma_reset(struct slgt_info *info);
1192 + static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
1193 +
1194 +-static void get_signals(struct slgt_info *info);
1195 +-static void set_signals(struct slgt_info *info);
1196 +-static void enable_loopback(struct slgt_info *info);
1197 ++static void get_gtsignals(struct slgt_info *info);
1198 ++static void set_gtsignals(struct slgt_info *info);
1199 + static void set_rate(struct slgt_info *info, u32 data_rate);
1200 +
1201 +-static int bh_action(struct slgt_info *info);
1202 +-static void bh_handler(struct work_struct *work);
1203 + static void bh_transmit(struct slgt_info *info);
1204 +-static void isr_serial(struct slgt_info *info);
1205 +-static void isr_rdma(struct slgt_info *info);
1206 + static void isr_txeom(struct slgt_info *info, unsigned short status);
1207 +-static void isr_tdma(struct slgt_info *info);
1208 +-
1209 +-static int alloc_dma_bufs(struct slgt_info *info);
1210 +-static void free_dma_bufs(struct slgt_info *info);
1211 +-static int alloc_desc(struct slgt_info *info);
1212 +-static void free_desc(struct slgt_info *info);
1213 +-static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
1214 +-static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
1215 +-
1216 +-static int alloc_tmp_rbuf(struct slgt_info *info);
1217 +-static void free_tmp_rbuf(struct slgt_info *info);
1218 +
1219 + static void tx_timeout(struct timer_list *t);
1220 + static void rx_timeout(struct timer_list *t);
1221 +@@ -509,10 +461,6 @@ static int tx_abort(struct slgt_info *info);
1222 + static int rx_enable(struct slgt_info *info, int enable);
1223 + static int modem_input_wait(struct slgt_info *info,int arg);
1224 + static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
1225 +-static int tiocmget(struct tty_struct *tty);
1226 +-static int tiocmset(struct tty_struct *tty,
1227 +- unsigned int set, unsigned int clear);
1228 +-static int set_break(struct tty_struct *tty, int break_state);
1229 + static int get_interface(struct slgt_info *info, int __user *if_mode);
1230 + static int set_interface(struct slgt_info *info, int if_mode);
1231 + static int set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
1232 +@@ -526,9 +474,6 @@ static int set_xctrl(struct slgt_info *info, int if_mode);
1233 + /*
1234 + * driver functions
1235 + */
1236 +-static void add_device(struct slgt_info *info);
1237 +-static void device_init(int adapter_num, struct pci_dev *pdev);
1238 +-static int claim_resources(struct slgt_info *info);
1239 + static void release_resources(struct slgt_info *info);
1240 +
1241 + /*
1242 +@@ -776,7 +721,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1243 + if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
1244 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1245 + spin_lock_irqsave(&info->lock,flags);
1246 +- set_signals(info);
1247 ++ set_gtsignals(info);
1248 + spin_unlock_irqrestore(&info->lock,flags);
1249 + }
1250 +
1251 +@@ -786,7 +731,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1252 + if (!C_CRTSCTS(tty) || !tty_throttled(tty))
1253 + info->signals |= SerialSignal_RTS;
1254 + spin_lock_irqsave(&info->lock,flags);
1255 +- set_signals(info);
1256 ++ set_gtsignals(info);
1257 + spin_unlock_irqrestore(&info->lock,flags);
1258 + }
1259 +
1260 +@@ -1237,7 +1182,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
1261 +
1262 + /* output current serial signal states */
1263 + spin_lock_irqsave(&info->lock,flags);
1264 +- get_signals(info);
1265 ++ get_gtsignals(info);
1266 + spin_unlock_irqrestore(&info->lock,flags);
1267 +
1268 + stat_buf[0] = 0;
1269 +@@ -1337,7 +1282,7 @@ static void throttle(struct tty_struct * tty)
1270 + if (C_CRTSCTS(tty)) {
1271 + spin_lock_irqsave(&info->lock,flags);
1272 + info->signals &= ~SerialSignal_RTS;
1273 +- set_signals(info);
1274 ++ set_gtsignals(info);
1275 + spin_unlock_irqrestore(&info->lock,flags);
1276 + }
1277 + }
1278 +@@ -1362,7 +1307,7 @@ static void unthrottle(struct tty_struct * tty)
1279 + if (C_CRTSCTS(tty)) {
1280 + spin_lock_irqsave(&info->lock,flags);
1281 + info->signals |= SerialSignal_RTS;
1282 +- set_signals(info);
1283 ++ set_gtsignals(info);
1284 + spin_unlock_irqrestore(&info->lock,flags);
1285 + }
1286 + }
1287 +@@ -1534,7 +1479,7 @@ static int hdlcdev_open(struct net_device *dev)
1288 +
1289 + /* inform generic HDLC layer of current DCD status */
1290 + spin_lock_irqsave(&info->lock, flags);
1291 +- get_signals(info);
1292 ++ get_gtsignals(info);
1293 + spin_unlock_irqrestore(&info->lock, flags);
1294 + if (info->signals & SerialSignal_DCD)
1295 + netif_carrier_on(dev);
1296 +@@ -2290,7 +2235,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
1297 + if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
1298 + info->signals &= ~SerialSignal_RTS;
1299 + info->drop_rts_on_tx_done = false;
1300 +- set_signals(info);
1301 ++ set_gtsignals(info);
1302 + }
1303 +
1304 + #if SYNCLINK_GENERIC_HDLC
1305 +@@ -2455,7 +2400,7 @@ static void shutdown(struct slgt_info *info)
1306 +
1307 + if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1308 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1309 +- set_signals(info);
1310 ++ set_gtsignals(info);
1311 + }
1312 +
1313 + flush_cond_wait(&info->gpio_wait_q);
1314 +@@ -2483,7 +2428,7 @@ static void program_hw(struct slgt_info *info)
1315 + else
1316 + async_mode(info);
1317 +
1318 +- set_signals(info);
1319 ++ set_gtsignals(info);
1320 +
1321 + info->dcd_chkcount = 0;
1322 + info->cts_chkcount = 0;
1323 +@@ -2491,7 +2436,7 @@ static void program_hw(struct slgt_info *info)
1324 + info->dsr_chkcount = 0;
1325 +
1326 + slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
1327 +- get_signals(info);
1328 ++ get_gtsignals(info);
1329 +
1330 + if (info->netcount ||
1331 + (info->port.tty && info->port.tty->termios.c_cflag & CREAD))
1332 +@@ -2735,7 +2680,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
1333 + spin_lock_irqsave(&info->lock,flags);
1334 +
1335 + /* return immediately if state matches requested events */
1336 +- get_signals(info);
1337 ++ get_gtsignals(info);
1338 + s = info->signals;
1339 +
1340 + events = mask &
1341 +@@ -3153,7 +3098,7 @@ static int tiocmget(struct tty_struct *tty)
1342 + unsigned long flags;
1343 +
1344 + spin_lock_irqsave(&info->lock,flags);
1345 +- get_signals(info);
1346 ++ get_gtsignals(info);
1347 + spin_unlock_irqrestore(&info->lock,flags);
1348 +
1349 + result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
1350 +@@ -3192,7 +3137,7 @@ static int tiocmset(struct tty_struct *tty,
1351 + info->signals &= ~SerialSignal_DTR;
1352 +
1353 + spin_lock_irqsave(&info->lock,flags);
1354 +- set_signals(info);
1355 ++ set_gtsignals(info);
1356 + spin_unlock_irqrestore(&info->lock,flags);
1357 + return 0;
1358 + }
1359 +@@ -3203,7 +3148,7 @@ static int carrier_raised(struct tty_port *port)
1360 + struct slgt_info *info = container_of(port, struct slgt_info, port);
1361 +
1362 + spin_lock_irqsave(&info->lock,flags);
1363 +- get_signals(info);
1364 ++ get_gtsignals(info);
1365 + spin_unlock_irqrestore(&info->lock,flags);
1366 + return (info->signals & SerialSignal_DCD) ? 1 : 0;
1367 + }
1368 +@@ -3218,7 +3163,7 @@ static void dtr_rts(struct tty_port *port, int on)
1369 + info->signals |= SerialSignal_RTS | SerialSignal_DTR;
1370 + else
1371 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1372 +- set_signals(info);
1373 ++ set_gtsignals(info);
1374 + spin_unlock_irqrestore(&info->lock,flags);
1375 + }
1376 +
1377 +@@ -4017,10 +3962,10 @@ static void tx_start(struct slgt_info *info)
1378 +
1379 + if (info->params.mode != MGSL_MODE_ASYNC) {
1380 + if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
1381 +- get_signals(info);
1382 ++ get_gtsignals(info);
1383 + if (!(info->signals & SerialSignal_RTS)) {
1384 + info->signals |= SerialSignal_RTS;
1385 +- set_signals(info);
1386 ++ set_gtsignals(info);
1387 + info->drop_rts_on_tx_done = true;
1388 + }
1389 + }
1390 +@@ -4074,7 +4019,7 @@ static void reset_port(struct slgt_info *info)
1391 + rx_stop(info);
1392 +
1393 + info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1394 +- set_signals(info);
1395 ++ set_gtsignals(info);
1396 +
1397 + slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
1398 + }
1399 +@@ -4496,7 +4441,7 @@ static void tx_set_idle(struct slgt_info *info)
1400 + /*
1401 + * get state of V24 status (input) signals
1402 + */
1403 +-static void get_signals(struct slgt_info *info)
1404 ++static void get_gtsignals(struct slgt_info *info)
1405 + {
1406 + unsigned short status = rd_reg16(info, SSR);
1407 +
1408 +@@ -4558,7 +4503,7 @@ static void msc_set_vcr(struct slgt_info *info)
1409 + /*
1410 + * set state of V24 control (output) signals
1411 + */
1412 +-static void set_signals(struct slgt_info *info)
1413 ++static void set_gtsignals(struct slgt_info *info)
1414 + {
1415 + unsigned char val = rd_reg8(info, VCR);
1416 + if (info->signals & SerialSignal_DTR)
1417 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1418 +index c0604c60ebd01..e139cda35f639 100644
1419 +--- a/drivers/usb/class/cdc-acm.c
1420 ++++ b/drivers/usb/class/cdc-acm.c
1421 +@@ -725,7 +725,8 @@ static void acm_port_destruct(struct tty_port *port)
1422 + {
1423 + struct acm *acm = container_of(port, struct acm, port);
1424 +
1425 +- acm_release_minor(acm);
1426 ++ if (acm->minor != ACM_MINOR_INVALID)
1427 ++ acm_release_minor(acm);
1428 + usb_put_intf(acm->control);
1429 + kfree(acm->country_codes);
1430 + kfree(acm);
1431 +@@ -1356,8 +1357,10 @@ made_compressed_probe:
1432 + usb_get_intf(acm->control); /* undone in destruct() */
1433 +
1434 + minor = acm_alloc_minor(acm);
1435 +- if (minor < 0)
1436 ++ if (minor < 0) {
1437 ++ acm->minor = ACM_MINOR_INVALID;
1438 + goto alloc_fail1;
1439 ++ }
1440 +
1441 + acm->minor = minor;
1442 + acm->dev = usb_dev;
1443 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
1444 +index b95ff769072e7..ef7fe5eacff69 100644
1445 +--- a/drivers/usb/class/cdc-acm.h
1446 ++++ b/drivers/usb/class/cdc-acm.h
1447 +@@ -22,6 +22,8 @@
1448 + #define ACM_TTY_MAJOR 166
1449 + #define ACM_TTY_MINORS 256
1450 +
1451 ++#define ACM_MINOR_INVALID ACM_TTY_MINORS
1452 ++
1453 + /*
1454 + * Requests.
1455 + */
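
The two cdc-acm hunks above introduce ACM_MINOR_INVALID so acm_port_destruct() only releases a minor number that was actually allocated. A minimal user-space sketch of that sentinel pattern; the names are stand-ins for the driver's allocator, not the real API:

#include <stdio.h>
#include <stdlib.h>

#define MINOR_INVALID 256              /* one past the last valid minor number */

struct dev {
    int minor;                         /* MINOR_INVALID until allocation succeeds */
};

static int alloc_minor(void)           /* stand-in for acm_alloc_minor() */
{
    return -1;                         /* pretend the minor table is exhausted */
}

static void release_minor(int minor)   /* stand-in for acm_release_minor() */
{
    printf("releasing minor %d\n", minor);
}

static void dev_destruct(struct dev *d)
{
    /* only undo the allocation if it actually happened */
    if (d->minor != MINOR_INVALID)
        release_minor(d->minor);
    free(d);
}

int main(void)
{
    struct dev *d = calloc(1, sizeof(*d));

    if (!d)
        return 1;

    int minor = alloc_minor();
    if (minor < 0)
        d->minor = MINOR_INVALID;      /* mark "never allocated" before bailing out */
    else
        d->minor = minor;

    dev_destruct(d);                   /* safe in both the success and failure case */
    return 0;
}

Without the sentinel, the destructor would hand minor 0 back to the allocator even though it was never taken.
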
1456 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1457 +index d0f45600b6698..48ff9c66ae46d 100644
1458 +--- a/drivers/usb/core/hcd.c
1459 ++++ b/drivers/usb/core/hcd.c
1460 +@@ -2636,6 +2636,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
1461 + {
1462 + int retval;
1463 + struct usb_device *rhdev;
1464 ++ struct usb_hcd *shared_hcd;
1465 +
1466 + if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
1467 + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
1468 +@@ -2792,13 +2793,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
1469 + goto err_hcd_driver_start;
1470 + }
1471 +
1472 ++ /* starting here, usbcore will pay attention to the shared HCD roothub */
1473 ++ shared_hcd = hcd->shared_hcd;
1474 ++ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
1475 ++ retval = register_root_hub(shared_hcd);
1476 ++ if (retval != 0)
1477 ++ goto err_register_root_hub;
1478 ++
1479 ++ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
1480 ++ usb_hcd_poll_rh_status(shared_hcd);
1481 ++ }
1482 ++
1483 + /* starting here, usbcore will pay attention to this root hub */
1484 +- retval = register_root_hub(hcd);
1485 +- if (retval != 0)
1486 +- goto err_register_root_hub;
1487 ++ if (!HCD_DEFER_RH_REGISTER(hcd)) {
1488 ++ retval = register_root_hub(hcd);
1489 ++ if (retval != 0)
1490 ++ goto err_register_root_hub;
1491 +
1492 +- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
1493 +- usb_hcd_poll_rh_status(hcd);
1494 ++ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
1495 ++ usb_hcd_poll_rh_status(hcd);
1496 ++ }
1497 +
1498 + return retval;
1499 +
1500 +@@ -2841,6 +2855,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
1501 + void usb_remove_hcd(struct usb_hcd *hcd)
1502 + {
1503 + struct usb_device *rhdev = hcd->self.root_hub;
1504 ++ bool rh_registered;
1505 +
1506 + dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
1507 +
1508 +@@ -2851,6 +2866,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
1509 +
1510 + dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
1511 + spin_lock_irq (&hcd_root_hub_lock);
1512 ++ rh_registered = hcd->rh_registered;
1513 + hcd->rh_registered = 0;
1514 + spin_unlock_irq (&hcd_root_hub_lock);
1515 +
1516 +@@ -2860,7 +2876,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
1517 + cancel_work_sync(&hcd->died_work);
1518 +
1519 + mutex_lock(&usb_bus_idr_lock);
1520 +- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
1521 ++ if (rh_registered)
1522 ++ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
1523 + mutex_unlock(&usb_bus_idr_lock);
1524 +
1525 + /*
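
The hcd.c rework above lets a controller driver set HCD_FLAG_DEFER_RH_REGISTER so its roothub is registered only once the paired (shared) HCD comes up, rather than immediately in usb_add_hcd(). A much-reduced user-space sketch of just that ordering decision; the struct and function names are illustrative, and error handling and roothub polling are left out entirely:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct hcd {
    const char *name;
    bool        primary;
    bool        defer_rh_register;   /* analogue of HCD_FLAG_DEFER_RH_REGISTER */
    struct hcd *shared_hcd;          /* the other half of a primary/secondary pair */
};

static void register_root_hub(struct hcd *h)
{
    printf("root hub of %s registered\n", h->name);
}

/* loosely mirrors the tail of usb_add_hcd() after the hunk above */
static void add_hcd(struct hcd *h)
{
    struct hcd *shared = h->shared_hcd;

    /* adding the secondary HCD flushes a deferred primary roothub first */
    if (!h->primary && shared && shared->defer_rh_register)
        register_root_hub(shared);

    /* register our own roothub unless the driver asked to defer it */
    if (!h->defer_rh_register)
        register_root_hub(h);
}

int main(void)
{
    struct hcd first  = { .name = "hcd-a", .primary = true,
                          .defer_rh_register = true };
    struct hcd second = { .name = "hcd-b", .shared_hcd = &first };

    add_hcd(&first);    /* nothing is registered yet */
    add_hcd(&second);   /* hcd-a's roothub appears, then hcd-b's */
    return 0;
}
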
1526 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
1527 +index 66dfcdbd1e03a..e8b25dae09499 100644
1528 +--- a/drivers/usb/dwc2/gadget.c
1529 ++++ b/drivers/usb/dwc2/gadget.c
1530 +@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
1531 + */
1532 + static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
1533 + {
1534 ++ struct dwc2_hsotg *hsotg = hs_ep->parent;
1535 ++ u16 limit = DSTS_SOFFN_LIMIT;
1536 ++
1537 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
1538 ++ limit >>= 3;
1539 ++
1540 + hs_ep->target_frame += hs_ep->interval;
1541 +- if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
1542 ++ if (hs_ep->target_frame > limit) {
1543 + hs_ep->frame_overrun = true;
1544 +- hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
1545 ++ hs_ep->target_frame &= limit;
1546 + } else {
1547 + hs_ep->frame_overrun = false;
1548 + }
1549 +@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
1550 + */
1551 + static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
1552 + {
1553 ++ struct dwc2_hsotg *hsotg = hs_ep->parent;
1554 ++ u16 limit = DSTS_SOFFN_LIMIT;
1555 ++
1556 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
1557 ++ limit >>= 3;
1558 ++
1559 + if (hs_ep->target_frame)
1560 + hs_ep->target_frame -= 1;
1561 + else
1562 +- hs_ep->target_frame = DSTS_SOFFN_LIMIT;
1563 ++ hs_ep->target_frame = limit;
1564 + }
1565 +
1566 + /**
1567 +@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
1568 + dwc2_writel(hsotg, ctrl, depctl);
1569 + }
1570 +
1571 ++static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
1572 ++static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
1573 ++ struct dwc2_hsotg_ep *hs_ep,
1574 ++ struct dwc2_hsotg_req *hs_req,
1575 ++ int result);
1576 ++
1577 + /**
1578 + * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1579 + * @hsotg: The controller state.
1580 +@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
1581 + }
1582 + }
1583 +
1584 +- if (hs_ep->isochronous && hs_ep->interval == 1) {
1585 +- hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
1586 +- dwc2_gadget_incr_frame_num(hs_ep);
1587 +-
1588 +- if (hs_ep->target_frame & 0x1)
1589 +- ctrl |= DXEPCTL_SETODDFR;
1590 +- else
1591 +- ctrl |= DXEPCTL_SETEVENFR;
1592 ++ if (hs_ep->isochronous) {
1593 ++ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
1594 ++ if (hs_ep->interval == 1) {
1595 ++ if (hs_ep->target_frame & 0x1)
1596 ++ ctrl |= DXEPCTL_SETODDFR;
1597 ++ else
1598 ++ ctrl |= DXEPCTL_SETEVENFR;
1599 ++ }
1600 ++ ctrl |= DXEPCTL_CNAK;
1601 ++ } else {
1602 ++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1603 ++ return;
1604 ++ }
1605 + }
1606 +
1607 + ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
1608 +@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
1609 + u32 target_frame = hs_ep->target_frame;
1610 + u32 current_frame = hsotg->frame_number;
1611 + bool frame_overrun = hs_ep->frame_overrun;
1612 ++ u16 limit = DSTS_SOFFN_LIMIT;
1613 ++
1614 ++ if (hsotg->gadget.speed != USB_SPEED_HIGH)
1615 ++ limit >>= 3;
1616 +
1617 + if (!frame_overrun && current_frame >= target_frame)
1618 + return true;
1619 +
1620 + if (frame_overrun && current_frame >= target_frame &&
1621 +- ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
1622 ++ ((current_frame - target_frame) < limit / 2))
1623 + return true;
1624 +
1625 + return false;
1626 +@@ -1712,11 +1739,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
1627 + */
1628 + static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1629 + {
1630 +- u32 mask;
1631 + struct dwc2_hsotg *hsotg = hs_ep->parent;
1632 + int dir_in = hs_ep->dir_in;
1633 + struct dwc2_hsotg_req *hs_req;
1634 +- u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
1635 +
1636 + if (!list_empty(&hs_ep->queue)) {
1637 + hs_req = get_ep_head(hs_ep);
1638 +@@ -1732,9 +1757,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1639 + } else {
1640 + dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1641 + __func__);
1642 +- mask = dwc2_readl(hsotg, epmsk_reg);
1643 +- mask |= DOEPMSK_OUTTKNEPDISMSK;
1644 +- dwc2_writel(hsotg, mask, epmsk_reg);
1645 + }
1646 + }
1647 +
1648 +@@ -2304,19 +2326,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
1649 + dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
1650 + }
1651 +
1652 +-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
1653 +- u32 epctl_reg)
1654 +-{
1655 +- u32 ctrl;
1656 +-
1657 +- ctrl = dwc2_readl(hsotg, epctl_reg);
1658 +- if (ctrl & DXEPCTL_EOFRNUM)
1659 +- ctrl |= DXEPCTL_SETEVENFR;
1660 +- else
1661 +- ctrl |= DXEPCTL_SETODDFR;
1662 +- dwc2_writel(hsotg, ctrl, epctl_reg);
1663 +-}
1664 +-
1665 + /*
1666 + * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
1667 + * @hs_ep - The endpoint on which transfer went
1668 +@@ -2437,20 +2446,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
1669 + dwc2_hsotg_ep0_zlp(hsotg, true);
1670 + }
1671 +
1672 +- /*
1673 +- * Slave mode OUT transfers do not go through XferComplete so
1674 +- * adjust the ISOC parity here.
1675 +- */
1676 +- if (!using_dma(hsotg)) {
1677 +- if (hs_ep->isochronous && hs_ep->interval == 1)
1678 +- dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
1679 +- else if (hs_ep->isochronous && hs_ep->interval > 1)
1680 +- dwc2_gadget_incr_frame_num(hs_ep);
1681 +- }
1682 +-
1683 + /* Set actual frame number for completed transfers */
1684 +- if (!using_desc_dma(hsotg) && hs_ep->isochronous)
1685 +- req->frame_number = hsotg->frame_number;
1686 ++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
1687 ++ req->frame_number = hs_ep->target_frame;
1688 ++ dwc2_gadget_incr_frame_num(hs_ep);
1689 ++ }
1690 +
1691 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1692 + }
1693 +@@ -2764,6 +2764,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
1694 + return;
1695 + }
1696 +
1697 ++ /* Set actual frame number for completed transfers */
1698 ++ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
1699 ++ hs_req->req.frame_number = hs_ep->target_frame;
1700 ++ dwc2_gadget_incr_frame_num(hs_ep);
1701 ++ }
1702 ++
1703 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
1704 + }
1705 +
1706 +@@ -2824,23 +2830,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
1707 +
1708 + dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
1709 +
1710 +- if (hs_ep->isochronous) {
1711 +- dwc2_hsotg_complete_in(hsotg, hs_ep);
1712 +- return;
1713 +- }
1714 +-
1715 + if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
1716 + int dctl = dwc2_readl(hsotg, DCTL);
1717 +
1718 + dctl |= DCTL_CGNPINNAK;
1719 + dwc2_writel(hsotg, dctl, DCTL);
1720 + }
1721 +- return;
1722 +- }
1723 ++ } else {
1724 +
1725 +- if (dctl & DCTL_GOUTNAKSTS) {
1726 +- dctl |= DCTL_CGOUTNAK;
1727 +- dwc2_writel(hsotg, dctl, DCTL);
1728 ++ if (dctl & DCTL_GOUTNAKSTS) {
1729 ++ dctl |= DCTL_CGOUTNAK;
1730 ++ dwc2_writel(hsotg, dctl, DCTL);
1731 ++ }
1732 + }
1733 +
1734 + if (!hs_ep->isochronous)
1735 +@@ -2861,8 +2862,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
1736 + /* Update current frame number value. */
1737 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
1738 + } while (dwc2_gadget_target_frame_elapsed(hs_ep));
1739 +-
1740 +- dwc2_gadget_start_next_request(hs_ep);
1741 + }
1742 +
1743 + /**
1744 +@@ -2879,8 +2878,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
1745 + static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
1746 + {
1747 + struct dwc2_hsotg *hsotg = ep->parent;
1748 ++ struct dwc2_hsotg_req *hs_req;
1749 + int dir_in = ep->dir_in;
1750 +- u32 doepmsk;
1751 +
1752 + if (dir_in || !ep->isochronous)
1753 + return;
1754 +@@ -2894,28 +2893,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
1755 + return;
1756 + }
1757 +
1758 +- if (ep->interval > 1 &&
1759 +- ep->target_frame == TARGET_FRAME_INITIAL) {
1760 ++ if (ep->target_frame == TARGET_FRAME_INITIAL) {
1761 + u32 ctrl;
1762 +
1763 + ep->target_frame = hsotg->frame_number;
1764 +- dwc2_gadget_incr_frame_num(ep);
1765 ++ if (ep->interval > 1) {
1766 ++ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
1767 ++ if (ep->target_frame & 0x1)
1768 ++ ctrl |= DXEPCTL_SETODDFR;
1769 ++ else
1770 ++ ctrl |= DXEPCTL_SETEVENFR;
1771 +
1772 +- ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
1773 +- if (ep->target_frame & 0x1)
1774 +- ctrl |= DXEPCTL_SETODDFR;
1775 +- else
1776 +- ctrl |= DXEPCTL_SETEVENFR;
1777 ++ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
1778 ++ }
1779 ++ }
1780 ++
1781 ++ while (dwc2_gadget_target_frame_elapsed(ep)) {
1782 ++ hs_req = get_ep_head(ep);
1783 ++ if (hs_req)
1784 ++ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
1785 +
1786 +- dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
1787 ++ dwc2_gadget_incr_frame_num(ep);
1788 ++ /* Update current frame number value. */
1789 ++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
1790 + }
1791 +
1792 +- dwc2_gadget_start_next_request(ep);
1793 +- doepmsk = dwc2_readl(hsotg, DOEPMSK);
1794 +- doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
1795 +- dwc2_writel(hsotg, doepmsk, DOEPMSK);
1796 ++ if (!ep->req)
1797 ++ dwc2_gadget_start_next_request(ep);
1798 ++
1799 + }
1800 +
1801 ++static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
1802 ++ struct dwc2_hsotg_ep *hs_ep);
1803 ++
1804 + /**
1805 + * dwc2_gadget_handle_nak - handle NAK interrupt
1806 + * @hs_ep: The endpoint on which interrupt is asserted.
1807 +@@ -2933,7 +2943,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
1808 + static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
1809 + {
1810 + struct dwc2_hsotg *hsotg = hs_ep->parent;
1811 ++ struct dwc2_hsotg_req *hs_req;
1812 + int dir_in = hs_ep->dir_in;
1813 ++ u32 ctrl;
1814 +
1815 + if (!dir_in || !hs_ep->isochronous)
1816 + return;
1817 +@@ -2975,13 +2987,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
1818 +
1819 + dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
1820 + }
1821 +-
1822 +- dwc2_hsotg_complete_request(hsotg, hs_ep,
1823 +- get_ep_head(hs_ep), 0);
1824 + }
1825 +
1826 +- if (!using_desc_dma(hsotg))
1827 ++ if (using_desc_dma(hsotg))
1828 ++ return;
1829 ++
1830 ++ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
1831 ++ if (ctrl & DXEPCTL_EPENA)
1832 ++ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
1833 ++ else
1834 ++ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
1835 ++
1836 ++ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
1837 ++ hs_req = get_ep_head(hs_ep);
1838 ++ if (hs_req)
1839 ++ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1840 ++
1841 + dwc2_gadget_incr_frame_num(hs_ep);
1842 ++ /* Update current frame number value. */
1843 ++ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
1844 ++ }
1845 ++
1846 ++ if (!hs_ep->req)
1847 ++ dwc2_gadget_start_next_request(hs_ep);
1848 + }
1849 +
1850 + /**
1851 +@@ -3039,21 +3067,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
1852 +
1853 + /* In DDMA handle isochronous requests separately */
1854 + if (using_desc_dma(hsotg) && hs_ep->isochronous) {
1855 +- /* XferCompl set along with BNA */
1856 +- if (!(ints & DXEPINT_BNAINTR))
1857 +- dwc2_gadget_complete_isoc_request_ddma(hs_ep);
1858 ++ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
1859 + } else if (dir_in) {
1860 + /*
1861 + * We get OutDone from the FIFO, so we only
1862 + * need to look at completing IN requests here
1863 + * if operating slave mode
1864 + */
1865 +- if (hs_ep->isochronous && hs_ep->interval > 1)
1866 +- dwc2_gadget_incr_frame_num(hs_ep);
1867 +-
1868 +- dwc2_hsotg_complete_in(hsotg, hs_ep);
1869 +- if (ints & DXEPINT_NAKINTRPT)
1870 +- ints &= ~DXEPINT_NAKINTRPT;
1871 ++ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
1872 ++ dwc2_hsotg_complete_in(hsotg, hs_ep);
1873 +
1874 + if (idx == 0 && !hs_ep->req)
1875 + dwc2_hsotg_enqueue_setup(hsotg);
1876 +@@ -3062,10 +3084,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
1877 + * We're using DMA, we need to fire an OutDone here
1878 + * as we ignore the RXFIFO.
1879 + */
1880 +- if (hs_ep->isochronous && hs_ep->interval > 1)
1881 +- dwc2_gadget_incr_frame_num(hs_ep);
1882 +-
1883 +- dwc2_hsotg_handle_outdone(hsotg, idx);
1884 ++ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
1885 ++ dwc2_hsotg_handle_outdone(hsotg, idx);
1886 + }
1887 + }
1888 +
1889 +@@ -4055,6 +4075,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
1890 + mask |= DIEPMSK_NAKMSK;
1891 + dwc2_writel(hsotg, mask, DIEPMSK);
1892 + } else {
1893 ++ epctrl |= DXEPCTL_SNAK;
1894 + mask = dwc2_readl(hsotg, DOEPMSK);
1895 + mask |= DOEPMSK_OUTTKNEPDISMSK;
1896 + dwc2_writel(hsotg, mask, DOEPMSK);
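
Several of the dwc2 hunks above scale the (micro)frame counter limit down by three bits when the gadget is not running at high speed, so wrap-around and the "target frame elapsed" test use the width the hardware actually reports. A small stand-alone sketch of that wrap logic, assuming the 14-bit limit of 0x3fff used by the driver:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SOFFN_LIMIT 0x3fff          /* 14-bit (micro)frame counter, as in DSTS_SOFFN_LIMIT */

enum speed { SPEED_HIGH, SPEED_FULL };

static uint16_t frame_limit(enum speed s)
{
    uint16_t limit = SOFFN_LIMIT;

    /* full/low speed counts frames, not microframes: 3 fewer bits */
    if (s != SPEED_HIGH)
        limit >>= 3;
    return limit;
}

/* mirrors dwc2_gadget_incr_frame_num(): advance and wrap at the speed-dependent limit */
static uint16_t incr_frame(uint16_t target, uint16_t interval, enum speed s,
                           bool *overrun)
{
    uint16_t limit = frame_limit(s);

    target += interval;
    if (target > limit) {
        *overrun = true;
        target &= limit;
    } else {
        *overrun = false;
    }
    return target;
}

int main(void)
{
    bool overrun;

    /* full speed: the counter must wrap at 0x7ff, not at 0x3fff */
    uint16_t t = incr_frame(0x7ff, 1, SPEED_FULL, &overrun);
    assert(overrun && t == 0);

    /* high speed keeps the full 14-bit range */
    t = incr_frame(0x7ff, 1, SPEED_HIGH, &overrun);
    assert(!overrun && t == 0x800);

    printf("frame wrap checks passed\n");
    return 0;
}
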
1897 +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
1898 +index a766476fd742e..ca0aebb5bd0cc 100644
1899 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c
1900 ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
1901 +@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
1902 + do {
1903 + tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
1904 + udelay(1);
1905 +- } while (tmp != CS_IDST || timeout-- > 0);
1906 ++ } while (tmp != CS_IDST && timeout-- > 0);
1907 +
1908 + if (tmp == CS_IDST)
1909 + r8a66597_bset(r8a66597,
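
The r8a66597 change above is a one-operator fix: the poll loop must stop when the controller reaches the idle state or the retry budget runs out, which needs "&&", not "||". A trivial stand-alone illustration of the corrected loop shape; the status values and counts are made up:

#include <stdio.h>

/* stand-in for the controller status read; goes idle after a few polls */
static int read_status(void)
{
    static int polls;
    return (++polls >= 5) ? 1 /* idle */ : 0 /* busy */;
}

int main(void)
{
    int timeout = 3000;
    int status;

    /*
     * Poll until the controller goes idle OR the budget runs out.
     * With "||" the loop keeps running as long as either condition
     * still holds, so a stuck controller spins forever and an idle
     * one burns the whole timeout anyway.
     */
    do {
        status = read_status();
    } while (status != 1 && timeout-- > 0);

    if (status == 1)
        printf("controller idle after polling\n");
    else
        printf("timed out waiting for idle state\n");
    return 0;
}
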
1910 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1911 +index 505da4999e208..02a2afd130eb6 100644
1912 +--- a/drivers/usb/host/xhci.c
1913 ++++ b/drivers/usb/host/xhci.c
1914 +@@ -693,6 +693,7 @@ int xhci_run(struct usb_hcd *hcd)
1915 + if (ret)
1916 + xhci_free_command(xhci, command);
1917 + }
1918 ++ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
1919 + xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1920 + "Finished xhci_run for USB2 roothub");
1921 +
1922 +diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
1923 +index 39453287b5c36..4ecfbf6bb1fa8 100644
1924 +--- a/drivers/usb/musb/tusb6010.c
1925 ++++ b/drivers/usb/musb/tusb6010.c
1926 +@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
1927 + }
1928 + if (len > 0) {
1929 + /* Write the rest 1 - 3 bytes to FIFO */
1930 ++ val = 0;
1931 + memcpy(&val, buf, len);
1932 + musb_writel(fifo, 0, val);
1933 + }
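
The tusb6010 hunk zeroes "val" before copying the final 1-3 bytes, so the unused bytes of the 32-bit FIFO write are 0 rather than leftover stack contents. A user-space sketch of the same tail handling, with a printf standing in for musb_writel():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for musb_writel(): pretend to push one 32-bit word into the FIFO */
static void fifo_write32(uint32_t val)
{
    printf("fifo <- 0x%08x\n", val);
}

static void fifo_write_unaligned(const uint8_t *buf, size_t len)
{
    uint32_t val;

    /* ... full 32-bit words would already have been written here ... */

    len &= 3;
    if (len > 0) {
        /*
         * Only 1-3 bytes remain.  Clear the word first so the bytes
         * memcpy() does not touch are 0 instead of stale stack data.
         */
        val = 0;
        memcpy(&val, buf, len);
        fifo_write32(val);
    }
}

int main(void)
{
    const uint8_t tail[3] = { 0xaa, 0xbb, 0xcc };

    fifo_write_unaligned(tail, sizeof(tail));
    return 0;
}
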
1934 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1935 +index 8e5878ec656d0..004b6598706b1 100644
1936 +--- a/drivers/usb/serial/cp210x.c
1937 ++++ b/drivers/usb/serial/cp210x.c
1938 +@@ -234,6 +234,7 @@ static const struct usb_device_id id_table[] = {
1939 + { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
1940 + { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
1941 + { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
1942 ++ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
1943 + { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
1944 + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
1945 + { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
1946 +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
1947 +index 2b8a0d4b66fce..84b6093ed1d28 100644
1948 +--- a/drivers/usb/serial/mos7840.c
1949 ++++ b/drivers/usb/serial/mos7840.c
1950 +@@ -114,7 +114,6 @@
1951 + #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
1952 + #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
1953 + #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
1954 +-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
1955 +
1956 + /* This driver also supports
1957 + * ATEN UC2324 device using Moschip MCS7840
1958 +@@ -196,7 +195,6 @@ static const struct usb_device_id id_table[] = {
1959 + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
1960 + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
1961 + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
1962 +- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
1963 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
1964 + {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
1965 + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
1966 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1967 +index d42ca13569965..82016d9781460 100644
1968 +--- a/drivers/usb/serial/option.c
1969 ++++ b/drivers/usb/serial/option.c
1970 +@@ -1205,6 +1205,14 @@ static const struct usb_device_id option_ids[] = {
1971 + .driver_info = NCTRL(0) | RSVD(1) },
1972 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
1973 + .driver_info = NCTRL(2) | RSVD(3) },
1974 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
1975 ++ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1976 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
1977 ++ .driver_info = NCTRL(0) | RSVD(1) },
1978 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
1979 ++ .driver_info = NCTRL(2) | RSVD(3) },
1980 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
1981 ++ .driver_info = NCTRL(0) | RSVD(1) },
1982 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1983 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
1984 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1985 +@@ -1650,7 +1658,6 @@ static const struct usb_device_id option_ids[] = {
1986 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
1987 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
1988 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
1989 +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
1990 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
1991 + .driver_info = RSVD(1) },
1992 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
1993 +@@ -2068,6 +2075,8 @@ static const struct usb_device_id option_ids[] = {
1994 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
1995 + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
1996 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
1997 ++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
1998 ++ .driver_info = RSVD(3) },
1999 + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
2000 + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
2001 + { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
2002 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2003 +index 861153d294b67..7442793fe0502 100644
2004 +--- a/drivers/usb/storage/unusual_devs.h
2005 ++++ b/drivers/usb/storage/unusual_devs.h
2006 +@@ -416,9 +416,16 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
2007 + USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
2008 +
2009 + /*
2010 +- * Reported by Ondrej Zary <linux@××××××××××××××××.org>
2011 ++ * Reported by Ondrej Zary <linux@××××.sk>
2012 + * The device reports one sector more and breaks when that sector is accessed
2013 ++ * Firmwares older than 2.6c (the latest one and the only that claims Linux
2014 ++ * support) have also broken tag handling
2015 + */
2016 ++UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,
2017 ++ "ScanLogic",
2018 ++ "SL11R-IDE",
2019 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2020 ++ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
2021 + UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
2022 + "ScanLogic",
2023 + "SL11R-IDE",
2024 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
2025 +index c7db6c943ba51..2f72753c3e225 100644
2026 +--- a/drivers/usb/storage/unusual_uas.h
2027 ++++ b/drivers/usb/storage/unusual_uas.h
2028 +@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
2029 + "LaCie",
2030 + "Rugged USB3-FW",
2031 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2032 +- US_FL_IGNORE_UAS),
2033 ++ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
2034 +
2035 + /*
2036 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
2037 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
2038 +index ebb05517b6aa1..be31c296eed4c 100644
2039 +--- a/drivers/xen/balloon.c
2040 ++++ b/drivers/xen/balloon.c
2041 +@@ -43,6 +43,8 @@
2042 + #include <linux/sched.h>
2043 + #include <linux/cred.h>
2044 + #include <linux/errno.h>
2045 ++#include <linux/freezer.h>
2046 ++#include <linux/kthread.h>
2047 + #include <linux/mm.h>
2048 + #include <linux/memblock.h>
2049 + #include <linux/pagemap.h>
2050 +@@ -117,7 +119,7 @@ static struct ctl_table xen_root[] = {
2051 + #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
2052 +
2053 + /*
2054 +- * balloon_process() state:
2055 ++ * balloon_thread() state:
2056 + *
2057 + * BP_DONE: done or nothing to do,
2058 + * BP_WAIT: wait to be rescheduled,
2059 +@@ -132,6 +134,8 @@ enum bp_state {
2060 + BP_ECANCELED
2061 + };
2062 +
2063 ++/* Main waiting point for xen-balloon thread. */
2064 ++static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
2065 +
2066 + static DEFINE_MUTEX(balloon_mutex);
2067 +
2068 +@@ -146,10 +150,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
2069 + static LIST_HEAD(ballooned_pages);
2070 + static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
2071 +
2072 +-/* Main work function, always executed in process context. */
2073 +-static void balloon_process(struct work_struct *work);
2074 +-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
2075 +-
2076 + /* When ballooning out (allocating memory to return to Xen) we don't really
2077 + want the kernel to try too hard since that can trigger the oom killer. */
2078 + #define GFP_BALLOON \
2079 +@@ -383,7 +383,7 @@ static void xen_online_page(struct page *page, unsigned int order)
2080 + static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
2081 + {
2082 + if (val == MEM_ONLINE)
2083 +- schedule_delayed_work(&balloon_worker, 0);
2084 ++ wake_up(&balloon_thread_wq);
2085 +
2086 + return NOTIFY_OK;
2087 + }
2088 +@@ -508,18 +508,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
2089 + }
2090 +
2091 + /*
2092 +- * As this is a work item it is guaranteed to run as a single instance only.
2093 ++ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
2094 ++ * needed, or if the credit has changed while state is BP_EAGAIN.
2095 ++ */
2096 ++static bool balloon_thread_cond(enum bp_state state, long credit)
2097 ++{
2098 ++ if (state != BP_EAGAIN)
2099 ++ credit = 0;
2100 ++
2101 ++ return current_credit() != credit || kthread_should_stop();
2102 ++}
2103 ++
2104 ++/*
2105 ++ * As this is a kthread it is guaranteed to run as a single instance only.
2106 + * We may of course race updates of the target counts (which are protected
2107 + * by the balloon lock), or with changes to the Xen hard limit, but we will
2108 + * recover from these in time.
2109 + */
2110 +-static void balloon_process(struct work_struct *work)
2111 ++static int balloon_thread(void *unused)
2112 + {
2113 + enum bp_state state = BP_DONE;
2114 + long credit;
2115 ++ unsigned long timeout;
2116 ++
2117 ++ set_freezable();
2118 ++ for (;;) {
2119 ++ if (state == BP_EAGAIN)
2120 ++ timeout = balloon_stats.schedule_delay * HZ;
2121 ++ else
2122 ++ timeout = 3600 * HZ;
2123 ++ credit = current_credit();
2124 +
2125 ++ wait_event_freezable_timeout(balloon_thread_wq,
2126 ++ balloon_thread_cond(state, credit), timeout);
2127 ++
2128 ++ if (kthread_should_stop())
2129 ++ return 0;
2130 +
2131 +- do {
2132 + mutex_lock(&balloon_mutex);
2133 +
2134 + credit = current_credit();
2135 +@@ -546,12 +571,7 @@ static void balloon_process(struct work_struct *work)
2136 + mutex_unlock(&balloon_mutex);
2137 +
2138 + cond_resched();
2139 +-
2140 +- } while (credit && state == BP_DONE);
2141 +-
2142 +- /* Schedule more work if there is some still to be done. */
2143 +- if (state == BP_EAGAIN)
2144 +- schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
2145 ++ }
2146 + }
2147 +
2148 + /* Resets the Xen limit, sets new target, and kicks off processing. */
2149 +@@ -559,7 +579,7 @@ void balloon_set_new_target(unsigned long target)
2150 + {
2151 + /* No need for lock. Not read-modify-write updates. */
2152 + balloon_stats.target_pages = target;
2153 +- schedule_delayed_work(&balloon_worker, 0);
2154 ++ wake_up(&balloon_thread_wq);
2155 + }
2156 + EXPORT_SYMBOL_GPL(balloon_set_new_target);
2157 +
2158 +@@ -664,7 +684,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
2159 +
2160 + /* The balloon may be too large now. Shrink it if needed. */
2161 + if (current_credit())
2162 +- schedule_delayed_work(&balloon_worker, 0);
2163 ++ wake_up(&balloon_thread_wq);
2164 +
2165 + mutex_unlock(&balloon_mutex);
2166 + }
2167 +@@ -696,6 +716,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
2168 +
2169 + static int __init balloon_init(void)
2170 + {
2171 ++ struct task_struct *task;
2172 ++
2173 + if (!xen_domain())
2174 + return -ENODEV;
2175 +
2176 +@@ -739,6 +761,12 @@ static int __init balloon_init(void)
2177 + }
2178 + #endif
2179 +
2180 ++ task = kthread_run(balloon_thread, NULL, "xen-balloon");
2181 ++ if (IS_ERR(task)) {
2182 ++ pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
2183 ++ return PTR_ERR(task);
2184 ++ }
2185 ++
2186 + /* Init the xen-balloon driver. */
2187 + xen_balloon_init();
2188 +
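
The balloon rework above replaces the delayed work item with a dedicated kthread that sleeps on balloon_thread_wq and uses balloon_thread_cond() to decide when a wake-up is worth acting on. The condition itself is a small pure function; a user-space sketch with stand-ins for current_credit() and kthread_should_stop():

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum bp_state { BP_DONE, BP_WAIT, BP_EAGAIN, BP_ECANCELED };

static long current_credit;     /* pages still to balloon in/out (stand-in) */
static bool should_stop;        /* stand-in for kthread_should_stop() */

/*
 * Mirrors balloon_thread_cond() in the hunk above: outside of BP_EAGAIN any
 * non-zero credit is work to do; while backing off in BP_EAGAIN only a change
 * in credit (or a stop request) justifies waking early, otherwise the thread
 * sleeps out its back-off timeout.
 */
static bool balloon_thread_cond(enum bp_state state, long credit)
{
    if (state != BP_EAGAIN)
        credit = 0;

    return current_credit != credit || should_stop;
}

int main(void)
{
    current_credit = 0;
    assert(!balloon_thread_cond(BP_DONE, 0));     /* nothing to do: keep sleeping */

    current_credit = 128;
    assert(balloon_thread_cond(BP_DONE, 0));      /* new target: wake up */

    assert(!balloon_thread_cond(BP_EAGAIN, 128)); /* backing off, credit unchanged */
    assert(balloon_thread_cond(BP_EAGAIN, 64));   /* credit changed while backing off */

    current_credit = 0;
    should_stop = true;
    assert(balloon_thread_cond(BP_DONE, 0));      /* always wake for a stop request */

    printf("balloon wake-up condition checks passed\n");
    return 0;
}
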
2189 +diff --git a/fs/afs/dir.c b/fs/afs/dir.c
2190 +index e7494cd49ce7b..8c39533d122a5 100644
2191 +--- a/fs/afs/dir.c
2192 ++++ b/fs/afs/dir.c
2193 +@@ -977,9 +977,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
2194 + */
2195 + static int afs_d_revalidate_rcu(struct dentry *dentry)
2196 + {
2197 +- struct afs_vnode *dvnode, *vnode;
2198 ++ struct afs_vnode *dvnode;
2199 + struct dentry *parent;
2200 +- struct inode *dir, *inode;
2201 ++ struct inode *dir;
2202 + long dir_version, de_version;
2203 +
2204 + _enter("%p", dentry);
2205 +@@ -1009,18 +1009,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
2206 + return -ECHILD;
2207 + }
2208 +
2209 +- /* Check to see if the vnode referred to by the dentry still
2210 +- * has a callback.
2211 +- */
2212 +- if (d_really_is_positive(dentry)) {
2213 +- inode = d_inode_rcu(dentry);
2214 +- if (inode) {
2215 +- vnode = AFS_FS_I(inode);
2216 +- if (!afs_check_validity(vnode))
2217 +- return -ECHILD;
2218 +- }
2219 +- }
2220 +-
2221 + return 1; /* Still valid */
2222 + }
2223 +
2224 +@@ -1056,17 +1044,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2225 + if (IS_ERR(key))
2226 + key = NULL;
2227 +
2228 +- if (d_really_is_positive(dentry)) {
2229 +- inode = d_inode(dentry);
2230 +- if (inode) {
2231 +- vnode = AFS_FS_I(inode);
2232 +- afs_validate(vnode, key);
2233 +- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
2234 +- goto out_bad;
2235 +- }
2236 +- }
2237 +-
2238 +- /* lock down the parent dentry so we can peer at it */
2239 ++ /* Hold the parent dentry so we can peer at it */
2240 + parent = dget_parent(dentry);
2241 + dir = AFS_FS_I(d_inode(parent));
2242 +
2243 +@@ -1075,7 +1053,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2244 +
2245 + if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
2246 + _debug("%pd: parent dir deleted", dentry);
2247 +- goto out_bad_parent;
2248 ++ goto not_found;
2249 + }
2250 +
2251 + /* We only need to invalidate a dentry if the server's copy changed
2252 +@@ -1101,12 +1079,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2253 + case 0:
2254 + /* the filename maps to something */
2255 + if (d_really_is_negative(dentry))
2256 +- goto out_bad_parent;
2257 ++ goto not_found;
2258 + inode = d_inode(dentry);
2259 + if (is_bad_inode(inode)) {
2260 + printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
2261 + dentry);
2262 +- goto out_bad_parent;
2263 ++ goto not_found;
2264 + }
2265 +
2266 + vnode = AFS_FS_I(inode);
2267 +@@ -1128,9 +1106,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2268 + dentry, fid.unique,
2269 + vnode->fid.unique,
2270 + vnode->vfs_inode.i_generation);
2271 +- write_seqlock(&vnode->cb_lock);
2272 +- set_bit(AFS_VNODE_DELETED, &vnode->flags);
2273 +- write_sequnlock(&vnode->cb_lock);
2274 + goto not_found;
2275 + }
2276 + goto out_valid;
2277 +@@ -1145,7 +1120,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
2278 + default:
2279 + _debug("failed to iterate dir %pd: %d",
2280 + parent, ret);
2281 +- goto out_bad_parent;
2282 ++ goto not_found;
2283 + }
2284 +
2285 + out_valid:
2286 +@@ -1156,16 +1131,9 @@ out_valid_noupdate:
2287 + _leave(" = 1 [valid]");
2288 + return 1;
2289 +
2290 +- /* the dirent, if it exists, now points to a different vnode */
2291 + not_found:
2292 +- spin_lock(&dentry->d_lock);
2293 +- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
2294 +- spin_unlock(&dentry->d_lock);
2295 +-
2296 +-out_bad_parent:
2297 + _debug("dropping dentry %pd2", dentry);
2298 + dput(parent);
2299 +-out_bad:
2300 + key_put(key);
2301 +
2302 + _leave(" = 0 [bad]");
2303 +diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
2304 +index 90500b6c41fc6..1cd39f6a9c3ad 100644
2305 +--- a/fs/btrfs/space-info.c
2306 ++++ b/fs/btrfs/space-info.c
2307 +@@ -262,9 +262,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
2308 + {
2309 + lockdep_assert_held(&info->lock);
2310 +
2311 +- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
2312 ++ /* The free space could be negative in case of overcommit */
2313 ++ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
2314 + info->flags,
2315 +- info->total_bytes - btrfs_space_info_used(info, true),
2316 ++ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
2317 + info->full ? "" : "not ");
2318 + btrfs_info(fs_info,
2319 + "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
2320 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2321 +index da0720f41ebcb..86bdebd2ece65 100644
2322 +--- a/fs/cifs/connect.c
2323 ++++ b/fs/cifs/connect.c
2324 +@@ -3691,9 +3691,10 @@ cifs_match_super(struct super_block *sb, void *data)
2325 + spin_lock(&cifs_tcp_ses_lock);
2326 + cifs_sb = CIFS_SB(sb);
2327 + tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
2328 +- if (IS_ERR(tlink)) {
2329 ++ if (tlink == NULL) {
2330 ++ /* can not match superblock if tlink were ever null */
2331 + spin_unlock(&cifs_tcp_ses_lock);
2332 +- return rc;
2333 ++ return 0;
2334 + }
2335 + tcon = tlink_tcon(tlink);
2336 + ses = tcon->ses;
2337 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2338 +index 1aac8d38f887d..a9746af5a44db 100644
2339 +--- a/fs/cifs/file.c
2340 ++++ b/fs/cifs/file.c
2341 +@@ -2989,7 +2989,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2342 + struct cifs_tcon *tcon;
2343 + struct cifs_sb_info *cifs_sb;
2344 + struct dentry *dentry = ctx->cfile->dentry;
2345 +- int rc;
2346 ++ ssize_t rc;
2347 +
2348 + tcon = tlink_tcon(ctx->cfile->tlink);
2349 + cifs_sb = CIFS_SB(dentry->d_sb);
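
The cifs change above widens "rc" from int to ssize_t so the byte counts it carries are not truncated or sign-mangled on the way back to the caller. A minimal illustration of the failure mode, assuming an LP64 target and an invented length:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
    size_t total_len = (size_t)3 << 30;    /* a 3 GiB request */

    int     as_int   = (int)total_len;     /* what storing it in "int rc" does */
    ssize_t as_ssize = (ssize_t)total_len; /* the widened type keeps the value */

    printf("int:     %d\n", as_int);
    printf("ssize_t: %zd\n", as_ssize);
    return 0;
}
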
2350 +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
2351 +index 50a863fc17792..207ec61569ea4 100644
2352 +--- a/fs/ocfs2/dlmglue.c
2353 ++++ b/fs/ocfs2/dlmglue.c
2354 +@@ -3933,7 +3933,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2355 + oi = OCFS2_I(inode);
2356 + oi->ip_dir_lock_gen++;
2357 + mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
2358 +- goto out;
2359 ++ goto out_forget;
2360 + }
2361 +
2362 + if (!S_ISREG(inode->i_mode))
2363 +@@ -3964,6 +3964,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2364 + filemap_fdatawait(mapping);
2365 + }
2366 +
2367 ++out_forget:
2368 + forget_all_cached_acls(inode);
2369 +
2370 + out:
2371 +diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
2372 +index a6ee23aadd283..66645a5a35f30 100644
2373 +--- a/fs/qnx4/dir.c
2374 ++++ b/fs/qnx4/dir.c
2375 +@@ -15,13 +15,48 @@
2376 + #include <linux/buffer_head.h>
2377 + #include "qnx4.h"
2378 +
2379 ++/*
2380 ++ * A qnx4 directory entry is an inode entry or link info
2381 ++ * depending on the status field in the last byte. The
2382 ++ * first byte is where the name start either way, and a
2383 ++ * zero means it's empty.
2384 ++ *
2385 ++ * Also, due to a bug in gcc, we don't want to use the
2386 ++ * real (differently sized) name arrays in the inode and
2387 ++ * link entries, but always the 'de_name[]' one in the
2388 ++ * fake struct entry.
2389 ++ *
2390 ++ * See
2391 ++ *
2392 ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
2393 ++ *
2394 ++ * for details, but basically gcc will take the size of the
2395 ++ * 'name' array from one of the used union entries randomly.
2396 ++ *
2397 ++ * This use of 'de_name[]' (48 bytes) avoids the false positive
2398 ++ * warnings that would happen if gcc decides to use 'inode.di_name'
2399 ++ * (16 bytes) even when the pointer and size were to come from
2400 ++ * 'link.dl_name' (48 bytes).
2401 ++ *
2402 ++ * In all cases the actual name pointer itself is the same, it's
2403 ++ * only the gcc internal 'what is the size of this field' logic
2404 ++ * that can get confused.
2405 ++ */
2406 ++union qnx4_directory_entry {
2407 ++ struct {
2408 ++ const char de_name[48];
2409 ++ u8 de_pad[15];
2410 ++ u8 de_status;
2411 ++ };
2412 ++ struct qnx4_inode_entry inode;
2413 ++ struct qnx4_link_info link;
2414 ++};
2415 ++
2416 + static int qnx4_readdir(struct file *file, struct dir_context *ctx)
2417 + {
2418 + struct inode *inode = file_inode(file);
2419 + unsigned int offset;
2420 + struct buffer_head *bh;
2421 +- struct qnx4_inode_entry *de;
2422 +- struct qnx4_link_info *le;
2423 + unsigned long blknum;
2424 + int ix, ino;
2425 + int size;
2426 +@@ -38,27 +73,27 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
2427 + }
2428 + ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
2429 + for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
2430 ++ union qnx4_directory_entry *de;
2431 ++
2432 + offset = ix * QNX4_DIR_ENTRY_SIZE;
2433 +- de = (struct qnx4_inode_entry *) (bh->b_data + offset);
2434 +- if (!de->di_fname[0])
2435 ++ de = (union qnx4_directory_entry *) (bh->b_data + offset);
2436 ++
2437 ++ if (!de->de_name[0])
2438 + continue;
2439 +- if (!(de->di_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
2440 ++ if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
2441 + continue;
2442 +- if (!(de->di_status & QNX4_FILE_LINK))
2443 +- size = QNX4_SHORT_NAME_MAX;
2444 +- else
2445 +- size = QNX4_NAME_MAX;
2446 +- size = strnlen(de->di_fname, size);
2447 +- QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, de->di_fname));
2448 +- if (!(de->di_status & QNX4_FILE_LINK))
2449 ++ if (!(de->de_status & QNX4_FILE_LINK)) {
2450 ++ size = sizeof(de->inode.di_fname);
2451 + ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
2452 +- else {
2453 +- le = (struct qnx4_link_info*)de;
2454 +- ino = ( le32_to_cpu(le->dl_inode_blk) - 1 ) *
2455 ++ } else {
2456 ++ size = sizeof(de->link.dl_fname);
2457 ++ ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
2458 + QNX4_INODES_PER_BLOCK +
2459 +- le->dl_inode_ndx;
2460 ++ de->link.dl_inode_ndx;
2461 + }
2462 +- if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) {
2463 ++ size = strnlen(de->de_name, size);
2464 ++ QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
2465 ++ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
2466 + brelse(bh);
2467 + return 0;
2468 + }
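
The qnx4 comment above already explains the gcc quirk; the sketch below reproduces the union-with-a-common-prefix trick outside the kernel so it can be compiled and poked at directly. Field names and sizes loosely follow the real entries (16- versus 48-byte names, status in the last byte) but are otherwise illustrative:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-ins for the two 64-byte on-disk layouts */
struct inode_entry {
    char    di_fname[16];
    char    di_rest[47];
    uint8_t di_status;
};

struct link_info {
    char    dl_fname[48];
    uint8_t dl_rest[15];
    uint8_t dl_status;
};

/*
 * Same trick as the hunk above: expose one fixed-size name view plus the
 * status byte, so the compiler always sees a 48-byte array no matter which
 * union member the entry really is.
 */
union directory_entry {
    struct {
        const char de_name[48];
        uint8_t    de_pad[15];
        uint8_t    de_status;
    };
    struct inode_entry inode;
    struct link_info   link;
};

int main(void)
{
    union directory_entry de;

    memset(&de, 0, sizeof(de));
    strcpy(de.link.dl_fname, "a-rather-long-link-target-name");
    de.link.dl_status = 1;

    /* reading through de_name is safe for both layouts: 48 bytes exist either way */
    size_t n = strnlen(de.de_name, sizeof(de.link.dl_fname));
    printf("name: %.*s (status %u)\n", (int)n, de.de_name, de.de_status);
    return 0;
}
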
2469 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
2470 +index 9446e8fbe55c5..bce983406aaf3 100644
2471 +--- a/include/linux/compiler.h
2472 ++++ b/include/linux/compiler.h
2473 +@@ -233,6 +233,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
2474 + (typeof(ptr)) (__ptr + (off)); })
2475 + #endif
2476 +
2477 ++#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
2478 ++
2479 + #ifndef OPTIMIZER_HIDE_VAR
2480 + /* Make the optimizer believe the variable can be manipulated arbitrarily. */
2481 + #define OPTIMIZER_HIDE_VAR(var) \
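
absolute_pointer() added above is just RELOC_HIDE() with a zero offset: it launders a constant address through an empty asm so the optimizer forgets which object it came from and stops flagging out-of-bounds accesses to fixed hardware addresses. A GNU-C user-space rendition that can actually be compiled and run; the "hardware" address is faked with an ordinary array:

#include <stdio.h>

/*
 * User-space rendition of the kernel's RELOC_HIDE(): the empty asm makes the
 * pointer's origin invisible to the optimizer, so no object-size tracking
 * survives the round trip.
 */
#define RELOC_HIDE(ptr, off)                                \
({                                                          \
    unsigned long __ptr;                                    \
    __asm__ ("" : "=r"(__ptr) : "0"(ptr));                  \
    (typeof(ptr)) (__ptr + (off));                          \
})

/* the new helper from the hunk above */
#define absolute_pointer(val)   RELOC_HIDE((void *)(val), 0)

static char fake_mmio[8] = "abcdefg";

int main(void)
{
    /* pretend this constant came from a datasheet rather than from &fake_mmio */
    unsigned long hw_addr = (unsigned long)fake_mmio;

    /*
     * A plain (char *)hw_addr can trip -Warray-bounds/-Wstringop-overread when
     * the compiler still remembers where the constant came from;
     * absolute_pointer() severs that link.
     */
    char *p = absolute_pointer(hw_addr);

    printf("%c%c\n", p[0], p[1]);
    return 0;
}
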
2482 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
2483 +index 712b2a603645f..c0eb85b2981e0 100644
2484 +--- a/include/linux/usb/hcd.h
2485 ++++ b/include/linux/usb/hcd.h
2486 +@@ -124,6 +124,7 @@ struct usb_hcd {
2487 + #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
2488 + #define HCD_FLAG_DEAD 6 /* controller has died? */
2489 + #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
2490 ++#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
2491 +
2492 + /* The flags can be tested using these macros; they are likely to
2493 + * be slightly faster than test_bit().
2494 +@@ -134,6 +135,7 @@ struct usb_hcd {
2495 + #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
2496 + #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
2497 + #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
2498 ++#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
2499 +
2500 + /*
2501 + * Specifies if interfaces are authorized by default
2502 +diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
2503 +index 27f5caa6299a3..ecfad7641096e 100644
2504 +--- a/include/trace/events/erofs.h
2505 ++++ b/include/trace/events/erofs.h
2506 +@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
2507 + TP_STRUCT__entry(
2508 + __field(dev_t, dev )
2509 + __field(erofs_nid_t, nid )
2510 +- __field(const char *, name )
2511 ++ __string(name, dentry->d_name.name )
2512 + __field(unsigned int, flags )
2513 + ),
2514 +
2515 + TP_fast_assign(
2516 + __entry->dev = dir->i_sb->s_dev;
2517 + __entry->nid = EROFS_I(dir)->nid;
2518 +- __entry->name = dentry->d_name.name;
2519 ++ __assign_str(name, dentry->d_name.name);
2520 + __entry->flags = flags;
2521 + ),
2522 +
2523 + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
2524 + show_dev_nid(__entry),
2525 +- __entry->name,
2526 ++ __get_str(name),
2527 + __entry->flags)
2528 + );
2529 +
2530 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2531 +index 60383b28549be..9c5fa5c529031 100644
2532 +--- a/kernel/bpf/verifier.c
2533 ++++ b/kernel/bpf/verifier.c
2534 +@@ -6839,6 +6839,8 @@ static int check_btf_line(struct bpf_verifier_env *env,
2535 + nr_linfo = attr->line_info_cnt;
2536 + if (!nr_linfo)
2537 + return 0;
2538 ++ if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
2539 ++ return -EINVAL;
2540 +
2541 + rec_size = attr->line_info_rec_size;
2542 + if (rec_size < MIN_BPF_LINEINFO_SIZE ||
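
The verifier hunk above rejects line-info counts whose total allocation size would overflow before the multiplication ever happens. The same pre-check in stand-alone form, with an invented record type:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct line_info { uint32_t off, line; };   /* stand-in record type */

static void *alloc_records(uint32_t nr)
{
    /*
     * Same guard as the hunk above: reject counts whose total size would
     * exceed INT_MAX before the size ever reaches the allocator.
     */
    if (nr > INT_MAX / sizeof(struct line_info)) {
        fprintf(stderr, "record count %u too large\n", nr);
        return NULL;
    }
    return calloc(nr, sizeof(struct line_info));
}

int main(void)
{
    void *ok  = alloc_records(1024);
    void *bad = alloc_records(UINT32_MAX);   /* rejected by the pre-check */

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}
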
2543 +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
2544 +index 884333b9fc767..749b27851f45d 100644
2545 +--- a/kernel/trace/blktrace.c
2546 ++++ b/kernel/trace/blktrace.c
2547 +@@ -1656,6 +1656,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
2548 + if (bt == NULL)
2549 + return -EINVAL;
2550 +
2551 ++ if (bt->trace_state == Blktrace_running) {
2552 ++ bt->trace_state = Blktrace_stopped;
2553 ++ spin_lock_irq(&running_trace_lock);
2554 ++ list_del_init(&bt->running_list);
2555 ++ spin_unlock_irq(&running_trace_lock);
2556 ++ relay_flush(bt->rchan);
2557 ++ }
2558 ++
2559 + put_probe_ref();
2560 + synchronize_rcu();
2561 + blk_trace_free(bt);
2562 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2563 +index bb68290ad68d8..9a6f66e0e9a27 100644
2564 +--- a/net/ipv6/ip6_fib.c
2565 ++++ b/net/ipv6/ip6_fib.c
2566 +@@ -1310,7 +1310,6 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
2567 + int err = -ENOMEM;
2568 + int allow_create = 1;
2569 + int replace_required = 0;
2570 +- int sernum = fib6_new_sernum(info->nl_net);
2571 +
2572 + if (info->nlh) {
2573 + if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
2574 +@@ -1410,7 +1409,7 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
2575 + if (!err) {
2576 + if (rt->nh)
2577 + list_add(&rt->nh_list, &rt->nh->f6i_list);
2578 +- __fib6_update_sernum_upto_root(rt, sernum);
2579 ++ __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net));
2580 + fib6_start_gc(info->nl_net, rt);
2581 + }
2582 +
2583 +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
2584 +index aee9ccfa99c22..ade1232699bbf 100644
2585 +--- a/net/smc/smc_clc.c
2586 ++++ b/net/smc/smc_clc.c
2587 +@@ -164,7 +164,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
2588 + goto out_rel;
2589 + }
2590 + /* get address to which the internal TCP socket is bound */
2591 +- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
2592 ++ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
2593 ++ goto out_rel;
2594 + /* analyze IP specific data of net_device belonging to TCP socket */
2595 + addr6 = (struct sockaddr_in6 *)&addrs;
2596 + rcu_read_lock();