From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 02 May 2020 19:23:11 +0000 (UTC)
Message-Id: 1588447374.11b08a10760b6d7004331fc6074fa4eeb2d7f08d.mpagano@gentoo
1 commit: 11b08a10760b6d7004331fc6074fa4eeb2d7f08d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat May 2 19:22:54 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat May 2 19:22:54 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=11b08a10760b6d7004331fc6074fa4eeb2d7f08d
7
8 Linux patch 4.14.178
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1177_linux-4.14.178.patch | 3850 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 3854 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f56b415..36bc305 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -751,6 +751,10 @@ Patch: 1176_linux-4.14.177.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.177
23
24 +Patch: 1177_linux-4.14.178.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.178
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1177_linux-4.14.178.patch b/1177_linux-4.14.178.patch
33 new file mode 100644
34 index 0000000..a685a18
35 --- /dev/null
36 +++ b/1177_linux-4.14.178.patch
37 @@ -0,0 +1,3850 @@
38 +diff --git a/Makefile b/Makefile
39 +index d81fb98737f7..73e93e596e50 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 177
47 ++SUBLEVEL = 178
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
52 +index fdb018e1278f..9d1e1061d8af 100644
53 +--- a/arch/arm/boot/dts/bcm283x.dtsi
54 ++++ b/arch/arm/boot/dts/bcm283x.dtsi
55 +@@ -454,6 +454,7 @@
56 + "dsi0_ddr2",
57 + "dsi0_ddr";
58 +
59 ++ status = "disabled";
60 + };
61 +
62 + thermal: thermal@7e212000 {
63 +diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
64 +index 8cf1a98785a5..1d0923b4a82b 100644
65 +--- a/arch/arm/mach-imx/Makefile
66 ++++ b/arch/arm/mach-imx/Makefile
67 +@@ -87,8 +87,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
68 + obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
69 + obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
70 + endif
71 ++ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
72 + AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
73 + obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
74 ++endif
75 + obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
76 +
77 + obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
78 +diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
79 +index 50a89bcf9072..2564dd429ab6 100644
80 +--- a/arch/arm64/include/asm/sysreg.h
81 ++++ b/arch/arm64/include/asm/sysreg.h
82 +@@ -60,7 +60,9 @@
83 + #ifndef CONFIG_BROKEN_GAS_INST
84 +
85 + #ifdef __ASSEMBLY__
86 +-#define __emit_inst(x) .inst (x)
87 ++// The space separator is omitted so that __emit_inst(x) can be parsed as
88 ++// either an assembler directive or an assembler macro argument.
89 ++#define __emit_inst(x) .inst(x)
90 + #else
91 + #define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
92 + #endif
93 +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
94 +index 41b3b2787f23..a1e336901cc8 100644
95 +--- a/arch/powerpc/kernel/setup_64.c
96 ++++ b/arch/powerpc/kernel/setup_64.c
97 +@@ -466,6 +466,8 @@ static bool __init parse_cache_info(struct device_node *np,
98 + lsizep = of_get_property(np, propnames[3], NULL);
99 + if (bsizep == NULL)
100 + bsizep = lsizep;
101 ++ if (lsizep == NULL)
102 ++ lsizep = bsizep;
103 + if (lsizep != NULL)
104 + lsize = be32_to_cpu(*lsizep);
105 + if (bsizep != NULL)
106 +diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
107 +index c0201b11e9e2..a6b323a3a630 100644
108 +--- a/arch/x86/kernel/cpu/mshyperv.c
109 ++++ b/arch/x86/kernel/cpu/mshyperv.c
110 +@@ -178,8 +178,8 @@ static void __init ms_hyperv_init_platform(void)
111 + ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
112 + ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
113 +
114 +- pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
115 +- ms_hyperv.features, ms_hyperv.hints);
116 ++ pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
117 ++ ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
118 +
119 + ms_hyperv.max_vp_index = cpuid_eax(HVCPUID_IMPLEMENTATION_LIMITS);
120 + ms_hyperv.max_lp_index = cpuid_ebx(HVCPUID_IMPLEMENTATION_LIMITS);
121 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
122 +index f8e3f3c48283..c139dedec12b 100644
123 +--- a/arch/x86/kvm/vmx.c
124 ++++ b/arch/x86/kvm/vmx.c
125 +@@ -6250,7 +6250,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
126 + */
127 + static void kvm_machine_check(void)
128 + {
129 +-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
130 ++#if defined(CONFIG_X86_MCE)
131 + struct pt_regs regs = {
132 + .cs = 3, /* Fake ring 3 no matter what the guest ran on */
133 + .flags = X86_EFLAGS_IF,
134 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
135 +index cdb386fa7101..0415c0cd4a19 100644
136 +--- a/arch/x86/net/bpf_jit_comp.c
137 ++++ b/arch/x86/net/bpf_jit_comp.c
138 +@@ -153,6 +153,19 @@ static bool is_ereg(u32 reg)
139 + BIT(BPF_REG_AX));
140 + }
141 +
142 ++/*
143 ++ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
144 ++ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
145 ++ * of encoding. al,cl,dl,bl have simpler encoding.
146 ++ */
147 ++static bool is_ereg_8l(u32 reg)
148 ++{
149 ++ return is_ereg(reg) ||
150 ++ (1 << reg) & (BIT(BPF_REG_1) |
151 ++ BIT(BPF_REG_2) |
152 ++ BIT(BPF_REG_FP));
153 ++}
154 ++
155 + /* add modifiers if 'reg' maps to x64 registers r8..r15 */
156 + static u8 add_1mod(u8 byte, u32 reg)
157 + {
158 +@@ -770,9 +783,8 @@ st: if (is_imm8(insn->off))
159 + /* STX: *(u8*)(dst_reg + off) = src_reg */
160 + case BPF_STX | BPF_MEM | BPF_B:
161 + /* emit 'mov byte ptr [rax + off], al' */
162 +- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
163 +- /* have to add extra byte for x86 SIL, DIL regs */
164 +- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
165 ++ if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
166 ++ /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
167 + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
168 + else
169 + EMIT1(0x88);
170 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
171 +index 9d5cb3b7a7a2..41474eec2181 100644
172 +--- a/drivers/android/binder_alloc.c
173 ++++ b/drivers/android/binder_alloc.c
174 +@@ -951,8 +951,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
175 + mm = alloc->vma_vm_mm;
176 + if (!mmget_not_zero(mm))
177 + goto err_mmget;
178 +- if (!down_write_trylock(&mm->mmap_sem))
179 +- goto err_down_write_mmap_sem_failed;
180 ++ if (!down_read_trylock(&mm->mmap_sem))
181 ++ goto err_down_read_mmap_sem_failed;
182 + vma = binder_alloc_get_vma(alloc);
183 +
184 + list_lru_isolate(lru, item);
185 +@@ -967,7 +967,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
186 +
187 + trace_binder_unmap_user_end(alloc, index);
188 + }
189 +- up_write(&mm->mmap_sem);
190 ++ up_read(&mm->mmap_sem);
191 + mmput(mm);
192 +
193 + trace_binder_unmap_kernel_start(alloc, index);
194 +@@ -982,7 +982,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
195 + mutex_unlock(&alloc->mutex);
196 + return LRU_REMOVED_RETRY;
197 +
198 +-err_down_write_mmap_sem_failed:
199 ++err_down_read_mmap_sem_failed:
200 + mmput_async(mm);
201 + err_mmget:
202 + err_page_already_freed:
203 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
204 +index 77e47dc5aacc..569e93e1f06c 100644
205 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
206 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
207 +@@ -1,5 +1,5 @@
208 + /*
209 +- * Copyright (C) 2012 IBM Corporation
210 ++ * Copyright (C) 2012-2020 IBM Corporation
211 + *
212 + * Author: Ashley Lai <ashleydlai@×××××.com>
213 + *
214 +@@ -140,6 +140,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
215 + return len;
216 + }
217 +
218 ++/**
219 ++ * ibmvtpm_crq_send_init - Send a CRQ initialize message
220 ++ * @ibmvtpm: vtpm device struct
221 ++ *
222 ++ * Return:
223 ++ * 0 on success.
224 ++ * Non-zero on failure.
225 ++ */
226 ++static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
227 ++{
228 ++ int rc;
229 ++
230 ++ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
231 ++ if (rc != H_SUCCESS)
232 ++ dev_err(ibmvtpm->dev,
233 ++ "%s failed rc=%d\n", __func__, rc);
234 ++
235 ++ return rc;
236 ++}
237 ++
238 ++/**
239 ++ * tpm_ibmvtpm_resume - Resume from suspend
240 ++ *
241 ++ * @dev: device struct
242 ++ *
243 ++ * Return: Always 0.
244 ++ */
245 ++static int tpm_ibmvtpm_resume(struct device *dev)
246 ++{
247 ++ struct tpm_chip *chip = dev_get_drvdata(dev);
248 ++ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
249 ++ int rc = 0;
250 ++
251 ++ do {
252 ++ if (rc)
253 ++ msleep(100);
254 ++ rc = plpar_hcall_norets(H_ENABLE_CRQ,
255 ++ ibmvtpm->vdev->unit_address);
256 ++ } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
257 ++
258 ++ if (rc) {
259 ++ dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
260 ++ return rc;
261 ++ }
262 ++
263 ++ rc = vio_enable_interrupts(ibmvtpm->vdev);
264 ++ if (rc) {
265 ++ dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
266 ++ return rc;
267 ++ }
268 ++
269 ++ rc = ibmvtpm_crq_send_init(ibmvtpm);
270 ++ if (rc)
271 ++ dev_err(dev, "Error send_init rc=%d\n", rc);
272 ++
273 ++ return rc;
274 ++}
275 ++
276 + /**
277 + * tpm_ibmvtpm_send() - Send a TPM command
278 + * @chip: tpm chip struct
279 +@@ -153,6 +211,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
280 + static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
281 + {
282 + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
283 ++ bool retry = true;
284 + int rc, sig;
285 +
286 + if (!ibmvtpm->rtce_buf) {
287 +@@ -186,18 +245,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
288 + */
289 + ibmvtpm->tpm_processing_cmd = true;
290 +
291 ++again:
292 + rc = ibmvtpm_send_crq(ibmvtpm->vdev,
293 + IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
294 + count, ibmvtpm->rtce_dma_handle);
295 + if (rc != H_SUCCESS) {
296 ++ /*
297 ++ * H_CLOSED can be returned after LPM resume. Call
298 ++ * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
299 ++ * ibmvtpm_send_crq() once before failing.
300 ++ */
301 ++ if (rc == H_CLOSED && retry) {
302 ++ tpm_ibmvtpm_resume(ibmvtpm->dev);
303 ++ retry = false;
304 ++ goto again;
305 ++ }
306 + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
307 +- rc = 0;
308 + ibmvtpm->tpm_processing_cmd = false;
309 +- } else
310 +- rc = 0;
311 ++ }
312 +
313 + spin_unlock(&ibmvtpm->rtce_lock);
314 +- return rc;
315 ++ return 0;
316 + }
317 +
318 + static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
319 +@@ -275,26 +343,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
320 + return rc;
321 + }
322 +
323 +-/**
324 +- * ibmvtpm_crq_send_init - Send a CRQ initialize message
325 +- * @ibmvtpm: vtpm device struct
326 +- *
327 +- * Return:
328 +- * 0 on success.
329 +- * Non-zero on failure.
330 +- */
331 +-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
332 +-{
333 +- int rc;
334 +-
335 +- rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
336 +- if (rc != H_SUCCESS)
337 +- dev_err(ibmvtpm->dev,
338 +- "ibmvtpm_crq_send_init failed rc=%d\n", rc);
339 +-
340 +- return rc;
341 +-}
342 +-
343 + /**
344 + * tpm_ibmvtpm_remove - ibm vtpm remove entry point
345 + * @vdev: vio device struct
346 +@@ -407,44 +455,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
347 + ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
348 + }
349 +
350 +-/**
351 +- * tpm_ibmvtpm_resume - Resume from suspend
352 +- *
353 +- * @dev: device struct
354 +- *
355 +- * Return: Always 0.
356 +- */
357 +-static int tpm_ibmvtpm_resume(struct device *dev)
358 +-{
359 +- struct tpm_chip *chip = dev_get_drvdata(dev);
360 +- struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
361 +- int rc = 0;
362 +-
363 +- do {
364 +- if (rc)
365 +- msleep(100);
366 +- rc = plpar_hcall_norets(H_ENABLE_CRQ,
367 +- ibmvtpm->vdev->unit_address);
368 +- } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
369 +-
370 +- if (rc) {
371 +- dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
372 +- return rc;
373 +- }
374 +-
375 +- rc = vio_enable_interrupts(ibmvtpm->vdev);
376 +- if (rc) {
377 +- dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
378 +- return rc;
379 +- }
380 +-
381 +- rc = ibmvtpm_crq_send_init(ibmvtpm);
382 +- if (rc)
383 +- dev_err(dev, "Error send_init rc=%d\n", rc);
384 +-
385 +- return rc;
386 +-}
387 +-
388 + static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
389 + {
390 + return (status == 0);
391 +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
392 +index a7d9c0c53fcd..9b1116501f20 100644
393 +--- a/drivers/char/tpm/tpm_tis_core.c
394 ++++ b/drivers/char/tpm/tpm_tis_core.c
395 +@@ -331,6 +331,9 @@ static void disable_interrupts(struct tpm_chip *chip)
396 + u32 intmask;
397 + int rc;
398 +
399 ++ if (priv->irq == 0)
400 ++ return;
401 ++
402 + rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
403 + if (rc < 0)
404 + intmask = 0;
405 +@@ -874,9 +877,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
406 + if (irq) {
407 + tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
408 + irq);
409 +- if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
410 ++ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
411 + dev_err(&chip->dev, FW_BUG
412 + "TPM interrupt not working, polling instead\n");
413 ++
414 ++ disable_interrupts(chip);
415 ++ }
416 + } else {
417 + tpm_tis_probe_irq(chip, intmask);
418 + }
419 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
420 +index 5a4b8aee22a8..eb569cf06309 100644
421 +--- a/drivers/crypto/mxs-dcp.c
422 ++++ b/drivers/crypto/mxs-dcp.c
423 +@@ -37,11 +37,11 @@
424 + * Null hashes to align with hw behavior on imx6sl and ull
425 + * these are flipped for consistency with hw output
426 + */
427 +-const uint8_t sha1_null_hash[] =
428 ++static const uint8_t sha1_null_hash[] =
429 + "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
430 + "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
431 +
432 +-const uint8_t sha256_null_hash[] =
433 ++static const uint8_t sha256_null_hash[] =
434 + "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
435 + "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
436 + "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
437 +diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
438 +index 300c4624aa6c..b9403851f37f 100644
439 +--- a/drivers/gpu/drm/msm/msm_gem.c
440 ++++ b/drivers/gpu/drm/msm/msm_gem.c
441 +@@ -61,7 +61,7 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
442 + {
443 + struct device *dev = msm_obj->base.dev->dev;
444 +
445 +- if (get_dma_ops(dev)) {
446 ++ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
447 + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
448 + msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
449 + } else {
450 +@@ -74,7 +74,7 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
451 + {
452 + struct device *dev = msm_obj->base.dev->dev;
453 +
454 +- if (get_dma_ops(dev)) {
455 ++ if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
456 + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
457 + msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
458 + } else {
459 +diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
460 +index e5234f953a6d..b6e5aaa54963 100644
461 +--- a/drivers/hwmon/jc42.c
462 ++++ b/drivers/hwmon/jc42.c
463 +@@ -527,7 +527,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
464 + }
465 + data->config = config;
466 +
467 +- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
468 ++ hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
469 + data, &jc42_chip_info,
470 + NULL);
471 + return PTR_ERR_OR_ZERO(hwmon_dev);
472 +diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
473 +index a1cdcfc74acf..8915ee30a5b4 100644
474 +--- a/drivers/i2c/busses/i2c-altera.c
475 ++++ b/drivers/i2c/busses/i2c-altera.c
476 +@@ -395,7 +395,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
477 + struct altr_i2c_dev *idev = NULL;
478 + struct resource *res;
479 + int irq, ret;
480 +- u32 val;
481 +
482 + idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
483 + if (!idev)
484 +@@ -422,17 +421,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
485 + init_completion(&idev->msg_complete);
486 + spin_lock_init(&idev->lock);
487 +
488 +- val = device_property_read_u32(idev->dev, "fifo-size",
489 ++ ret = device_property_read_u32(idev->dev, "fifo-size",
490 + &idev->fifo_size);
491 +- if (val) {
492 ++ if (ret) {
493 + dev_err(&pdev->dev, "FIFO size set to default of %d\n",
494 + ALTR_I2C_DFLT_FIFO_SZ);
495 + idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
496 + }
497 +
498 +- val = device_property_read_u32(idev->dev, "clock-frequency",
499 ++ ret = device_property_read_u32(idev->dev, "clock-frequency",
500 + &idev->bus_clk_rate);
501 +- if (val) {
502 ++ if (ret) {
503 + dev_err(&pdev->dev, "Default to 100kHz\n");
504 + idev->bus_clk_rate = 100000; /* default clock rate */
505 + }
506 +diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
507 +index 07246a6037e3..f64781d03d5d 100644
508 +--- a/drivers/iio/adc/ad7793.c
509 ++++ b/drivers/iio/adc/ad7793.c
510 +@@ -543,7 +543,7 @@ static const struct iio_info ad7797_info = {
511 + .read_raw = &ad7793_read_raw,
512 + .write_raw = &ad7793_write_raw,
513 + .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
514 +- .attrs = &ad7793_attribute_group,
515 ++ .attrs = &ad7797_attribute_group,
516 + .validate_trigger = ad_sd_validate_trigger,
517 + .driver_module = THIS_MODULE,
518 + };
519 +diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
520 +index 258a4712167a..3cfb2d4b2441 100644
521 +--- a/drivers/iio/adc/stm32-adc.c
522 ++++ b/drivers/iio/adc/stm32-adc.c
523 +@@ -1311,8 +1311,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
524 + static void stm32_adc_dma_buffer_done(void *data)
525 + {
526 + struct iio_dev *indio_dev = data;
527 ++ struct stm32_adc *adc = iio_priv(indio_dev);
528 ++ int residue = stm32_adc_dma_residue(adc);
529 ++
530 ++ /*
531 ++ * In DMA mode the trigger services of IIO are not used
532 ++ * (e.g. no call to iio_trigger_poll).
533 ++ * Calling irq handler associated to the hardware trigger is not
534 ++ * relevant as the conversions have already been done. Data
535 ++ * transfers are performed directly in DMA callback instead.
536 ++ * This implementation avoids to call trigger irq handler that
537 ++ * may sleep, in an atomic context (DMA irq handler context).
538 ++ */
539 ++ dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
540 +
541 +- iio_trigger_poll_chained(indio_dev->trig);
542 ++ while (residue >= indio_dev->scan_bytes) {
543 ++ u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
544 ++
545 ++ iio_push_to_buffers(indio_dev, buffer);
546 ++
547 ++ residue -= indio_dev->scan_bytes;
548 ++ adc->bufi += indio_dev->scan_bytes;
549 ++ if (adc->bufi >= adc->rx_buf_sz)
550 ++ adc->bufi = 0;
551 ++ }
552 + }
553 +
554 + static int stm32_adc_dma_start(struct iio_dev *indio_dev)
555 +@@ -1648,6 +1670,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
556 + {
557 + struct iio_dev *indio_dev;
558 + struct device *dev = &pdev->dev;
559 ++ irqreturn_t (*handler)(int irq, void *p) = NULL;
560 + struct stm32_adc *adc;
561 + int ret;
562 +
563 +@@ -1730,9 +1753,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
564 + if (ret < 0)
565 + goto err_clk_disable;
566 +
567 ++ if (!adc->dma_chan)
568 ++ handler = &stm32_adc_trigger_handler;
569 ++
570 + ret = iio_triggered_buffer_setup(indio_dev,
571 +- &iio_pollfunc_store_time,
572 +- &stm32_adc_trigger_handler,
573 ++ &iio_pollfunc_store_time, handler,
574 + &stm32_adc_buffer_setup_ops);
575 + if (ret) {
576 + dev_err(&pdev->dev, "buffer setup failed\n");
577 +diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
578 +index e89711b30ae8..36db28b9099f 100644
579 +--- a/drivers/iio/adc/xilinx-xadc-core.c
580 ++++ b/drivers/iio/adc/xilinx-xadc-core.c
581 +@@ -660,7 +660,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
582 +
583 + spin_lock_irqsave(&xadc->lock, flags);
584 + xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
585 +- xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
586 ++ xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
587 + if (state)
588 + val |= XADC_AXI_INT_EOS;
589 + else
590 +@@ -709,13 +709,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
591 + {
592 + uint16_t val;
593 +
594 ++ /* Powerdown the ADC-B when it is not needed. */
595 + switch (seq_mode) {
596 + case XADC_CONF1_SEQ_SIMULTANEOUS:
597 + case XADC_CONF1_SEQ_INDEPENDENT:
598 +- val = XADC_CONF2_PD_ADC_B;
599 ++ val = 0;
600 + break;
601 + default:
602 +- val = 0;
603 ++ val = XADC_CONF2_PD_ADC_B;
604 + break;
605 + }
606 +
607 +@@ -784,6 +785,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
608 + if (ret)
609 + goto err;
610 +
611 ++ /*
612 ++ * In simultaneous mode the upper and lower aux channels are samples at
613 ++ * the same time. In this mode the upper 8 bits in the sequencer
614 ++ * register are don't care and the lower 8 bits control two channels
615 ++ * each. As such we must set the bit if either the channel in the lower
616 ++ * group or the upper group is enabled.
617 ++ */
618 ++ if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
619 ++ scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
620 ++
621 + ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
622 + if (ret)
623 + goto err;
624 +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
625 +index e773dc6fdd3c..1f0d83086cb0 100644
626 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c
627 ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
628 +@@ -1883,7 +1883,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
629 + continue;
630 + }
631 +
632 +- if (time_after(jiffies, timeo) && !chip_ready(map, adr))
633 ++ /*
634 ++ * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
635 ++ * the failure due to scheduling.
636 ++ */
637 ++ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
638 + break;
639 +
640 + if (chip_good(map, adr, datum)) {
641 +diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
642 +index e5c86d44667a..1b2a337d673d 100644
643 +--- a/drivers/net/dsa/b53/b53_regs.h
644 ++++ b/drivers/net/dsa/b53/b53_regs.h
645 +@@ -294,7 +294,7 @@
646 + *
647 + * BCM5325 and BCM5365 share most definitions below
648 + */
649 +-#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
650 ++#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
651 + #define ARLTBL_MAC_MASK 0xffffffffffffULL
652 + #define ARLTBL_VID_S 48
653 + #define ARLTBL_VID_MASK_25 0xff
654 +@@ -306,7 +306,7 @@
655 + #define ARLTBL_VALID_25 BIT(63)
656 +
657 + /* ARL Table Data Entry N Registers (32 bit) */
658 +-#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
659 ++#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
660 + #define ARLTBL_DATA_PORT_ID_MASK 0x1ff
661 + #define ARLTBL_TC(tc) ((3 & tc) << 11)
662 + #define ARLTBL_AGE BIT(14)
663 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
664 +index 3e3044fe3206..4b3660c63b86 100644
665 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
666 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
667 +@@ -973,6 +973,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
668 + if (netif_running(dev))
669 + bcmgenet_update_mib_counters(priv);
670 +
671 ++ dev->netdev_ops->ndo_get_stats(dev);
672 ++
673 + for (i = 0; i < BCMGENET_STATS_LEN; i++) {
674 + const struct bcmgenet_stats *s;
675 + char *p;
676 +@@ -3215,6 +3217,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
677 + dev->stats.rx_packets = rx_packets;
678 + dev->stats.rx_errors = rx_errors;
679 + dev->stats.rx_missed_errors = rx_errors;
680 ++ dev->stats.rx_dropped = rx_dropped;
681 + return &dev->stats;
682 + }
683 +
684 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
685 +index 758f2b836328..ff7e58a8c90f 100644
686 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
687 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
688 +@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
689 + */
690 + static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
691 + {
692 +- struct adapter *adapter = (struct adapter *)container_of(ptp,
693 +- struct adapter, ptp_clock_info);
694 +- struct fw_ptp_cmd c;
695 ++ struct adapter *adapter = container_of(ptp, struct adapter,
696 ++ ptp_clock_info);
697 + u64 ns;
698 +- int err;
699 +-
700 +- memset(&c, 0, sizeof(c));
701 +- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
702 +- FW_CMD_REQUEST_F |
703 +- FW_CMD_READ_F |
704 +- FW_PTP_CMD_PORTID_V(0));
705 +- c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
706 +- c.u.ts.sc = FW_PTP_SC_GET_TIME;
707 +
708 +- err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
709 +- if (err < 0) {
710 +- dev_err(adapter->pdev_dev,
711 +- "PTP: %s error %d\n", __func__, -err);
712 +- return err;
713 +- }
714 ++ ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
715 ++ ns |= (u64)t4_read_reg(adapter,
716 ++ T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
717 +
718 + /* convert to timespec*/
719 +- ns = be64_to_cpu(c.u.ts.tm);
720 + *ts = ns_to_timespec64(ns);
721 +-
722 +- return err;
723 ++ return 0;
724 + }
725 +
726 + /**
727 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
728 +index 39bcf27902e4..0f126ce4645f 100644
729 +--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
730 ++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
731 +@@ -3609,7 +3609,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
732 + FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
733 + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
734 + &param, &val);
735 +- if (ret < 0)
736 ++ if (ret)
737 + return ret;
738 + *phy_fw_ver = val;
739 + return 0;
740 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
741 +index dac90837842b..d3df6962cf43 100644
742 +--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
743 ++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
744 +@@ -1810,6 +1810,9 @@
745 +
746 + #define MAC_PORT_CFG2_A 0x818
747 +
748 ++#define MAC_PORT_PTP_SUM_LO_A 0x990
749 ++#define MAC_PORT_PTP_SUM_HI_A 0x994
750 ++
751 + #define MPS_CMN_CTL_A 0x9000
752 +
753 + #define COUNTPAUSEMCRX_S 5
754 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
755 +index c4e8bf0773fe..6024b832b4d9 100644
756 +--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
757 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
758 +@@ -3151,26 +3151,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
759 +
760 + static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
761 + {
762 +- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
763 ++ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
764 + u32 page_cnt = p_chain->page_cnt, i, pbl_size;
765 +- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
766 +
767 +- if (!pp_virt_addr_tbl)
768 ++ if (!pp_addr_tbl)
769 + return;
770 +
771 +- if (!p_pbl_virt)
772 +- goto out;
773 +-
774 + for (i = 0; i < page_cnt; i++) {
775 +- if (!pp_virt_addr_tbl[i])
776 ++ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
777 + break;
778 +
779 + dma_free_coherent(&cdev->pdev->dev,
780 + QED_CHAIN_PAGE_SIZE,
781 +- pp_virt_addr_tbl[i],
782 +- *(dma_addr_t *)p_pbl_virt);
783 +-
784 +- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
785 ++ pp_addr_tbl[i].virt_addr,
786 ++ pp_addr_tbl[i].dma_map);
787 + }
788 +
789 + pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
790 +@@ -3180,9 +3174,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
791 + pbl_size,
792 + p_chain->pbl_sp.p_virt_table,
793 + p_chain->pbl_sp.p_phys_table);
794 +-out:
795 +- vfree(p_chain->pbl.pp_virt_addr_tbl);
796 +- p_chain->pbl.pp_virt_addr_tbl = NULL;
797 ++
798 ++ vfree(p_chain->pbl.pp_addr_tbl);
799 ++ p_chain->pbl.pp_addr_tbl = NULL;
800 + }
801 +
802 + void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
803 +@@ -3283,19 +3277,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
804 + {
805 + u32 page_cnt = p_chain->page_cnt, size, i;
806 + dma_addr_t p_phys = 0, p_pbl_phys = 0;
807 +- void **pp_virt_addr_tbl = NULL;
808 ++ struct addr_tbl_entry *pp_addr_tbl;
809 + u8 *p_pbl_virt = NULL;
810 + void *p_virt = NULL;
811 +
812 +- size = page_cnt * sizeof(*pp_virt_addr_tbl);
813 +- pp_virt_addr_tbl = vzalloc(size);
814 +- if (!pp_virt_addr_tbl)
815 ++ size = page_cnt * sizeof(*pp_addr_tbl);
816 ++ pp_addr_tbl = vzalloc(size);
817 ++ if (!pp_addr_tbl)
818 + return -ENOMEM;
819 +
820 + /* The allocation of the PBL table is done with its full size, since it
821 + * is expected to be successive.
822 + * qed_chain_init_pbl_mem() is called even in a case of an allocation
823 +- * failure, since pp_virt_addr_tbl was previously allocated, and it
824 ++ * failure, since tbl was previously allocated, and it
825 + * should be saved to allow its freeing during the error flow.
826 + */
827 + size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
828 +@@ -3309,8 +3303,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
829 + p_chain->b_external_pbl = true;
830 + }
831 +
832 +- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
833 +- pp_virt_addr_tbl);
834 ++ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
835 + if (!p_pbl_virt)
836 + return -ENOMEM;
837 +
838 +@@ -3329,7 +3322,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
839 + /* Fill the PBL table with the physical address of the page */
840 + *(dma_addr_t *)p_pbl_virt = p_phys;
841 + /* Keep the virtual address of the page */
842 +- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
843 ++ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
844 ++ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
845 +
846 + p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
847 + }
848 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
849 +index 5959e8817a1b..926e2eb528fd 100644
850 +--- a/drivers/net/macsec.c
851 ++++ b/drivers/net/macsec.c
852 +@@ -3209,11 +3209,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
853 + struct netlink_ext_ack *extack)
854 + {
855 + struct macsec_dev *macsec = macsec_priv(dev);
856 ++ rx_handler_func_t *rx_handler;
857 ++ u8 icv_len = DEFAULT_ICV_LEN;
858 + struct net_device *real_dev;
859 +- int err;
860 ++ int err, mtu;
861 + sci_t sci;
862 +- u8 icv_len = DEFAULT_ICV_LEN;
863 +- rx_handler_func_t *rx_handler;
864 +
865 + if (!tb[IFLA_LINK])
866 + return -EINVAL;
867 +@@ -3229,7 +3229,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
868 +
869 + if (data && data[IFLA_MACSEC_ICV_LEN])
870 + icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
871 +- dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
872 ++ mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
873 ++ if (mtu < 0)
874 ++ dev->mtu = 0;
875 ++ else
876 ++ dev->mtu = mtu;
877 +
878 + rx_handler = rtnl_dereference(real_dev->rx_handler);
879 + if (rx_handler && rx_handler != macsec_handle_frame)
880 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
881 +index 6989e84670e5..3072fc902eca 100644
882 +--- a/drivers/net/macvlan.c
883 ++++ b/drivers/net/macvlan.c
884 +@@ -1673,7 +1673,7 @@ static int macvlan_device_event(struct notifier_block *unused,
885 + struct macvlan_dev,
886 + list);
887 +
888 +- if (macvlan_sync_address(vlan->dev, dev->dev_addr))
889 ++ if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
890 + return NOTIFY_BAD;
891 +
892 + break;
893 +diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
894 +index 3dba58fa3433..396a8c6cb999 100644
895 +--- a/drivers/net/team/team.c
896 ++++ b/drivers/net/team/team.c
897 +@@ -480,6 +480,9 @@ static const struct team_mode *team_mode_get(const char *kind)
898 + struct team_mode_item *mitem;
899 + const struct team_mode *mode = NULL;
900 +
901 ++ if (!try_module_get(THIS_MODULE))
902 ++ return NULL;
903 ++
904 + spin_lock(&mode_list_lock);
905 + mitem = __find_mode(kind);
906 + if (!mitem) {
907 +@@ -495,6 +498,7 @@ static const struct team_mode *team_mode_get(const char *kind)
908 + }
909 +
910 + spin_unlock(&mode_list_lock);
911 ++ module_put(THIS_MODULE);
912 + return mode;
913 + }
914 +
915 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
916 +index 03e4fcdfeab7..811fe0bde8a3 100644
917 +--- a/drivers/net/vrf.c
918 ++++ b/drivers/net/vrf.c
919 +@@ -476,7 +476,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
920 + if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
921 + return skb;
922 +
923 +- if (qdisc_tx_is_default(vrf_dev))
924 ++ if (qdisc_tx_is_default(vrf_dev) ||
925 ++ IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
926 + return vrf_ip6_out_direct(vrf_dev, sk, skb);
927 +
928 + return vrf_ip6_out_redirect(vrf_dev, skb);
929 +@@ -692,7 +693,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
930 + ipv4_is_lbcast(ip_hdr(skb)->daddr))
931 + return skb;
932 +
933 +- if (qdisc_tx_is_default(vrf_dev))
934 ++ if (qdisc_tx_is_default(vrf_dev) ||
935 ++ IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
936 + return vrf_ip_out_direct(vrf_dev, sk, skb);
937 +
938 + return vrf_ip_out_redirect(vrf_dev, skb);
939 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
940 +index bbb39d6ec2ee..f37018d72b44 100644
941 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
942 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
943 +@@ -1124,6 +1124,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
944 +
945 + iwl_pcie_gen2_txq_unmap(trans, queue);
946 +
947 ++ iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
948 ++ trans_pcie->txq[queue] = NULL;
949 ++
950 + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
951 + }
952 +
953 +diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
954 +index 6f58767b5190..400031622b76 100644
955 +--- a/drivers/pci/pcie/aspm.c
956 ++++ b/drivers/pci/pcie/aspm.c
957 +@@ -80,6 +80,7 @@ struct pcie_link_state {
958 + u32 clkpm_capable:1; /* Clock PM capable? */
959 + u32 clkpm_enabled:1; /* Current Clock PM state */
960 + u32 clkpm_default:1; /* Default Clock PM state by BIOS */
961 ++ u32 clkpm_disable:1; /* Clock PM disabled */
962 +
963 + /* Exit latencies */
964 + struct aspm_latency latency_up; /* Upstream direction exit latency */
965 +@@ -177,8 +178,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
966 +
967 + static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
968 + {
969 +- /* Don't enable Clock PM if the link is not Clock PM capable */
970 +- if (!link->clkpm_capable)
971 ++ /*
972 ++ * Don't enable Clock PM if the link is not Clock PM capable
973 ++ * or Clock PM is disabled
974 ++ */
975 ++ if (!link->clkpm_capable || link->clkpm_disable)
976 + enable = 0;
977 + /* Need nothing if the specified equals to current state */
978 + if (link->clkpm_enabled == enable)
979 +@@ -208,7 +212,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
980 + }
981 + link->clkpm_enabled = enabled;
982 + link->clkpm_default = enabled;
983 +- link->clkpm_capable = (blacklist) ? 0 : capable;
984 ++ link->clkpm_capable = capable;
985 ++ link->clkpm_disable = blacklist ? 1 : 0;
986 + }
987 +
988 + static bool pcie_retrain_link(struct pcie_link_state *link)
989 +@@ -1052,10 +1057,9 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
990 + link->aspm_disable |= ASPM_STATE_L1;
991 + pcie_config_aspm_link(link, policy_to_aspm_state(link));
992 +
993 +- if (state & PCIE_LINK_STATE_CLKPM) {
994 +- link->clkpm_capable = 0;
995 +- pcie_set_clkpm(link, 0);
996 +- }
997 ++ if (state & PCIE_LINK_STATE_CLKPM)
998 ++ link->clkpm_disable = 1;
999 ++ pcie_set_clkpm(link, policy_to_clkpm_state(link));
1000 + mutex_unlock(&aspm_lock);
1001 + if (sem)
1002 + up_read(&pci_bus_sem);
1003 +diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
1004 +index db001cba937f..e340ad79a1ec 100644
1005 +--- a/drivers/pwm/pwm-bcm2835.c
1006 ++++ b/drivers/pwm/pwm-bcm2835.c
1007 +@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
1008 +
1009 + pc->chip.dev = &pdev->dev;
1010 + pc->chip.ops = &bcm2835_pwm_ops;
1011 ++ pc->chip.base = -1;
1012 + pc->chip.npwm = 2;
1013 + pc->chip.of_xlate = of_pwm_xlate_with_flags;
1014 + pc->chip.of_pwm_n_cells = 3;
1015 +diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
1016 +index 0fcf94ffad32..c298bec25a90 100644
1017 +--- a/drivers/pwm/pwm-rcar.c
1018 ++++ b/drivers/pwm/pwm-rcar.c
1019 +@@ -236,24 +236,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
1020 + rcar_pwm->chip.base = -1;
1021 + rcar_pwm->chip.npwm = 1;
1022 +
1023 ++ pm_runtime_enable(&pdev->dev);
1024 ++
1025 + ret = pwmchip_add(&rcar_pwm->chip);
1026 + if (ret < 0) {
1027 + dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
1028 ++ pm_runtime_disable(&pdev->dev);
1029 + return ret;
1030 + }
1031 +
1032 +- pm_runtime_enable(&pdev->dev);
1033 +-
1034 + return 0;
1035 + }
1036 +
1037 + static int rcar_pwm_remove(struct platform_device *pdev)
1038 + {
1039 + struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
1040 ++ int ret;
1041 ++
1042 ++ ret = pwmchip_remove(&rcar_pwm->chip);
1043 +
1044 + pm_runtime_disable(&pdev->dev);
1045 +
1046 +- return pwmchip_remove(&rcar_pwm->chip);
1047 ++ return ret;
1048 + }
1049 +
1050 + static const struct of_device_id rcar_pwm_of_table[] = {
1051 +diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
1052 +index 29267d12fb4c..9c7962f2f0aa 100644
1053 +--- a/drivers/pwm/pwm-renesas-tpu.c
1054 ++++ b/drivers/pwm/pwm-renesas-tpu.c
1055 +@@ -423,16 +423,17 @@ static int tpu_probe(struct platform_device *pdev)
1056 + tpu->chip.base = -1;
1057 + tpu->chip.npwm = TPU_CHANNEL_MAX;
1058 +
1059 ++ pm_runtime_enable(&pdev->dev);
1060 ++
1061 + ret = pwmchip_add(&tpu->chip);
1062 + if (ret < 0) {
1063 + dev_err(&pdev->dev, "failed to register PWM chip\n");
1064 ++ pm_runtime_disable(&pdev->dev);
1065 + return ret;
1066 + }
1067 +
1068 + dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
1069 +
1070 +- pm_runtime_enable(&pdev->dev);
1071 +-
1072 + return 0;
1073 + }
1074 +
1075 +@@ -442,12 +443,10 @@ static int tpu_remove(struct platform_device *pdev)
1076 + int ret;
1077 +
1078 + ret = pwmchip_remove(&tpu->chip);
1079 +- if (ret)
1080 +- return ret;
1081 +
1082 + pm_runtime_disable(&pdev->dev);
1083 +
1084 +- return 0;
1085 ++ return ret;
1086 + }
1087 +
1088 + #ifdef CONFIG_OF
1089 +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
1090 +index cc733b89560a..8f4fa1a52f05 100644
1091 +--- a/drivers/remoteproc/remoteproc_core.c
1092 ++++ b/drivers/remoteproc/remoteproc_core.c
1093 +@@ -288,7 +288,7 @@ void rproc_free_vring(struct rproc_vring *rvring)
1094 + {
1095 + int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
1096 + struct rproc *rproc = rvring->rvdev->rproc;
1097 +- int idx = rvring->rvdev->vring - rvring;
1098 ++ int idx = rvring - rvring->rvdev->vring;
1099 + struct fw_rsc_vdev *rsc;
1100 +
1101 + dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
1102 +diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
1103 +index e5c32f4b5287..d2203cd17813 100644
1104 +--- a/drivers/s390/cio/device.c
1105 ++++ b/drivers/s390/cio/device.c
1106 +@@ -828,8 +828,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
1107 + * Now we know this subchannel will stay, we can throw
1108 + * our delayed uevent.
1109 + */
1110 +- dev_set_uevent_suppress(&sch->dev, 0);
1111 +- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1112 ++ if (dev_get_uevent_suppress(&sch->dev)) {
1113 ++ dev_set_uevent_suppress(&sch->dev, 0);
1114 ++ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1115 ++ }
1116 + /* make it known to the system */
1117 + ret = ccw_device_add(cdev);
1118 + if (ret) {
1119 +@@ -1037,8 +1039,11 @@ static int io_subchannel_probe(struct subchannel *sch)
1120 + * Throw the delayed uevent for the subchannel, register
1121 + * the ccw_device and exit.
1122 + */
1123 +- dev_set_uevent_suppress(&sch->dev, 0);
1124 +- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1125 ++ if (dev_get_uevent_suppress(&sch->dev)) {
1126 ++ /* should always be the case for the console */
1127 ++ dev_set_uevent_suppress(&sch->dev, 0);
1128 ++ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1129 ++ }
1130 + cdev = sch_get_cdev(sch);
1131 + rc = ccw_device_add(cdev);
1132 + if (rc) {
1133 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
1134 +index d8e0ba68879c..480d2d467f7a 100644
1135 +--- a/drivers/scsi/lpfc/lpfc_sli.c
1136 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
1137 +@@ -2271,6 +2271,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1138 + !pmb->u.mb.mbxStatus) {
1139 + rpi = pmb->u.mb.un.varWords[0];
1140 + vpi = pmb->u.mb.un.varRegLogin.vpi;
1141 ++ if (phba->sli_rev == LPFC_SLI_REV4)
1142 ++ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
1143 + lpfc_unreg_login(phba, vpi, rpi, pmb);
1144 + pmb->vport = vport;
1145 + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1146 +diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
1147 +index aecb563a2b4e..958901523469 100644
1148 +--- a/drivers/scsi/scsi_transport_iscsi.c
1149 ++++ b/drivers/scsi/scsi_transport_iscsi.c
1150 +@@ -2010,7 +2010,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
1151 + if (session->target_id == ISCSI_MAX_TARGET) {
1152 + spin_unlock_irqrestore(&session->lock, flags);
1153 + mutex_unlock(&ihost->mutex);
1154 +- return;
1155 ++ goto unbind_session_exit;
1156 + }
1157 +
1158 + target_id = session->target_id;
1159 +@@ -2022,6 +2022,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
1160 + ida_simple_remove(&iscsi_sess_ida, target_id);
1161 +
1162 + scsi_remove_target(&session->dev);
1163 ++
1164 ++unbind_session_exit:
1165 + iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
1166 + ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
1167 + }
1168 +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
1169 +index e19e395b0e44..9f1ec427c168 100644
1170 +--- a/drivers/staging/comedi/comedi_fops.c
1171 ++++ b/drivers/staging/comedi/comedi_fops.c
1172 +@@ -2603,8 +2603,10 @@ static int comedi_open(struct inode *inode, struct file *file)
1173 + }
1174 +
1175 + cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
1176 +- if (!cfp)
1177 ++ if (!cfp) {
1178 ++ comedi_dev_put(dev);
1179 + return -ENOMEM;
1180 ++ }
1181 +
1182 + cfp->dev = dev;
1183 +
1184 +diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
1185 +index ce5571971194..9b773c2e140b 100644
1186 +--- a/drivers/staging/comedi/drivers/dt2815.c
1187 ++++ b/drivers/staging/comedi/drivers/dt2815.c
1188 +@@ -101,6 +101,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
1189 + int ret;
1190 +
1191 + for (i = 0; i < insn->n; i++) {
1192 ++ /* FIXME: lo bit 0 chooses voltage output or current output */
1193 + lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
1194 + hi = (data[i] & 0xff0) >> 4;
1195 +
1196 +@@ -114,6 +115,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
1197 + if (ret)
1198 + return ret;
1199 +
1200 ++ outb(hi, dev->iobase + DT2815_DATA);
1201 ++
1202 + devpriv->ao_readback[chan] = data[i];
1203 + }
1204 + return i;
1205 +diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
1206 +index c521729c4192..d5d89e836309 100644
1207 +--- a/drivers/staging/vt6656/int.c
1208 ++++ b/drivers/staging/vt6656/int.c
1209 +@@ -153,7 +153,8 @@ void vnt_int_process_data(struct vnt_private *priv)
1210 + priv->wake_up_count =
1211 + priv->hw->conf.listen_interval;
1212 +
1213 +- --priv->wake_up_count;
1214 ++ if (priv->wake_up_count)
1215 ++ --priv->wake_up_count;
1216 +
1217 + /* Turn on wake up to listen next beacon */
1218 + if (priv->wake_up_count == 1)
1219 +diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
1220 +index cc18cb141bff..5ecc1a97cb44 100644
1221 +--- a/drivers/staging/vt6656/key.c
1222 ++++ b/drivers/staging/vt6656/key.c
1223 +@@ -91,9 +91,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
1224 + case VNT_KEY_PAIRWISE:
1225 + key_mode |= mode;
1226 + key_inx = 4;
1227 +- /* Don't save entry for pairwise key for station mode */
1228 +- if (priv->op_mode == NL80211_IFTYPE_STATION)
1229 +- clear_bit(entry, &priv->key_entry_inuse);
1230 + break;
1231 + default:
1232 + return -EINVAL;
1233 +@@ -117,7 +114,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
1234 + int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
1235 + struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
1236 + {
1237 +- struct ieee80211_bss_conf *conf = &vif->bss_conf;
1238 + struct vnt_private *priv = hw->priv;
1239 + u8 *mac_addr = NULL;
1240 + u8 key_dec_mode = 0;
1241 +@@ -159,16 +155,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
1242 + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1243 + }
1244 +
1245 +- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
1246 ++ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
1247 + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
1248 + key_dec_mode, true);
1249 +- } else {
1250 +- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
1251 ++ else
1252 ++ vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
1253 + key_dec_mode, true);
1254 +
1255 +- vnt_set_keymode(hw, (u8 *)conf->bssid, key,
1256 +- VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
1257 +- }
1258 +-
1259 + return 0;
1260 + }
1261 +diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
1262 +index e8ccd800c94f..9adab851580c 100644
1263 +--- a/drivers/staging/vt6656/main_usb.c
1264 ++++ b/drivers/staging/vt6656/main_usb.c
1265 +@@ -594,8 +594,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1266 +
1267 + priv->op_mode = vif->type;
1268 +
1269 +- vnt_set_bss_mode(priv);
1270 +-
1271 + /* LED blink on TX */
1272 + vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
1273 +
1274 +@@ -682,7 +680,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1275 + priv->basic_rates = conf->basic_rates;
1276 +
1277 + vnt_update_top_rates(priv);
1278 +- vnt_set_bss_mode(priv);
1279 +
1280 + dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
1281 + }
1282 +@@ -711,11 +708,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1283 + priv->short_slot_time = false;
1284 +
1285 + vnt_set_short_slot_time(priv);
1286 +- vnt_update_ifs(priv);
1287 + vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
1288 + vnt_update_pre_ed_threshold(priv, false);
1289 + }
1290 +
1291 ++ if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
1292 ++ BSS_CHANGED_ERP_SLOT))
1293 ++ vnt_set_bss_mode(priv);
1294 ++
1295 + if (changed & BSS_CHANGED_TXPOWER)
1296 + vnt_rf_setpower(priv, priv->current_rate,
1297 + conf->chandef.chan->hw_value);
1298 +@@ -739,12 +739,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1299 + vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
1300 + TFTCTL_TSFCNTREN);
1301 +
1302 +- vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
1303 +- conf->sync_tsf, priv->current_tsf);
1304 +-
1305 + vnt_mac_set_beacon_interval(priv, conf->beacon_int);
1306 +
1307 + vnt_reset_next_tbtt(priv, conf->beacon_int);
1308 ++
1309 ++ vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
1310 ++ conf->sync_tsf, priv->current_tsf);
1311 ++
1312 ++ vnt_update_next_tbtt(priv,
1313 ++ conf->sync_tsf, conf->beacon_int);
1314 + } else {
1315 + vnt_clear_current_tsf(priv);
1316 +
1317 +@@ -779,15 +782,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
1318 + {
1319 + struct vnt_private *priv = hw->priv;
1320 + u8 rx_mode = 0;
1321 +- int rc;
1322 +
1323 + *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
1324 +
1325 +- rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
1326 +- MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
1327 +-
1328 +- if (!rc)
1329 +- rx_mode = RCR_MULTICAST | RCR_BROADCAST;
1330 ++ vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
1331 ++ MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
1332 +
1333 + dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
1334 +
1335 +@@ -828,8 +827,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1336 + return -EOPNOTSUPP;
1337 + break;
1338 + case DISABLE_KEY:
1339 +- if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
1340 ++ if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
1341 + clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
1342 ++
1343 ++ vnt_mac_disable_keyentry(priv, key->hw_key_idx);
1344 ++ }
1345 ++
1346 + default:
1347 + break;
1348 + }
1349 +diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
1350 +index 95aa47ac4dcd..f8621fe67376 100644
1351 +--- a/drivers/target/target_core_fabric_lib.c
1352 ++++ b/drivers/target/target_core_fabric_lib.c
1353 +@@ -76,7 +76,7 @@ static int fc_get_pr_transport_id(
1354 + * encoded TransportID.
1355 + */
1356 + ptr = &se_nacl->initiatorname[0];
1357 +- for (i = 0; i < 24; ) {
1358 ++ for (i = 0; i < 23; ) {
1359 + if (!strncmp(&ptr[i], ":", 1)) {
1360 + i++;
1361 + continue;
1362 +diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
1363 +index a8d399188242..fc0ef13f2616 100644
1364 +--- a/drivers/tty/hvc/hvc_console.c
1365 ++++ b/drivers/tty/hvc/hvc_console.c
1366 +@@ -288,10 +288,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
1367 + vtermnos[index] = vtermno;
1368 + cons_ops[index] = ops;
1369 +
1370 +- /* reserve all indices up to and including this index */
1371 +- if (last_hvc < index)
1372 +- last_hvc = index;
1373 +-
1374 + /* check if we need to re-register the kernel console */
1375 + hvc_check_console(index);
1376 +
1377 +@@ -895,13 +891,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
1378 + cons_ops[i] == hp->ops)
1379 + break;
1380 +
1381 +- /* no matching slot, just use a counter */
1382 +- if (i >= MAX_NR_HVC_CONSOLES)
1383 +- i = ++last_hvc;
1384 ++ if (i >= MAX_NR_HVC_CONSOLES) {
1385 ++
1386 ++ /* find 'empty' slot for console */
1387 ++ for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
1388 ++ }
1389 ++
1390 ++ /* no matching slot, just use a counter */
1391 ++ if (i == MAX_NR_HVC_CONSOLES)
1392 ++ i = ++last_hvc + MAX_NR_HVC_CONSOLES;
1393 ++ }
1394 +
1395 + hp->index = i;
1396 +- cons_ops[i] = ops;
1397 +- vtermnos[i] = vtermno;
1398 ++ if (i < MAX_NR_HVC_CONSOLES) {
1399 ++ cons_ops[i] = ops;
1400 ++ vtermnos[i] = vtermno;
1401 ++ }
1402 +
1403 + list_add_tail(&(hp->next), &hvc_structs);
1404 + spin_unlock(&hvc_structs_lock);
1405 +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
1406 +index 32943afacffd..1081810b3e3f 100644
1407 +--- a/drivers/tty/rocket.c
1408 ++++ b/drivers/tty/rocket.c
1409 +@@ -645,18 +645,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
1410 + tty_port_init(&info->port);
1411 + info->port.ops = &rocket_port_ops;
1412 + info->flags &= ~ROCKET_MODE_MASK;
1413 +- switch (pc104[board][line]) {
1414 +- case 422:
1415 +- info->flags |= ROCKET_MODE_RS422;
1416 +- break;
1417 +- case 485:
1418 +- info->flags |= ROCKET_MODE_RS485;
1419 +- break;
1420 +- case 232:
1421 +- default:
1422 ++ if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
1423 ++ switch (pc104[board][line]) {
1424 ++ case 422:
1425 ++ info->flags |= ROCKET_MODE_RS422;
1426 ++ break;
1427 ++ case 485:
1428 ++ info->flags |= ROCKET_MODE_RS485;
1429 ++ break;
1430 ++ case 232:
1431 ++ default:
1432 ++ info->flags |= ROCKET_MODE_RS232;
1433 ++ break;
1434 ++ }
1435 ++ else
1436 + info->flags |= ROCKET_MODE_RS232;
1437 +- break;
1438 +- }
1439 +
1440 + info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
1441 + if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
1442 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
1443 +index 333de7d3fe86..06cf474072d6 100644
1444 +--- a/drivers/tty/serial/sh-sci.c
1445 ++++ b/drivers/tty/serial/sh-sci.c
1446 +@@ -841,9 +841,16 @@ static void sci_receive_chars(struct uart_port *port)
1447 + tty_insert_flip_char(tport, c, TTY_NORMAL);
1448 + } else {
1449 + for (i = 0; i < count; i++) {
1450 +- char c = serial_port_in(port, SCxRDR);
1451 +-
1452 +- status = serial_port_in(port, SCxSR);
1453 ++ char c;
1454 ++
1455 ++ if (port->type == PORT_SCIF ||
1456 ++ port->type == PORT_HSCIF) {
1457 ++ status = serial_port_in(port, SCxSR);
1458 ++ c = serial_port_in(port, SCxRDR);
1459 ++ } else {
1460 ++ c = serial_port_in(port, SCxRDR);
1461 ++ status = serial_port_in(port, SCxSR);
1462 ++ }
1463 + if (uart_handle_sysrq_char(port, c)) {
1464 + count--; i--;
1465 + continue;
1466 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1467 +index 46defa3be9a4..8a4e7879a7a6 100644
1468 +--- a/drivers/tty/vt/vt.c
1469 ++++ b/drivers/tty/vt/vt.c
1470 +@@ -880,7 +880,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1471 + if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
1472 + return 0;
1473 +
1474 +- if (new_screen_size > (4 << 20))
1475 ++ if (new_screen_size > KMALLOC_MAX_SIZE)
1476 + return -EINVAL;
1477 + newscreen = kzalloc(new_screen_size, GFP_USER);
1478 + if (!newscreen)
1479 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1480 +index 5e171e45c685..38709bee4c20 100644
1481 +--- a/drivers/usb/class/cdc-acm.c
1482 ++++ b/drivers/usb/class/cdc-acm.c
1483 +@@ -424,9 +424,12 @@ static void acm_ctrl_irq(struct urb *urb)
1484 +
1485 + exit:
1486 + retval = usb_submit_urb(urb, GFP_ATOMIC);
1487 +- if (retval && retval != -EPERM)
1488 ++ if (retval && retval != -EPERM && retval != -ENODEV)
1489 + dev_err(&acm->control->dev,
1490 + "%s - usb_submit_urb failed: %d\n", __func__, retval);
1491 ++ else
1492 ++ dev_vdbg(&acm->control->dev,
1493 ++ "control resubmission terminated %d\n", retval);
1494 + }
1495 +
1496 + static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
1497 +@@ -442,6 +445,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
1498 + dev_err(&acm->data->dev,
1499 + "urb %d failed submission with %d\n",
1500 + index, res);
1501 ++ } else {
1502 ++ dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
1503 + }
1504 + set_bit(index, &acm->read_urbs_free);
1505 + return res;
1506 +@@ -484,6 +489,7 @@ static void acm_read_bulk_callback(struct urb *urb)
1507 + int status = urb->status;
1508 + bool stopped = false;
1509 + bool stalled = false;
1510 ++ bool cooldown = false;
1511 +
1512 + dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
1513 + rb->index, urb->actual_length, status);
1514 +@@ -510,6 +516,14 @@ static void acm_read_bulk_callback(struct urb *urb)
1515 + __func__, status);
1516 + stopped = true;
1517 + break;
1518 ++ case -EOVERFLOW:
1519 ++ case -EPROTO:
1520 ++ dev_dbg(&acm->data->dev,
1521 ++ "%s - cooling babbling device\n", __func__);
1522 ++ usb_mark_last_busy(acm->dev);
1523 ++ set_bit(rb->index, &acm->urbs_in_error_delay);
1524 ++ cooldown = true;
1525 ++ break;
1526 + default:
1527 + dev_dbg(&acm->data->dev,
1528 + "%s - nonzero urb status received: %d\n",
1529 +@@ -531,9 +545,11 @@ static void acm_read_bulk_callback(struct urb *urb)
1530 + */
1531 + smp_mb__after_atomic();
1532 +
1533 +- if (stopped || stalled) {
1534 ++ if (stopped || stalled || cooldown) {
1535 + if (stalled)
1536 + schedule_work(&acm->work);
1537 ++ else if (cooldown)
1538 ++ schedule_delayed_work(&acm->dwork, HZ / 2);
1539 + return;
1540 + }
1541 +
1542 +@@ -575,14 +591,20 @@ static void acm_softint(struct work_struct *work)
1543 + struct acm *acm = container_of(work, struct acm, work);
1544 +
1545 + if (test_bit(EVENT_RX_STALL, &acm->flags)) {
1546 +- if (!(usb_autopm_get_interface(acm->data))) {
1547 ++ smp_mb(); /* against acm_suspend() */
1548 ++ if (!acm->susp_count) {
1549 + for (i = 0; i < acm->rx_buflimit; i++)
1550 + usb_kill_urb(acm->read_urbs[i]);
1551 + usb_clear_halt(acm->dev, acm->in);
1552 + acm_submit_read_urbs(acm, GFP_KERNEL);
1553 +- usb_autopm_put_interface(acm->data);
1554 ++ clear_bit(EVENT_RX_STALL, &acm->flags);
1555 + }
1556 +- clear_bit(EVENT_RX_STALL, &acm->flags);
1557 ++ }
1558 ++
1559 ++ if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
1560 ++ for (i = 0; i < ACM_NR; i++)
1561 ++ if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
1562 ++ acm_submit_read_urb(acm, i, GFP_NOIO);
1563 + }
1564 +
1565 + if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
1566 +@@ -1374,6 +1396,7 @@ made_compressed_probe:
1567 + acm->readsize = readsize;
1568 + acm->rx_buflimit = num_rx_buf;
1569 + INIT_WORK(&acm->work, acm_softint);
1570 ++ INIT_DELAYED_WORK(&acm->dwork, acm_softint);
1571 + init_waitqueue_head(&acm->wioctl);
1572 + spin_lock_init(&acm->write_lock);
1573 + spin_lock_init(&acm->read_lock);
1574 +@@ -1587,6 +1610,7 @@ static void acm_disconnect(struct usb_interface *intf)
1575 +
1576 + acm_kill_urbs(acm);
1577 + cancel_work_sync(&acm->work);
1578 ++ cancel_delayed_work_sync(&acm->dwork);
1579 +
1580 + tty_unregister_device(acm_tty_driver, acm->minor);
1581 +
1582 +@@ -1629,6 +1653,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1583 +
1584 + acm_kill_urbs(acm);
1585 + cancel_work_sync(&acm->work);
1586 ++ cancel_delayed_work_sync(&acm->dwork);
1587 ++ acm->urbs_in_error_delay = 0;
1588 +
1589 + return 0;
1590 + }
1591 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
1592 +index 515aad0847ee..30380d28a504 100644
1593 +--- a/drivers/usb/class/cdc-acm.h
1594 ++++ b/drivers/usb/class/cdc-acm.h
1595 +@@ -108,8 +108,11 @@ struct acm {
1596 + unsigned long flags;
1597 + # define EVENT_TTY_WAKEUP 0
1598 + # define EVENT_RX_STALL 1
1599 ++# define ACM_ERROR_DELAY 3
1600 ++ unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
1601 + struct usb_cdc_line_coding line; /* bits, stop, parity */
1602 +- struct work_struct work; /* work queue entry for line discipline waking up */
1603 ++ struct work_struct work; /* work queue entry for various purposes*/
1604 ++ struct delayed_work dwork; /* for cool downs needed in error recovery */
1605 + unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
1606 + unsigned int ctrlout; /* output control lines (DTR, RTS) */
1607 + struct async_icount iocount; /* counters for control line changes */
1608 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1609 +index 4391192bdd19..d6d09486231d 100644
1610 +--- a/drivers/usb/core/hub.c
1611 ++++ b/drivers/usb/core/hub.c
1612 +@@ -1195,6 +1195,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1613 + #ifdef CONFIG_PM
1614 + udev->reset_resume = 1;
1615 + #endif
1616 ++ /* Don't set the change_bits when the device
1617 ++ * was powered off.
1618 ++ */
1619 ++ if (test_bit(port1, hub->power_bits))
1620 ++ set_bit(port1, hub->change_bits);
1621 +
1622 + } else {
1623 + /* The power session is gone; tell hub_wq */
1624 +@@ -3008,6 +3013,15 @@ static int check_port_resume_type(struct usb_device *udev,
1625 + if (portchange & USB_PORT_STAT_C_ENABLE)
1626 + usb_clear_port_feature(hub->hdev, port1,
1627 + USB_PORT_FEAT_C_ENABLE);
1628 ++
1629 ++ /*
1630 ++ * Whatever made this reset-resume necessary may have
1631 ++ * turned on the port1 bit in hub->change_bits. But after
1632 ++ * a successful reset-resume we want the bit to be clear;
1633 ++ * if it was on it would indicate that something happened
1634 ++ * following the reset-resume.
1635 ++ */
1636 ++ clear_bit(port1, hub->change_bits);
1637 + }
1638 +
1639 + return status;
1640 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1641 +index e70578e11156..00e80cfe614c 100644
1642 +--- a/drivers/usb/core/message.c
1643 ++++ b/drivers/usb/core/message.c
1644 +@@ -586,12 +586,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
1645 + int i, retval;
1646 +
1647 + spin_lock_irqsave(&io->lock, flags);
1648 +- if (io->status) {
1649 ++ if (io->status || io->count == 0) {
1650 + spin_unlock_irqrestore(&io->lock, flags);
1651 + return;
1652 + }
1653 + /* shut everything down */
1654 + io->status = -ECONNRESET;
1655 ++ io->count++; /* Keep the request alive until we're done */
1656 + spin_unlock_irqrestore(&io->lock, flags);
1657 +
1658 + for (i = io->entries - 1; i >= 0; --i) {
1659 +@@ -605,6 +606,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
1660 + dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
1661 + __func__, retval);
1662 + }
1663 ++
1664 ++ spin_lock_irqsave(&io->lock, flags);
1665 ++ io->count--;
1666 ++ if (!io->count)
1667 ++ complete(&io->complete);
1668 ++ spin_unlock_irqrestore(&io->lock, flags);
1669 + }
1670 + EXPORT_SYMBOL_GPL(usb_sg_cancel);
1671 +
1672 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1673 +index 6c4bb47922ac..27d05f0134de 100644
1674 +--- a/drivers/usb/core/quirks.c
1675 ++++ b/drivers/usb/core/quirks.c
1676 +@@ -272,6 +272,10 @@ static const struct usb_device_id usb_quirk_list[] = {
1677 + /* Corsair K70 LUX */
1678 + { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
1679 +
1680 ++ /* Corsair K70 RGB RAPDIFIRE */
1681 ++ { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
1682 ++ USB_QUIRK_DELAY_CTRL_MSG },
1683 ++
1684 + /* MIDI keyboard WORLDE MINI */
1685 + { USB_DEVICE(0x1c75, 0x0204), .driver_info =
1686 + USB_QUIRK_CONFIG_INTF_STRINGS },
1687 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1688 +index 76a0020b0f2e..4149d751719e 100644
1689 +--- a/drivers/usb/dwc3/gadget.c
1690 ++++ b/drivers/usb/dwc3/gadget.c
1691 +@@ -1641,7 +1641,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1692 + u32 reg;
1693 +
1694 + u8 link_state;
1695 +- u8 speed;
1696 +
1697 + /*
1698 + * According to the Databook Remote wakeup request should
1699 +@@ -1651,16 +1650,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1700 + */
1701 + reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1702 +
1703 +- speed = reg & DWC3_DSTS_CONNECTSPD;
1704 +- if ((speed == DWC3_DSTS_SUPERSPEED) ||
1705 +- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
1706 +- return 0;
1707 +-
1708 + link_state = DWC3_DSTS_USBLNKST(reg);
1709 +
1710 + switch (link_state) {
1711 ++ case DWC3_LINK_STATE_RESET:
1712 + case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1713 + case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1714 ++ case DWC3_LINK_STATE_RESUME:
1715 + break;
1716 + default:
1717 + return -EINVAL;
1718 +diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
1719 +index 12fe70beae69..21244c556b81 100644
1720 +--- a/drivers/usb/early/xhci-dbc.c
1721 ++++ b/drivers/usb/early/xhci-dbc.c
1722 +@@ -738,19 +738,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
1723 + case COMP_USB_TRANSACTION_ERROR:
1724 + case COMP_STALL_ERROR:
1725 + default:
1726 +- if (ep_id == XDBC_EPID_OUT)
1727 ++ if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
1728 + xdbc.flags |= XDBC_FLAGS_OUT_STALL;
1729 +- if (ep_id == XDBC_EPID_IN)
1730 ++ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
1731 + xdbc.flags |= XDBC_FLAGS_IN_STALL;
1732 +
1733 + xdbc_trace("endpoint %d stalled\n", ep_id);
1734 + break;
1735 + }
1736 +
1737 +- if (ep_id == XDBC_EPID_IN) {
1738 ++ if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
1739 + xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
1740 + xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
1741 +- } else if (ep_id == XDBC_EPID_OUT) {
1742 ++ } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
1743 + xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
1744 + } else {
1745 + xdbc_trace("invalid endpoint id %d\n", ep_id);
1746 +diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
1747 +index a516cab0bf4a..6c9200d913da 100644
1748 +--- a/drivers/usb/early/xhci-dbc.h
1749 ++++ b/drivers/usb/early/xhci-dbc.h
1750 +@@ -123,8 +123,22 @@ struct xdbc_ring {
1751 + u32 cycle_state;
1752 + };
1753 +
1754 +-#define XDBC_EPID_OUT 2
1755 +-#define XDBC_EPID_IN 3
1756 ++/*
1757 ++ * These are the "Endpoint ID" (also known as "Context Index") values for the
1758 ++ * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
1759 ++ * structure.
1760 ++ * According to the "eXtensible Host Controller Interface for Universal Serial
1761 ++ * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
1762 ++ * Rings", these should be 0 and 1, and those are the values AMD machines give
1763 ++ * you; but Intel machines seem to use the formula from section "4.5.1 Device
1764 ++ * Context Index", which is supposed to be used for the Device Context only.
1765 ++ * Luckily the values from Intel don't overlap with those from AMD, so we can
1766 ++ * just test for both.
1767 ++ */
1768 ++#define XDBC_EPID_OUT 0
1769 ++#define XDBC_EPID_IN 1
1770 ++#define XDBC_EPID_OUT_INTEL 2
1771 ++#define XDBC_EPID_IN_INTEL 3
1772 +
1773 + struct xdbc_state {
1774 + u16 vendor;
1775 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1776 +index 819fd77a2da4..2ff7c21bbda5 100644
1777 +--- a/drivers/usb/gadget/function/f_fs.c
1778 ++++ b/drivers/usb/gadget/function/f_fs.c
1779 +@@ -1727,6 +1727,10 @@ static void ffs_data_reset(struct ffs_data *ffs)
1780 + ffs->state = FFS_READ_DESCRIPTORS;
1781 + ffs->setup_state = FFS_NO_SETUP;
1782 + ffs->flags = 0;
1783 ++
1784 ++ ffs->ms_os_descs_ext_prop_count = 0;
1785 ++ ffs->ms_os_descs_ext_prop_name_len = 0;
1786 ++ ffs->ms_os_descs_ext_prop_data_len = 0;
1787 + }
1788 +
1789 +
1790 +diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
1791 +index bfd8f7ade935..be9f40bc9c12 100644
1792 +--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
1793 ++++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
1794 +@@ -546,7 +546,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
1795 + {
1796 + struct bdc *bdc = ep->bdc;
1797 +
1798 +- if (req == NULL || &req->queue == NULL || &req->usb_req == NULL)
1799 ++ if (req == NULL)
1800 + return;
1801 +
1802 + dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
1803 +diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
1804 +index 3121fa31aabf..a6f88442a53a 100644
1805 +--- a/drivers/usb/misc/sisusbvga/sisusb.c
1806 ++++ b/drivers/usb/misc/sisusbvga/sisusb.c
1807 +@@ -1198,18 +1198,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
1808 + /* High level: Gfx (indexed) register access */
1809 +
1810 + #ifdef INCL_SISUSB_CON
1811 +-int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
1812 ++int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data)
1813 + {
1814 + return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
1815 + }
1816 +
1817 +-int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
1818 ++int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data)
1819 + {
1820 + return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
1821 + }
1822 + #endif
1823 +
1824 +-int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
1825 ++int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
1826 + u8 index, u8 data)
1827 + {
1828 + int ret;
1829 +@@ -1219,7 +1219,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
1830 + return ret;
1831 + }
1832 +
1833 +-int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
1834 ++int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
1835 + u8 index, u8 *data)
1836 + {
1837 + int ret;
1838 +@@ -1229,7 +1229,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
1839 + return ret;
1840 + }
1841 +
1842 +-int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
1843 ++int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
1844 + u8 myand, u8 myor)
1845 + {
1846 + int ret;
1847 +@@ -1244,7 +1244,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
1848 + }
1849 +
1850 + static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
1851 +- int port, u8 idx, u8 data, u8 mask)
1852 ++ u32 port, u8 idx, u8 data, u8 mask)
1853 + {
1854 + int ret;
1855 + u8 tmp;
1856 +@@ -1257,13 +1257,13 @@ static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
1857 + return ret;
1858 + }
1859 +
1860 +-int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
1861 ++int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
1862 + u8 index, u8 myor)
1863 + {
1864 + return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
1865 + }
1866 +
1867 +-int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
1868 ++int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
1869 + u8 idx, u8 myand)
1870 + {
1871 + return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
1872 +@@ -2786,8 +2786,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
1873 + static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
1874 + struct sisusb_command *y, unsigned long arg)
1875 + {
1876 +- int retval, port, length;
1877 +- u32 address;
1878 ++ int retval, length;
1879 ++ u32 port, address;
1880 +
1881 + /* All our commands require the device
1882 + * to be initialized.
1883 +diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
1884 +index e79a616f0d26..f7182257f7e1 100644
1885 +--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
1886 ++++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
1887 +@@ -811,17 +811,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
1888 + int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
1889 + int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
1890 +
1891 +-extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
1892 +-extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
1893 +-extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
1894 ++extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data);
1895 ++extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data);
1896 ++extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
1897 + u8 index, u8 data);
1898 +-extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
1899 ++extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
1900 + u8 index, u8 * data);
1901 +-extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
1902 ++extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port,
1903 + u8 idx, u8 myand, u8 myor);
1904 +-extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
1905 ++extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
1906 + u8 index, u8 myor);
1907 +-extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
1908 ++extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
1909 + u8 idx, u8 myand);
1910 +
1911 + void sisusb_delete(struct kref *kref);
1912 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
1913 +index 9d97543449e6..20dd8df864c4 100644
1914 +--- a/drivers/usb/storage/uas.c
1915 ++++ b/drivers/usb/storage/uas.c
1916 +@@ -82,6 +82,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo);
1917 + static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
1918 + int status);
1919 +
1920 ++/*
1921 ++ * This driver needs its own workqueue, as we need to control memory allocation.
1922 ++ *
1923 ++ * In the course of error handling and power management uas_wait_for_pending_cmnds()
1924 ++ * needs to flush pending work items. In these contexts we cannot allocate memory
1925 ++ * by doing block IO as we would deadlock. For the same reason we cannot wait
1926 ++ * for anything allocating memory not heeding these constraints.
1927 ++ *
1928 ++ * So we have to control all work items that can be on the workqueue we flush.
1929 ++ * Hence we cannot share a queue and need our own.
1930 ++ */
1931 ++static struct workqueue_struct *workqueue;
1932 ++
1933 + static void uas_do_work(struct work_struct *work)
1934 + {
1935 + struct uas_dev_info *devinfo =
1936 +@@ -110,7 +123,7 @@ static void uas_do_work(struct work_struct *work)
1937 + if (!err)
1938 + cmdinfo->state &= ~IS_IN_WORK_LIST;
1939 + else
1940 +- schedule_work(&devinfo->work);
1941 ++ queue_work(workqueue, &devinfo->work);
1942 + }
1943 + out:
1944 + spin_unlock_irqrestore(&devinfo->lock, flags);
1945 +@@ -135,7 +148,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo)
1946 +
1947 + lockdep_assert_held(&devinfo->lock);
1948 + cmdinfo->state |= IS_IN_WORK_LIST;
1949 +- schedule_work(&devinfo->work);
1950 ++ queue_work(workqueue, &devinfo->work);
1951 + }
1952 +
1953 + static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
1954 +@@ -191,6 +204,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
1955 + struct uas_cmd_info *ci = (void *)&cmnd->SCp;
1956 + struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
1957 +
1958 ++ if (status == -ENODEV) /* too late */
1959 ++ return;
1960 ++
1961 + scmd_printk(KERN_INFO, cmnd,
1962 + "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
1963 + prefix, status, cmdinfo->uas_tag,
1964 +@@ -1233,7 +1249,31 @@ static struct usb_driver uas_driver = {
1965 + .id_table = uas_usb_ids,
1966 + };
1967 +
1968 +-module_usb_driver(uas_driver);
1969 ++static int __init uas_init(void)
1970 ++{
1971 ++ int rv;
1972 ++
1973 ++ workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
1974 ++ if (!workqueue)
1975 ++ return -ENOMEM;
1976 ++
1977 ++ rv = usb_register(&uas_driver);
1978 ++ if (rv) {
1979 ++ destroy_workqueue(workqueue);
1980 ++ return -ENOMEM;
1981 ++ }
1982 ++
1983 ++ return 0;
1984 ++}
1985 ++
1986 ++static void __exit uas_exit(void)
1987 ++{
1988 ++ usb_deregister(&uas_driver);
1989 ++ destroy_workqueue(workqueue);
1990 ++}
1991 ++
1992 ++module_init(uas_init);
1993 ++module_exit(uas_exit);
1994 +
1995 + MODULE_LICENSE("GPL");
1996 + MODULE_AUTHOR(
1997 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1998 +index df8ee83c3f1a..5c3f2eaf59e8 100644
1999 +--- a/drivers/usb/storage/unusual_devs.h
2000 ++++ b/drivers/usb/storage/unusual_devs.h
2001 +@@ -2342,6 +2342,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
2002 + USB_SC_DEVICE,USB_PR_DEVICE,NULL,
2003 + US_FL_MAX_SECTORS_64 ),
2004 +
2005 ++/* Reported by Cyril Roelandt <tipecaml@×××××.com> */
2006 ++UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
2007 ++ "JMicron",
2008 ++ "USB to ATA/ATAPI Bridge",
2009 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2010 ++ US_FL_BROKEN_FUA ),
2011 ++
2012 + /* Reported by Andrey Rahmatullin <wrar@××××××××.org> */
2013 + UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
2014 + "iRiver",
2015 +diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
2016 +index b30fb637ae94..52e03f1c76e3 100644
2017 +--- a/drivers/watchdog/watchdog_dev.c
2018 ++++ b/drivers/watchdog/watchdog_dev.c
2019 +@@ -245,6 +245,7 @@ static int watchdog_start(struct watchdog_device *wdd)
2020 + if (err == 0) {
2021 + set_bit(WDOG_ACTIVE, &wdd->status);
2022 + wd_data->last_keepalive = started_at;
2023 ++ wd_data->last_hw_keepalive = started_at;
2024 + watchdog_update_worker(wdd);
2025 + }
2026 +
2027 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
2028 +index a1c17000129b..e94a61eaeceb 100644
2029 +--- a/drivers/xen/xenbus/xenbus_client.c
2030 ++++ b/drivers/xen/xenbus/xenbus_client.c
2031 +@@ -450,7 +450,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
2032 + int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
2033 + unsigned int nr_grefs, void **vaddr)
2034 + {
2035 +- return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
2036 ++ int err;
2037 ++
2038 ++ err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
2039 ++ /* Some hypervisors are buggy and can return 1. */
2040 ++ if (err > 0)
2041 ++ err = GNTST_general_error;
2042 ++
2043 ++ return err;
2044 + }
2045 + EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
2046 +
2047 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
2048 +index c3a3ee74e2d8..1b5a50848b5b 100644
2049 +--- a/fs/ceph/caps.c
2050 ++++ b/fs/ceph/caps.c
2051 +@@ -1863,8 +1863,12 @@ retry_locked:
2052 + }
2053 +
2054 + /* want more caps from mds? */
2055 +- if (want & ~(cap->mds_wanted | cap->issued))
2056 +- goto ack;
2057 ++ if (want & ~cap->mds_wanted) {
2058 ++ if (want & ~(cap->mds_wanted | cap->issued))
2059 ++ goto ack;
2060 ++ if (!__cap_is_valid(cap))
2061 ++ goto ack;
2062 ++ }
2063 +
2064 + /* things we might delay */
2065 + if ((cap->issued & ~retain) == 0 &&
2066 +diff --git a/fs/ceph/export.c b/fs/ceph/export.c
2067 +index 3c59ad180ef0..4cfe1154d4c7 100644
2068 +--- a/fs/ceph/export.c
2069 ++++ b/fs/ceph/export.c
2070 +@@ -151,6 +151,11 @@ static struct dentry *__get_parent(struct super_block *sb,
2071 +
2072 + req->r_num_caps = 1;
2073 + err = ceph_mdsc_do_request(mdsc, NULL, req);
2074 ++ if (err) {
2075 ++ ceph_mdsc_put_request(req);
2076 ++ return ERR_PTR(err);
2077 ++ }
2078 ++
2079 + inode = req->r_target_inode;
2080 + if (inode)
2081 + ihold(inode);
2082 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
2083 +index bee888e0e2db..13eb028607ca 100644
2084 +--- a/fs/ext4/block_validity.c
2085 ++++ b/fs/ext4/block_validity.c
2086 +@@ -137,6 +137,49 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
2087 + printk(KERN_CONT "\n");
2088 + }
2089 +
2090 ++static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
2091 ++{
2092 ++ struct inode *inode;
2093 ++ struct ext4_sb_info *sbi = EXT4_SB(sb);
2094 ++ struct ext4_map_blocks map;
2095 ++ u32 i = 0, num;
2096 ++ int err = 0, n;
2097 ++
2098 ++ if ((ino < EXT4_ROOT_INO) ||
2099 ++ (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
2100 ++ return -EINVAL;
2101 ++ inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
2102 ++ if (IS_ERR(inode))
2103 ++ return PTR_ERR(inode);
2104 ++ num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
2105 ++ while (i < num) {
2106 ++ map.m_lblk = i;
2107 ++ map.m_len = num - i;
2108 ++ n = ext4_map_blocks(NULL, inode, &map, 0);
2109 ++ if (n < 0) {
2110 ++ err = n;
2111 ++ break;
2112 ++ }
2113 ++ if (n == 0) {
2114 ++ i++;
2115 ++ } else {
2116 ++ if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
2117 ++ ext4_error(sb, "blocks %llu-%llu from inode %u "
2118 ++ "overlap system zone", map.m_pblk,
2119 ++ map.m_pblk + map.m_len - 1, ino);
2120 ++ err = -EFSCORRUPTED;
2121 ++ break;
2122 ++ }
2123 ++ err = add_system_zone(sbi, map.m_pblk, n);
2124 ++ if (err < 0)
2125 ++ break;
2126 ++ i += n;
2127 ++ }
2128 ++ }
2129 ++ iput(inode);
2130 ++ return err;
2131 ++}
2132 ++
2133 + int ext4_setup_system_zone(struct super_block *sb)
2134 + {
2135 + ext4_group_t ngroups = ext4_get_groups_count(sb);
2136 +@@ -171,6 +214,12 @@ int ext4_setup_system_zone(struct super_block *sb)
2137 + if (ret)
2138 + return ret;
2139 + }
2140 ++ if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
2141 ++ ret = ext4_protect_reserved_inode(sb,
2142 ++ le32_to_cpu(sbi->s_es->s_journal_inum));
2143 ++ if (ret)
2144 ++ return ret;
2145 ++ }
2146 +
2147 + if (test_opt(sb, DEBUG))
2148 + debug_print_tree(EXT4_SB(sb));
2149 +@@ -227,6 +276,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
2150 + __le32 *bref = p;
2151 + unsigned int blk;
2152 +
2153 ++ if (ext4_has_feature_journal(inode->i_sb) &&
2154 ++ (inode->i_ino ==
2155 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
2156 ++ return 0;
2157 ++
2158 + while (bref < p+max) {
2159 + blk = le32_to_cpu(*bref++);
2160 + if (blk &&
2161 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2162 +index 4aa0f8f7d9a0..6c5b4301ee37 100644
2163 +--- a/fs/ext4/ext4.h
2164 ++++ b/fs/ext4/ext4.h
2165 +@@ -2509,8 +2509,19 @@ int do_journal_get_write_access(handle_t *handle,
2166 + #define FALL_BACK_TO_NONDELALLOC 1
2167 + #define CONVERT_INLINE_DATA 2
2168 +
2169 +-extern struct inode *ext4_iget(struct super_block *, unsigned long);
2170 +-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
2171 ++typedef enum {
2172 ++ EXT4_IGET_NORMAL = 0,
2173 ++ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
2174 ++ EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
2175 ++} ext4_iget_flags;
2176 ++
2177 ++extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
2178 ++ ext4_iget_flags flags, const char *function,
2179 ++ unsigned int line);
2180 ++
2181 ++#define ext4_iget(sb, ino, flags) \
2182 ++ __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
2183 ++
2184 + extern int ext4_write_inode(struct inode *, struct writeback_control *);
2185 + extern int ext4_setattr(struct dentry *, struct iattr *);
2186 + extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
2187 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2188 +index fa6ae9014e8f..4f9eb4b61549 100644
2189 +--- a/fs/ext4/extents.c
2190 ++++ b/fs/ext4/extents.c
2191 +@@ -510,6 +510,30 @@ int ext4_ext_check_inode(struct inode *inode)
2192 + return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
2193 + }
2194 +
2195 ++static void ext4_cache_extents(struct inode *inode,
2196 ++ struct ext4_extent_header *eh)
2197 ++{
2198 ++ struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
2199 ++ ext4_lblk_t prev = 0;
2200 ++ int i;
2201 ++
2202 ++ for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
2203 ++ unsigned int status = EXTENT_STATUS_WRITTEN;
2204 ++ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
2205 ++ int len = ext4_ext_get_actual_len(ex);
2206 ++
2207 ++ if (prev && (prev != lblk))
2208 ++ ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
2209 ++ EXTENT_STATUS_HOLE);
2210 ++
2211 ++ if (ext4_ext_is_unwritten(ex))
2212 ++ status = EXTENT_STATUS_UNWRITTEN;
2213 ++ ext4_es_cache_extent(inode, lblk, len,
2214 ++ ext4_ext_pblock(ex), status);
2215 ++ prev = lblk + len;
2216 ++ }
2217 ++}
2218 ++
2219 + static struct buffer_head *
2220 + __read_extent_tree_block(const char *function, unsigned int line,
2221 + struct inode *inode, ext4_fsblk_t pblk, int depth,
2222 +@@ -530,36 +554,21 @@ __read_extent_tree_block(const char *function, unsigned int line,
2223 + }
2224 + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
2225 + return bh;
2226 +- err = __ext4_ext_check(function, line, inode,
2227 +- ext_block_hdr(bh), depth, pblk);
2228 +- if (err)
2229 +- goto errout;
2230 ++ if (!ext4_has_feature_journal(inode->i_sb) ||
2231 ++ (inode->i_ino !=
2232 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
2233 ++ err = __ext4_ext_check(function, line, inode,
2234 ++ ext_block_hdr(bh), depth, pblk);
2235 ++ if (err)
2236 ++ goto errout;
2237 ++ }
2238 + set_buffer_verified(bh);
2239 + /*
2240 + * If this is a leaf block, cache all of its entries
2241 + */
2242 + if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
2243 + struct ext4_extent_header *eh = ext_block_hdr(bh);
2244 +- struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
2245 +- ext4_lblk_t prev = 0;
2246 +- int i;
2247 +-
2248 +- for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
2249 +- unsigned int status = EXTENT_STATUS_WRITTEN;
2250 +- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
2251 +- int len = ext4_ext_get_actual_len(ex);
2252 +-
2253 +- if (prev && (prev != lblk))
2254 +- ext4_es_cache_extent(inode, prev,
2255 +- lblk - prev, ~0,
2256 +- EXTENT_STATUS_HOLE);
2257 +-
2258 +- if (ext4_ext_is_unwritten(ex))
2259 +- status = EXTENT_STATUS_UNWRITTEN;
2260 +- ext4_es_cache_extent(inode, lblk, len,
2261 +- ext4_ext_pblock(ex), status);
2262 +- prev = lblk + len;
2263 +- }
2264 ++ ext4_cache_extents(inode, eh);
2265 + }
2266 + return bh;
2267 + errout:
2268 +@@ -907,6 +916,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
2269 + path[0].p_bh = NULL;
2270 +
2271 + i = depth;
2272 ++ if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
2273 ++ ext4_cache_extents(inode, eh);
2274 + /* walk through the tree */
2275 + while (i) {
2276 + ext_debug("depth %d: num %d, max %d\n",
2277 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
2278 +index 2a480c0ef1bc..85c2a7ea5ea2 100644
2279 +--- a/fs/ext4/ialloc.c
2280 ++++ b/fs/ext4/ialloc.c
2281 +@@ -673,7 +673,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
2282 + * block has been written back to disk. (Yes, these values are
2283 + * somewhat arbitrary...)
2284 + */
2285 +-#define RECENTCY_MIN 5
2286 ++#define RECENTCY_MIN 60
2287 + #define RECENTCY_DIRTY 300
2288 +
2289 + static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
2290 +@@ -1239,7 +1239,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
2291 + if (!ext4_test_bit(bit, bitmap_bh->b_data))
2292 + goto bad_orphan;
2293 +
2294 +- inode = ext4_iget(sb, ino);
2295 ++ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
2296 + if (IS_ERR(inode)) {
2297 + err = PTR_ERR(inode);
2298 + ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
2299 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2300 +index 5b0d5ca2c2b2..1bc0037c50aa 100644
2301 +--- a/fs/ext4/inode.c
2302 ++++ b/fs/ext4/inode.c
2303 +@@ -407,6 +407,10 @@ static int __check_block_validity(struct inode *inode, const char *func,
2304 + unsigned int line,
2305 + struct ext4_map_blocks *map)
2306 + {
2307 ++ if (ext4_has_feature_journal(inode->i_sb) &&
2308 ++ (inode->i_ino ==
2309 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
2310 ++ return 0;
2311 + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
2312 + map->m_len)) {
2313 + ext4_error_inode(inode, func, line, map->m_pblk,
2314 +@@ -2123,7 +2127,7 @@ static int ext4_writepage(struct page *page,
2315 + bool keep_towrite = false;
2316 +
2317 + if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2318 +- ext4_invalidatepage(page, 0, PAGE_SIZE);
2319 ++ inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
2320 + unlock_page(page);
2321 + return -EIO;
2322 + }
2323 +@@ -4695,7 +4699,9 @@ int ext4_get_projid(struct inode *inode, kprojid_t *projid)
2324 + return 0;
2325 + }
2326 +
2327 +-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2328 ++struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
2329 ++ ext4_iget_flags flags, const char *function,
2330 ++ unsigned int line)
2331 + {
2332 + struct ext4_iloc iloc;
2333 + struct ext4_inode *raw_inode;
2334 +@@ -4709,6 +4715,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2335 + gid_t i_gid;
2336 + projid_t i_projid;
2337 +
2338 ++ if (((flags & EXT4_IGET_NORMAL) &&
2339 ++ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
2340 ++ (ino < EXT4_ROOT_INO) ||
2341 ++ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
2342 ++ if (flags & EXT4_IGET_HANDLE)
2343 ++ return ERR_PTR(-ESTALE);
2344 ++ __ext4_error(sb, function, line,
2345 ++ "inode #%lu: comm %s: iget: illegal inode #",
2346 ++ ino, current->comm);
2347 ++ return ERR_PTR(-EFSCORRUPTED);
2348 ++ }
2349 ++
2350 + inode = iget_locked(sb, ino);
2351 + if (!inode)
2352 + return ERR_PTR(-ENOMEM);
2353 +@@ -4724,18 +4742,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2354 + raw_inode = ext4_raw_inode(&iloc);
2355 +
2356 + if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
2357 +- EXT4_ERROR_INODE(inode, "root inode unallocated");
2358 ++ ext4_error_inode(inode, function, line, 0,
2359 ++ "iget: root inode unallocated");
2360 + ret = -EFSCORRUPTED;
2361 + goto bad_inode;
2362 + }
2363 +
2364 ++ if ((flags & EXT4_IGET_HANDLE) &&
2365 ++ (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
2366 ++ ret = -ESTALE;
2367 ++ goto bad_inode;
2368 ++ }
2369 ++
2370 + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2371 + ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2372 + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2373 + EXT4_INODE_SIZE(inode->i_sb) ||
2374 + (ei->i_extra_isize & 3)) {
2375 +- EXT4_ERROR_INODE(inode,
2376 +- "bad extra_isize %u (inode size %u)",
2377 ++ ext4_error_inode(inode, function, line, 0,
2378 ++ "iget: bad extra_isize %u "
2379 ++ "(inode size %u)",
2380 + ei->i_extra_isize,
2381 + EXT4_INODE_SIZE(inode->i_sb));
2382 + ret = -EFSCORRUPTED;
2383 +@@ -4757,7 +4783,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2384 + }
2385 +
2386 + if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
2387 +- EXT4_ERROR_INODE(inode, "checksum invalid");
2388 ++ ext4_error_inode(inode, function, line, 0,
2389 ++ "iget: checksum invalid");
2390 + ret = -EFSBADCRC;
2391 + goto bad_inode;
2392 + }
2393 +@@ -4813,7 +4840,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2394 + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2395 + inode->i_size = ext4_isize(sb, raw_inode);
2396 + if ((size = i_size_read(inode)) < 0) {
2397 +- EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
2398 ++ ext4_error_inode(inode, function, line, 0,
2399 ++ "iget: bad i_size value: %lld", size);
2400 + ret = -EFSCORRUPTED;
2401 + goto bad_inode;
2402 + }
2403 +@@ -4899,7 +4927,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2404 + ret = 0;
2405 + if (ei->i_file_acl &&
2406 + !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
2407 +- EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
2408 ++ ext4_error_inode(inode, function, line, 0,
2409 ++ "iget: bad extended attribute block %llu",
2410 + ei->i_file_acl);
2411 + ret = -EFSCORRUPTED;
2412 + goto bad_inode;
2413 +@@ -4954,7 +4983,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2414 + make_bad_inode(inode);
2415 + } else {
2416 + ret = -EFSCORRUPTED;
2417 +- EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
2418 ++ ext4_error_inode(inode, function, line, 0,
2419 ++ "iget: bogus i_mode (%o)", inode->i_mode);
2420 + goto bad_inode;
2421 + }
2422 + brelse(iloc.bh);
2423 +@@ -4969,13 +4999,6 @@ bad_inode:
2424 + return ERR_PTR(ret);
2425 + }
2426 +
2427 +-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
2428 +-{
2429 +- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
2430 +- return ERR_PTR(-EFSCORRUPTED);
2431 +- return ext4_iget(sb, ino);
2432 +-}
2433 +-
2434 + static int ext4_inode_blocks_set(handle_t *handle,
2435 + struct ext4_inode *raw_inode,
2436 + struct ext4_inode_info *ei)
2437 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2438 +index 82e118e9e50b..9dbb5542167a 100644
2439 +--- a/fs/ext4/ioctl.c
2440 ++++ b/fs/ext4/ioctl.c
2441 +@@ -111,7 +111,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
2442 + if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
2443 + return -EPERM;
2444 +
2445 +- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
2446 ++ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
2447 + if (IS_ERR(inode_bl))
2448 + return PTR_ERR(inode_bl);
2449 + ei_bl = EXT4_I(inode_bl);
2450 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2451 +index 745a89d30a57..d7cedfaa1cc0 100644
2452 +--- a/fs/ext4/mballoc.c
2453 ++++ b/fs/ext4/mballoc.c
2454 +@@ -1952,7 +1952,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2455 + int free;
2456 +
2457 + free = e4b->bd_info->bb_free;
2458 +- BUG_ON(free <= 0);
2459 ++ if (WARN_ON(free <= 0))
2460 ++ return;
2461 +
2462 + i = e4b->bd_info->bb_first_free;
2463 +
2464 +@@ -1973,7 +1974,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2465 + }
2466 +
2467 + mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2468 +- BUG_ON(ex.fe_len <= 0);
2469 ++ if (WARN_ON(ex.fe_len <= 0))
2470 ++ break;
2471 + if (free < ex.fe_len) {
2472 + ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2473 + "%d free clusters as per "
2474 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2475 +index 3f7b3836166c..161099f39ab9 100644
2476 +--- a/fs/ext4/namei.c
2477 ++++ b/fs/ext4/namei.c
2478 +@@ -1597,7 +1597,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
2479 + dentry);
2480 + return ERR_PTR(-EFSCORRUPTED);
2481 + }
2482 +- inode = ext4_iget_normal(dir->i_sb, ino);
2483 ++ inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
2484 + if (inode == ERR_PTR(-ESTALE)) {
2485 + EXT4_ERROR_INODE(dir,
2486 + "deleted inode referenced: %u",
2487 +@@ -1639,7 +1639,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
2488 + return ERR_PTR(-EFSCORRUPTED);
2489 + }
2490 +
2491 +- return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
2492 ++ return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
2493 + }
2494 +
2495 + /*
2496 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2497 +index d42f7471fd5b..19af346a6651 100644
2498 +--- a/fs/ext4/resize.c
2499 ++++ b/fs/ext4/resize.c
2500 +@@ -1652,7 +1652,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
2501 + "No reserved GDT blocks, can't resize");
2502 + return -EPERM;
2503 + }
2504 +- inode = ext4_iget(sb, EXT4_RESIZE_INO);
2505 ++ inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
2506 + if (IS_ERR(inode)) {
2507 + ext4_warning(sb, "Error opening resize inode");
2508 + return PTR_ERR(inode);
2509 +@@ -1980,7 +1980,8 @@ retry:
2510 + }
2511 +
2512 + if (!resize_inode)
2513 +- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
2514 ++ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
2515 ++ EXT4_IGET_SPECIAL);
2516 + if (IS_ERR(resize_inode)) {
2517 + ext4_warning(sb, "Error opening resize inode");
2518 + return PTR_ERR(resize_inode);
2519 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2520 +index 0ced133a36ec..ffc985d78137 100644
2521 +--- a/fs/ext4/super.c
2522 ++++ b/fs/ext4/super.c
2523 +@@ -1126,20 +1126,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
2524 + {
2525 + struct inode *inode;
2526 +
2527 +- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
2528 +- return ERR_PTR(-ESTALE);
2529 +- if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
2530 +- return ERR_PTR(-ESTALE);
2531 +-
2532 +- /* iget isn't really right if the inode is currently unallocated!!
2533 +- *
2534 +- * ext4_read_inode will return a bad_inode if the inode had been
2535 +- * deleted, so we should be safe.
2536 +- *
2537 ++ /*
2538 + * Currently we don't know the generation for parent directory, so
2539 + * a generation of 0 means "accept any"
2540 + */
2541 +- inode = ext4_iget_normal(sb, ino);
2542 ++ inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
2543 + if (IS_ERR(inode))
2544 + return ERR_CAST(inode);
2545 + if (generation && inode->i_generation != generation) {
2546 +@@ -3465,7 +3456,8 @@ int ext4_calculate_overhead(struct super_block *sb)
2547 + */
2548 + if (sbi->s_journal && !sbi->journal_bdev)
2549 + overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
2550 +- else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
2551 ++ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
2552 ++ /* j_inum for internal journal is non-zero */
2553 + j_inode = ext4_get_journal_inode(sb, j_inum);
2554 + if (j_inode) {
2555 + j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
2556 +@@ -4370,7 +4362,7 @@ no_journal:
2557 + * so we can safely mount the rest of the filesystem now.
2558 + */
2559 +
2560 +- root = ext4_iget(sb, EXT4_ROOT_INO);
2561 ++ root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
2562 + if (IS_ERR(root)) {
2563 + ext4_msg(sb, KERN_ERR, "get root inode failed");
2564 + ret = PTR_ERR(root);
2565 +@@ -4620,7 +4612,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
2566 + * happen if we iget() an unused inode, as the subsequent iput()
2567 + * will try to delete it.
2568 + */
2569 +- journal_inode = ext4_iget(sb, journal_inum);
2570 ++ journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
2571 + if (IS_ERR(journal_inode)) {
2572 + ext4_msg(sb, KERN_ERR, "no journal found");
2573 + return NULL;
2574 +@@ -5693,7 +5685,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
2575 + if (!qf_inums[type])
2576 + return -EPERM;
2577 +
2578 +- qf_inode = ext4_iget(sb, qf_inums[type]);
2579 ++ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
2580 + if (IS_ERR(qf_inode)) {
2581 + ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
2582 + return PTR_ERR(qf_inode);
2583 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2584 +index f2fde3ac8698..b0873b89dc87 100644
2585 +--- a/fs/ext4/xattr.c
2586 ++++ b/fs/ext4/xattr.c
2587 +@@ -383,7 +383,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
2588 + struct inode *inode;
2589 + int err;
2590 +
2591 +- inode = ext4_iget(parent->i_sb, ea_ino);
2592 ++ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
2593 + if (IS_ERR(inode)) {
2594 + err = PTR_ERR(inode);
2595 + ext4_error(parent->i_sb,
2596 +@@ -1486,7 +1486,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
2597 + }
2598 +
2599 + while (ce) {
2600 +- ea_inode = ext4_iget(inode->i_sb, ce->e_value);
2601 ++ ea_inode = ext4_iget(inode->i_sb, ce->e_value,
2602 ++ EXT4_IGET_NORMAL);
2603 + if (!IS_ERR(ea_inode) &&
2604 + !is_bad_inode(ea_inode) &&
2605 + (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
2606 +diff --git a/fs/namespace.c b/fs/namespace.c
2607 +index e9c13eedd739..c8acc60c456d 100644
2608 +--- a/fs/namespace.c
2609 ++++ b/fs/namespace.c
2610 +@@ -3216,8 +3216,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2611 + /* make certain new is below the root */
2612 + if (!is_path_reachable(new_mnt, new.dentry, &root))
2613 + goto out4;
2614 +- root_mp->m_count++; /* pin it so it won't go away */
2615 + lock_mount_hash();
2616 ++ root_mp->m_count++; /* pin it so it won't go away */
2617 + detach_mnt(new_mnt, &parent_path);
2618 + detach_mnt(root_mnt, &root_parent);
2619 + if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
2620 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2621 +index fca8b2e7fbeb..d5d1c70bb927 100644
2622 +--- a/fs/nfsd/nfs4state.c
2623 ++++ b/fs/nfsd/nfs4state.c
2624 +@@ -246,6 +246,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
2625 + if (!nbl) {
2626 + nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
2627 + if (nbl) {
2628 ++ INIT_LIST_HEAD(&nbl->nbl_list);
2629 ++ INIT_LIST_HEAD(&nbl->nbl_lru);
2630 + fh_copy_shallow(&nbl->nbl_fh, fh);
2631 + locks_init_lock(&nbl->nbl_lock);
2632 + nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
2633 +diff --git a/fs/pnode.c b/fs/pnode.c
2634 +index 53d411a371ce..7910ae91f17e 100644
2635 +--- a/fs/pnode.c
2636 ++++ b/fs/pnode.c
2637 +@@ -266,14 +266,13 @@ static int propagate_one(struct mount *m)
2638 + if (IS_ERR(child))
2639 + return PTR_ERR(child);
2640 + child->mnt.mnt_flags &= ~MNT_LOCKED;
2641 ++ read_seqlock_excl(&mount_lock);
2642 + mnt_set_mountpoint(m, mp, child);
2643 ++ if (m->mnt_master != dest_master)
2644 ++ SET_MNT_MARK(m->mnt_master);
2645 ++ read_sequnlock_excl(&mount_lock);
2646 + last_dest = m;
2647 + last_source = child;
2648 +- if (m->mnt_master != dest_master) {
2649 +- read_seqlock_excl(&mount_lock);
2650 +- SET_MNT_MARK(m->mnt_master);
2651 +- read_sequnlock_excl(&mount_lock);
2652 +- }
2653 + hlist_add_head(&child->mnt_hash, list);
2654 + return count_mounts(m->mnt_ns, child);
2655 + }
2656 +diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
2657 +index ce400f97370d..aaa7486b6f0d 100644
2658 +--- a/fs/proc/vmcore.c
2659 ++++ b/fs/proc/vmcore.c
2660 +@@ -459,7 +459,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
2661 + tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
2662 + kaddr = elfnotes_buf + start - elfcorebuf_sz;
2663 + if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
2664 +- kaddr, tsz))
2665 ++ kaddr, 0, tsz))
2666 + goto fail;
2667 + size -= tsz;
2668 + start += tsz;
2669 +diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
2670 +index cb4833d06467..7cfbe2b0f886 100644
2671 +--- a/fs/xfs/xfs_inode.c
2672 ++++ b/fs/xfs/xfs_inode.c
2673 +@@ -3035,7 +3035,8 @@ xfs_rename(
2674 + &dfops, &first_block, spaceres);
2675 +
2676 + /*
2677 +- * Set up the target.
2678 ++ * Check for expected errors before we dirty the transaction
2679 ++ * so we can return an error without a transaction abort.
2680 + */
2681 + if (target_ip == NULL) {
2682 + /*
2683 +@@ -3047,6 +3048,46 @@ xfs_rename(
2684 + if (error)
2685 + goto out_trans_cancel;
2686 + }
2687 ++ } else {
2688 ++ /*
2689 ++ * If target exists and it's a directory, check that whether
2690 ++ * it can be destroyed.
2691 ++ */
2692 ++ if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2693 ++ (!xfs_dir_isempty(target_ip) ||
2694 ++ (VFS_I(target_ip)->i_nlink > 2))) {
2695 ++ error = -EEXIST;
2696 ++ goto out_trans_cancel;
2697 ++ }
2698 ++ }
2699 ++
2700 ++ /*
2701 ++ * Directory entry creation below may acquire the AGF. Remove
2702 ++ * the whiteout from the unlinked list first to preserve correct
2703 ++ * AGI/AGF locking order. This dirties the transaction so failures
2704 ++ * after this point will abort and log recovery will clean up the
2705 ++ * mess.
2706 ++ *
2707 ++ * For whiteouts, we need to bump the link count on the whiteout
2708 ++ * inode. After this point, we have a real link, clear the tmpfile
2709 ++ * state flag from the inode so it doesn't accidentally get misused
2710 ++ * in future.
2711 ++ */
2712 ++ if (wip) {
2713 ++ ASSERT(VFS_I(wip)->i_nlink == 0);
2714 ++ error = xfs_iunlink_remove(tp, wip);
2715 ++ if (error)
2716 ++ goto out_trans_cancel;
2717 ++
2718 ++ xfs_bumplink(tp, wip);
2719 ++ xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
2720 ++ VFS_I(wip)->i_state &= ~I_LINKABLE;
2721 ++ }
2722 ++
2723 ++ /*
2724 ++ * Set up the target.
2725 ++ */
2726 ++ if (target_ip == NULL) {
2727 + /*
2728 + * If target does not exist and the rename crosses
2729 + * directories, adjust the target directory link count
2730 +@@ -3067,22 +3108,6 @@ xfs_rename(
2731 + goto out_bmap_cancel;
2732 + }
2733 + } else { /* target_ip != NULL */
2734 +- /*
2735 +- * If target exists and it's a directory, check that both
2736 +- * target and source are directories and that target can be
2737 +- * destroyed, or that neither is a directory.
2738 +- */
2739 +- if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
2740 +- /*
2741 +- * Make sure target dir is empty.
2742 +- */
2743 +- if (!(xfs_dir_isempty(target_ip)) ||
2744 +- (VFS_I(target_ip)->i_nlink > 2)) {
2745 +- error = -EEXIST;
2746 +- goto out_trans_cancel;
2747 +- }
2748 +- }
2749 +-
2750 + /*
2751 + * Link the source inode under the target name.
2752 + * If the source inode is a directory and we are moving
2753 +@@ -3175,32 +3200,6 @@ xfs_rename(
2754 + if (error)
2755 + goto out_bmap_cancel;
2756 +
2757 +- /*
2758 +- * For whiteouts, we need to bump the link count on the whiteout inode.
2759 +- * This means that failures all the way up to this point leave the inode
2760 +- * on the unlinked list and so cleanup is a simple matter of dropping
2761 +- * the remaining reference to it. If we fail here after bumping the link
2762 +- * count, we're shutting down the filesystem so we'll never see the
2763 +- * intermediate state on disk.
2764 +- */
2765 +- if (wip) {
2766 +- ASSERT(VFS_I(wip)->i_nlink == 0);
2767 +- error = xfs_bumplink(tp, wip);
2768 +- if (error)
2769 +- goto out_bmap_cancel;
2770 +- error = xfs_iunlink_remove(tp, wip);
2771 +- if (error)
2772 +- goto out_bmap_cancel;
2773 +- xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
2774 +-
2775 +- /*
2776 +- * Now we have a real link, clear the "I'm a tmpfile" state
2777 +- * flag from the inode so it doesn't accidentally get misused in
2778 +- * future.
2779 +- */
2780 +- VFS_I(wip)->i_state &= ~I_LINKABLE;
2781 +- }
2782 +-
2783 + xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2784 + xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
2785 + if (new_parent)
2786 +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
2787 +index 4e768e606998..360e32220f93 100644
2788 +--- a/fs/xfs/xfs_log.c
2789 ++++ b/fs/xfs/xfs_log.c
2790 +@@ -608,6 +608,7 @@ xfs_log_mount(
2791 + xfs_daddr_t blk_offset,
2792 + int num_bblks)
2793 + {
2794 ++ bool fatal = xfs_sb_version_hascrc(&mp->m_sb);
2795 + int error = 0;
2796 + int min_logfsbs;
2797 +
2798 +@@ -659,9 +660,20 @@ xfs_log_mount(
2799 + XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
2800 + XFS_MAX_LOG_BYTES);
2801 + error = -EINVAL;
2802 ++ } else if (mp->m_sb.sb_logsunit > 1 &&
2803 ++ mp->m_sb.sb_logsunit % mp->m_sb.sb_blocksize) {
2804 ++ xfs_warn(mp,
2805 ++ "log stripe unit %u bytes must be a multiple of block size",
2806 ++ mp->m_sb.sb_logsunit);
2807 ++ error = -EINVAL;
2808 ++ fatal = true;
2809 + }
2810 + if (error) {
2811 +- if (xfs_sb_version_hascrc(&mp->m_sb)) {
2812 ++ /*
2813 ++ * Log check errors are always fatal on v5; or whenever bad
2814 ++ * metadata leads to a crash.
2815 ++ */
2816 ++ if (fatal) {
2817 + xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
2818 + ASSERT(0);
2819 + goto out_free_log;
2820 +diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
2821 +index 37e603bf1591..db7f9fdd20a3 100644
2822 +--- a/fs/xfs/xfs_reflink.c
2823 ++++ b/fs/xfs/xfs_reflink.c
2824 +@@ -1125,6 +1125,7 @@ xfs_reflink_remap_extent(
2825 + uirec.br_startblock = irec->br_startblock + rlen;
2826 + uirec.br_startoff = irec->br_startoff + rlen;
2827 + uirec.br_blockcount = unmap_len - rlen;
2828 ++ uirec.br_state = irec->br_state;
2829 + unmap_len = rlen;
2830 +
2831 + /* If this isn't a real mapping, we're done. */
2832 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
2833 +index 30376715a607..2e06ca46f07c 100644
2834 +--- a/include/linux/kvm_host.h
2835 ++++ b/include/linux/kvm_host.h
2836 +@@ -945,7 +945,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
2837 + start = slot + 1;
2838 + }
2839 +
2840 +- if (gfn >= memslots[start].base_gfn &&
2841 ++ if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
2842 + gfn < memslots[start].base_gfn + memslots[start].npages) {
2843 + atomic_set(&slots->lru_slot, start);
2844 + return &memslots[start];
2845 +diff --git a/include/linux/overflow.h b/include/linux/overflow.h
2846 +index c8890ec358a7..d309788f4cd2 100644
2847 +--- a/include/linux/overflow.h
2848 ++++ b/include/linux/overflow.h
2849 +@@ -202,4 +202,35 @@
2850 +
2851 + #endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
2852 +
2853 ++/** check_shl_overflow() - Calculate a left-shifted value and check overflow
2854 ++ *
2855 ++ * @a: Value to be shifted
2856 ++ * @s: How many bits left to shift
2857 ++ * @d: Pointer to where to store the result
2858 ++ *
2859 ++ * Computes *@d = (@a << @s)
2860 ++ *
2861 ++ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
2862 ++ * make sense. Example conditions:
2863 ++ * - 'a << s' causes bits to be lost when stored in *d.
2864 ++ * - 's' is garbage (e.g. negative) or so large that the result of
2865 ++ * 'a << s' is guaranteed to be 0.
2866 ++ * - 'a' is negative.
2867 ++ * - 'a << s' sets the sign bit, if any, in '*d'.
2868 ++ *
2869 ++ * '*d' will hold the results of the attempted shift, but is not
2870 ++ * considered "safe for use" if false is returned.
2871 ++ */
2872 ++#define check_shl_overflow(a, s, d) ({ \
2873 ++ typeof(a) _a = a; \
2874 ++ typeof(s) _s = s; \
2875 ++ typeof(d) _d = d; \
2876 ++ u64 _a_full = _a; \
2877 ++ unsigned int _to_shift = \
2878 ++ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
2879 ++ *_d = (_a_full << _to_shift); \
2880 ++ (_to_shift != _s || *_d < 0 || _a < 0 || \
2881 ++ (*_d >> _to_shift) != _a); \
2882 ++})
2883 ++
2884 + #endif /* __LINUX_OVERFLOW_H */
2885 +diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
2886 +index 2dd0a9ed5b36..733fad7dfbed 100644
2887 +--- a/include/linux/qed/qed_chain.h
2888 ++++ b/include/linux/qed/qed_chain.h
2889 +@@ -97,6 +97,11 @@ struct qed_chain_u32 {
2890 + u32 cons_idx;
2891 + };
2892 +
2893 ++struct addr_tbl_entry {
2894 ++ void *virt_addr;
2895 ++ dma_addr_t dma_map;
2896 ++};
2897 ++
2898 + struct qed_chain {
2899 + /* fastpath portion of the chain - required for commands such
2900 + * as produce / consume.
2901 +@@ -107,10 +112,11 @@ struct qed_chain {
2902 +
2903 + /* Fastpath portions of the PBL [if exists] */
2904 + struct {
2905 +- /* Table for keeping the virtual addresses of the chain pages,
2906 +- * respectively to the physical addresses in the pbl table.
2907 ++ /* Table for keeping the virtual and physical addresses of the
2908 ++ * chain pages, respectively to the physical addresses
2909 ++ * in the pbl table.
2910 + */
2911 +- void **pp_virt_addr_tbl;
2912 ++ struct addr_tbl_entry *pp_addr_tbl;
2913 +
2914 + union {
2915 + struct qed_chain_pbl_u16 u16;
2916 +@@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
2917 + *(u32 *)page_to_inc = 0;
2918 + page_index = *(u32 *)page_to_inc;
2919 + }
2920 +- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
2921 ++ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
2922 + }
2923 + }
2924 +
2925 +@@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
2926 +
2927 + p_chain->pbl_sp.p_phys_table = 0;
2928 + p_chain->pbl_sp.p_virt_table = NULL;
2929 +- p_chain->pbl.pp_virt_addr_tbl = NULL;
2930 ++ p_chain->pbl.pp_addr_tbl = NULL;
2931 + }
2932 +
2933 + /**
2934 +@@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain,
2935 + static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
2936 + void *p_virt_pbl,
2937 + dma_addr_t p_phys_pbl,
2938 +- void **pp_virt_addr_tbl)
2939 ++ struct addr_tbl_entry *pp_addr_tbl)
2940 + {
2941 + p_chain->pbl_sp.p_phys_table = p_phys_pbl;
2942 + p_chain->pbl_sp.p_virt_table = p_virt_pbl;
2943 +- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
2944 ++ p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
2945 + }
2946 +
2947 + /**
2948 +@@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
2949 + break;
2950 + case QED_CHAIN_MODE_PBL:
2951 + last_page_idx = p_chain->page_cnt - 1;
2952 +- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
2953 ++ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
2954 + break;
2955 + }
2956 + /* p_virt_addr points at this stage to the last page of the chain */
2957 +@@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
2958 + page_cnt = qed_chain_get_page_cnt(p_chain);
2959 +
2960 + for (i = 0; i < page_cnt; i++)
2961 +- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
2962 ++ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
2963 + QED_CHAIN_PAGE_SIZE);
2964 + }
2965 +
2966 +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
2967 +index 29ef33498cb6..98d65e11e837 100644
2968 +--- a/include/linux/vmalloc.h
2969 ++++ b/include/linux/vmalloc.h
2970 +@@ -102,7 +102,7 @@ extern void vunmap(const void *addr);
2971 +
2972 + extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
2973 + unsigned long uaddr, void *kaddr,
2974 +- unsigned long size);
2975 ++ unsigned long pgoff, unsigned long size);
2976 +
2977 + extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2978 + unsigned long pgoff);
2979 +diff --git a/include/net/tcp.h b/include/net/tcp.h
2980 +index c96302310314..58e8e57787cf 100644
2981 +--- a/include/net/tcp.h
2982 ++++ b/include/net/tcp.h
2983 +@@ -55,7 +55,7 @@ extern struct inet_hashinfo tcp_hashinfo;
2984 + extern struct percpu_counter tcp_orphan_count;
2985 + void tcp_time_wait(struct sock *sk, int state, int timeo);
2986 +
2987 +-#define MAX_TCP_HEADER (128 + MAX_HEADER)
2988 ++#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
2989 + #define MAX_TCP_OPTION_SPACE 40
2990 + #define TCP_MIN_SND_MSS 48
2991 + #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
2992 +diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
2993 +index 7b8c9e19bad1..0f3cb13db8e9 100644
2994 +--- a/include/uapi/linux/keyctl.h
2995 ++++ b/include/uapi/linux/keyctl.h
2996 +@@ -65,7 +65,12 @@
2997 +
2998 + /* keyctl structures */
2999 + struct keyctl_dh_params {
3000 +- __s32 private;
3001 ++ union {
3002 ++#ifndef __cplusplus
3003 ++ __s32 private;
3004 ++#endif
3005 ++ __s32 priv;
3006 ++ };
3007 + __s32 prime;
3008 + __s32 base;
3009 + };
3010 +diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
3011 +index fa7f97da5b76..7272f85d6d6a 100644
3012 +--- a/include/uapi/linux/swab.h
3013 ++++ b/include/uapi/linux/swab.h
3014 +@@ -135,9 +135,9 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
3015 +
3016 + static __always_inline unsigned long __swab(const unsigned long y)
3017 + {
3018 +-#if BITS_PER_LONG == 64
3019 ++#if __BITS_PER_LONG == 64
3020 + return __swab64(y);
3021 +-#else /* BITS_PER_LONG == 32 */
3022 ++#else /* __BITS_PER_LONG == 32 */
3023 + return __swab32(y);
3024 + #endif
3025 + }
3026 +diff --git a/ipc/util.c b/ipc/util.c
3027 +index 79b30eee32cd..7989f5e53219 100644
3028 +--- a/ipc/util.c
3029 ++++ b/ipc/util.c
3030 +@@ -750,13 +750,13 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
3031 + total++;
3032 + }
3033 +
3034 ++ *new_pos = pos + 1;
3035 + if (total >= ids->in_use)
3036 + return NULL;
3037 +
3038 + for (; pos < IPCMNI; pos++) {
3039 + ipc = idr_find(&ids->ipcs_idr, pos);
3040 + if (ipc != NULL) {
3041 +- *new_pos = pos + 1;
3042 + rcu_read_lock();
3043 + ipc_lock_object(ipc);
3044 + return ipc;
3045 +diff --git a/kernel/audit.c b/kernel/audit.c
3046 +index b21a8910f765..aa6d5e39526b 100644
3047 +--- a/kernel/audit.c
3048 ++++ b/kernel/audit.c
3049 +@@ -1292,6 +1292,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
3050 + case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
3051 + if (!audit_enabled && msg_type != AUDIT_USER_AVC)
3052 + return 0;
3053 ++ /* exit early if there isn't at least one character to print */
3054 ++ if (data_len < 2)
3055 ++ return -EINVAL;
3056 +
3057 + err = audit_filter(msg_type, AUDIT_FILTER_USER);
3058 + if (err == 1) { /* match or error */
3059 +diff --git a/kernel/events/core.c b/kernel/events/core.c
3060 +index 845c8a1a9d30..5636c9c48545 100644
3061 +--- a/kernel/events/core.c
3062 ++++ b/kernel/events/core.c
3063 +@@ -6119,9 +6119,12 @@ static u64 perf_virt_to_phys(u64 virt)
3064 + * Try IRQ-safe __get_user_pages_fast first.
3065 + * If failed, leave phys_addr as 0.
3066 + */
3067 +- if ((current->mm != NULL) &&
3068 +- (__get_user_pages_fast(virt, 1, 0, &p) == 1))
3069 +- phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
3070 ++ if (current->mm != NULL) {
3071 ++ pagefault_disable();
3072 ++ if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
3073 ++ phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
3074 ++ pagefault_enable();
3075 ++ }
3076 +
3077 + if (p)
3078 + put_page(p);
3079 +@@ -6607,10 +6610,17 @@ static void perf_event_task_output(struct perf_event *event,
3080 + goto out;
3081 +
3082 + task_event->event_id.pid = perf_event_pid(event, task);
3083 +- task_event->event_id.ppid = perf_event_pid(event, current);
3084 +-
3085 + task_event->event_id.tid = perf_event_tid(event, task);
3086 +- task_event->event_id.ptid = perf_event_tid(event, current);
3087 ++
3088 ++ if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
3089 ++ task_event->event_id.ppid = perf_event_pid(event,
3090 ++ task->real_parent);
3091 ++ task_event->event_id.ptid = perf_event_pid(event,
3092 ++ task->real_parent);
3093 ++ } else { /* PERF_RECORD_FORK */
3094 ++ task_event->event_id.ppid = perf_event_pid(event, current);
3095 ++ task_event->event_id.ptid = perf_event_tid(event, current);
3096 ++ }
3097 +
3098 + task_event->event_id.time = perf_event_clock(event);
3099 +
3100 +diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
3101 +index 6e40ff6be083..291e0797125b 100644
3102 +--- a/kernel/gcov/fs.c
3103 ++++ b/kernel/gcov/fs.c
3104 +@@ -109,9 +109,9 @@ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos)
3105 + {
3106 + struct gcov_iterator *iter = data;
3107 +
3108 ++ (*pos)++;
3109 + if (gcov_iter_next(iter))
3110 + return NULL;
3111 +- (*pos)++;
3112 +
3113 + return iter;
3114 + }
3115 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3116 +index 310656b4ede6..d6464045d3b9 100644
3117 +--- a/mm/hugetlb.c
3118 ++++ b/mm/hugetlb.c
3119 +@@ -4745,8 +4745,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
3120 + {
3121 + pgd_t *pgd;
3122 + p4d_t *p4d;
3123 +- pud_t *pud;
3124 +- pmd_t *pmd;
3125 ++ pud_t *pud, pud_entry;
3126 ++ pmd_t *pmd, pmd_entry;
3127 +
3128 + pgd = pgd_offset(mm, addr);
3129 + if (!pgd_present(*pgd))
3130 +@@ -4756,17 +4756,19 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
3131 + return NULL;
3132 +
3133 + pud = pud_offset(p4d, addr);
3134 +- if (sz != PUD_SIZE && pud_none(*pud))
3135 ++ pud_entry = READ_ONCE(*pud);
3136 ++ if (sz != PUD_SIZE && pud_none(pud_entry))
3137 + return NULL;
3138 + /* hugepage or swap? */
3139 +- if (pud_huge(*pud) || !pud_present(*pud))
3140 ++ if (pud_huge(pud_entry) || !pud_present(pud_entry))
3141 + return (pte_t *)pud;
3142 +
3143 + pmd = pmd_offset(pud, addr);
3144 +- if (sz != PMD_SIZE && pmd_none(*pmd))
3145 ++ pmd_entry = READ_ONCE(*pmd);
3146 ++ if (sz != PMD_SIZE && pmd_none(pmd_entry))
3147 + return NULL;
3148 + /* hugepage or swap? */
3149 +- if (pmd_huge(*pmd) || !pmd_present(*pmd))
3150 ++ if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
3151 + return (pte_t *)pmd;
3152 +
3153 + return NULL;
3154 +diff --git a/mm/ksm.c b/mm/ksm.c
3155 +index 764486ffcd16..65d4bf52f543 100644
3156 +--- a/mm/ksm.c
3157 ++++ b/mm/ksm.c
3158 +@@ -2074,8 +2074,16 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
3159 +
3160 + down_read(&mm->mmap_sem);
3161 + vma = find_mergeable_vma(mm, rmap_item->address);
3162 +- err = try_to_merge_one_page(vma, page,
3163 +- ZERO_PAGE(rmap_item->address));
3164 ++ if (vma) {
3165 ++ err = try_to_merge_one_page(vma, page,
3166 ++ ZERO_PAGE(rmap_item->address));
3167 ++ } else {
3168 ++ /*
3169 ++ * If the vma is out of date, we do not need to
3170 ++ * continue.
3171 ++ */
3172 ++ err = 0;
3173 ++ }
3174 + up_read(&mm->mmap_sem);
3175 + /*
3176 + * In case of failure, the page was not really empty, so we
3177 +diff --git a/mm/shmem.c b/mm/shmem.c
3178 +index 0b6db162083c..f9a1e0ba259f 100644
3179 +--- a/mm/shmem.c
3180 ++++ b/mm/shmem.c
3181 +@@ -2330,11 +2330,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
3182 +
3183 + lru_cache_add_anon(page);
3184 +
3185 +- spin_lock(&info->lock);
3186 ++ spin_lock_irq(&info->lock);
3187 + info->alloced++;
3188 + inode->i_blocks += BLOCKS_PER_PAGE;
3189 + shmem_recalc_inode(inode);
3190 +- spin_unlock(&info->lock);
3191 ++ spin_unlock_irq(&info->lock);
3192 +
3193 + inc_mm_counter(dst_mm, mm_counter_file(page));
3194 + page_add_file_rmap(page, false);
3195 +diff --git a/mm/slub.c b/mm/slub.c
3196 +index 3c1a16f03b2b..481518c3f61a 100644
3197 +--- a/mm/slub.c
3198 ++++ b/mm/slub.c
3199 +@@ -269,8 +269,7 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
3200 +
3201 + static void prefetch_freepointer(const struct kmem_cache *s, void *object)
3202 + {
3203 +- if (object)
3204 +- prefetch(freelist_dereference(s, object + s->offset));
3205 ++ prefetch(object + s->offset);
3206 + }
3207 +
3208 + static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
3209 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
3210 +index 88091fd704f4..63bf3f207e16 100644
3211 +--- a/mm/vmalloc.c
3212 ++++ b/mm/vmalloc.c
3213 +@@ -31,6 +31,7 @@
3214 + #include <linux/compiler.h>
3215 + #include <linux/llist.h>
3216 + #include <linux/bitops.h>
3217 ++#include <linux/overflow.h>
3218 +
3219 + #include <linux/uaccess.h>
3220 + #include <asm/tlbflush.h>
3221 +@@ -2246,6 +2247,7 @@ finished:
3222 + * @vma: vma to cover
3223 + * @uaddr: target user address to start at
3224 + * @kaddr: virtual address of vmalloc kernel memory
3225 ++ * @pgoff: offset from @kaddr to start at
3226 + * @size: size of map area
3227 + *
3228 + * Returns: 0 for success, -Exxx on failure
3229 +@@ -2258,9 +2260,15 @@ finished:
3230 + * Similar to remap_pfn_range() (see mm/memory.c)
3231 + */
3232 + int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3233 +- void *kaddr, unsigned long size)
3234 ++ void *kaddr, unsigned long pgoff,
3235 ++ unsigned long size)
3236 + {
3237 + struct vm_struct *area;
3238 ++ unsigned long off;
3239 ++ unsigned long end_index;
3240 ++
3241 ++ if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3242 ++ return -EINVAL;
3243 +
3244 + size = PAGE_ALIGN(size);
3245 +
3246 +@@ -2274,8 +2282,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3247 + if (!(area->flags & VM_USERMAP))
3248 + return -EINVAL;
3249 +
3250 +- if (kaddr + size > area->addr + get_vm_area_size(area))
3251 ++ if (check_add_overflow(size, off, &end_index) ||
3252 ++ end_index > get_vm_area_size(area))
3253 + return -EINVAL;
3254 ++ kaddr += off;
3255 +
3256 + do {
3257 + struct page *page = vmalloc_to_page(kaddr);
3258 +@@ -2314,7 +2324,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3259 + unsigned long pgoff)
3260 + {
3261 + return remap_vmalloc_range_partial(vma, vma->vm_start,
3262 +- addr + (pgoff << PAGE_SHIFT),
3263 ++ addr, pgoff,
3264 + vma->vm_end - vma->vm_start);
3265 + }
3266 + EXPORT_SYMBOL(remap_vmalloc_range);
3267 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
3268 +index b930ab5cf140..c1693d75e196 100644
3269 +--- a/net/ipv4/ip_vti.c
3270 ++++ b/net/ipv4/ip_vti.c
3271 +@@ -681,10 +681,8 @@ static int __init vti_init(void)
3272 +
3273 + msg = "ipip tunnel";
3274 + err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
3275 +- if (err < 0) {
3276 +- pr_info("%s: cant't register tunnel\n",__func__);
3277 ++ if (err < 0)
3278 + goto xfrm_tunnel_failed;
3279 +- }
3280 +
3281 + msg = "netlink interface";
3282 + err = rtnl_link_register(&vti_link_ops);
3283 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
3284 +index 53a11894f9e4..261a9813b88c 100644
3285 +--- a/net/ipv4/raw.c
3286 ++++ b/net/ipv4/raw.c
3287 +@@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
3288 + goto out;
3289 +
3290 + /* hdrincl should be READ_ONCE(inet->hdrincl)
3291 +- * but READ_ONCE() doesn't work with bit fields
3292 ++ * but READ_ONCE() doesn't work with bit fields.
3293 ++ * Doing this indirectly yields the same result.
3294 + */
3295 + hdrincl = inet->hdrincl;
3296 ++ hdrincl = READ_ONCE(hdrincl);
3297 + /*
3298 + * Check the flags.
3299 + */
3300 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3301 +index 05fe1d007544..3c298ec32200 100644
3302 +--- a/net/ipv4/route.c
3303 ++++ b/net/ipv4/route.c
3304 +@@ -133,8 +133,6 @@ static int ip_rt_min_advmss __read_mostly = 256;
3305 +
3306 + static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
3307 +
3308 +-static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3309 +-
3310 + /*
3311 + * Interface to generic destination cache.
3312 + */
3313 +@@ -2869,6 +2867,7 @@ void ip_rt_multicast_event(struct in_device *in_dev)
3314 + static int ip_rt_gc_interval __read_mostly = 60 * HZ;
3315 + static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
3316 + static int ip_rt_gc_elasticity __read_mostly = 8;
3317 ++static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3318 +
3319 + static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3320 + void __user *buffer,
3321 +diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
3322 +index 94b8702603bc..35dbc8eb9396 100644
3323 +--- a/net/ipv4/xfrm4_output.c
3324 ++++ b/net/ipv4/xfrm4_output.c
3325 +@@ -76,9 +76,7 @@ int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
3326 + {
3327 + memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
3328 +
3329 +-#ifdef CONFIG_NETFILTER
3330 + IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
3331 +-#endif
3332 +
3333 + return xfrm_output(sk, skb);
3334 + }
3335 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
3336 +index 8c492471b0da..337b43d4c3eb 100644
3337 +--- a/net/ipv6/ipv6_sockglue.c
3338 ++++ b/net/ipv6/ipv6_sockglue.c
3339 +@@ -185,15 +185,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
3340 + retv = -EBUSY;
3341 + break;
3342 + }
3343 +- } else if (sk->sk_protocol == IPPROTO_TCP) {
3344 +- if (sk->sk_prot != &tcpv6_prot) {
3345 +- retv = -EBUSY;
3346 +- break;
3347 +- }
3348 +- break;
3349 +- } else {
3350 ++ }
3351 ++ if (sk->sk_protocol == IPPROTO_TCP &&
3352 ++ sk->sk_prot != &tcpv6_prot) {
3353 ++ retv = -EBUSY;
3354 + break;
3355 + }
3356 ++ if (sk->sk_protocol != IPPROTO_TCP)
3357 ++ break;
3358 + if (sk->sk_state != TCP_ESTABLISHED) {
3359 + retv = -ENOTCONN;
3360 + break;
3361 +diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
3362 +index 29dae7f2ff14..aff901be5353 100644
3363 +--- a/net/ipv6/xfrm6_output.c
3364 ++++ b/net/ipv6/xfrm6_output.c
3365 +@@ -130,9 +130,7 @@ int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
3366 + {
3367 + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
3368 +
3369 +-#ifdef CONFIG_NETFILTER
3370 + IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
3371 +-#endif
3372 +
3373 + return xfrm_output(sk, skb);
3374 + }
3375 +diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
3376 +index 0c59354e280e..d098bb8d53aa 100644
3377 +--- a/net/netrom/nr_route.c
3378 ++++ b/net/netrom/nr_route.c
3379 +@@ -199,6 +199,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
3380 + /* refcount initialized at 1 */
3381 + spin_unlock_bh(&nr_node_list_lock);
3382 +
3383 ++ nr_neigh_put(nr_neigh);
3384 + return 0;
3385 + }
3386 + nr_node_lock(nr_node);
3387 +diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
3388 +index 39231237e1c3..30f71620d4e3 100644
3389 +--- a/net/x25/x25_dev.c
3390 ++++ b/net/x25/x25_dev.c
3391 +@@ -120,8 +120,10 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
3392 + goto drop;
3393 + }
3394 +
3395 +- if (!pskb_may_pull(skb, 1))
3396 ++ if (!pskb_may_pull(skb, 1)) {
3397 ++ x25_neigh_put(nb);
3398 + return 0;
3399 ++ }
3400 +
3401 + switch (skb->data[0]) {
3402 +
3403 +diff --git a/security/keys/internal.h b/security/keys/internal.h
3404 +index e3a573840186..124273e500cf 100644
3405 +--- a/security/keys/internal.h
3406 ++++ b/security/keys/internal.h
3407 +@@ -20,6 +20,8 @@
3408 + #include <linux/keyctl.h>
3409 + #include <linux/refcount.h>
3410 + #include <linux/compat.h>
3411 ++#include <linux/mm.h>
3412 ++#include <linux/vmalloc.h>
3413 +
3414 + struct iovec;
3415 +
3416 +@@ -305,4 +307,14 @@ static inline void key_check(const struct key *key)
3417 +
3418 + #endif
3419 +
3420 ++/*
3421 ++ * Helper function to clear and free a kvmalloc'ed memory object.
3422 ++ */
3423 ++static inline void __kvzfree(const void *addr, size_t len)
3424 ++{
3425 ++ if (addr) {
3426 ++ memset((void *)addr, 0, len);
3427 ++ kvfree(addr);
3428 ++ }
3429 ++}
3430 + #endif /* _INTERNAL_H */
3431 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
3432 +index 4b6a084e323b..c07c2e2b2478 100644
3433 +--- a/security/keys/keyctl.c
3434 ++++ b/security/keys/keyctl.c
3435 +@@ -330,7 +330,7 @@ long keyctl_update_key(key_serial_t id,
3436 + payload = NULL;
3437 + if (plen) {
3438 + ret = -ENOMEM;
3439 +- payload = kmalloc(plen, GFP_KERNEL);
3440 ++ payload = kvmalloc(plen, GFP_KERNEL);
3441 + if (!payload)
3442 + goto error;
3443 +
3444 +@@ -351,7 +351,7 @@ long keyctl_update_key(key_serial_t id,
3445 +
3446 + key_ref_put(key_ref);
3447 + error2:
3448 +- kzfree(payload);
3449 ++ __kvzfree(payload, plen);
3450 + error:
3451 + return ret;
3452 + }
3453 +@@ -772,7 +772,8 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
3454 + struct key *key;
3455 + key_ref_t key_ref;
3456 + long ret;
3457 +- char *key_data;
3458 ++ char *key_data = NULL;
3459 ++ size_t key_data_len;
3460 +
3461 + /* find the key first */
3462 + key_ref = lookup_user_key(keyid, 0, 0);
3463 +@@ -823,24 +824,51 @@ can_read_key:
3464 + * Allocating a temporary buffer to hold the keys before
3465 + * transferring them to user buffer to avoid potential
3466 + * deadlock involving page fault and mmap_sem.
3467 ++ *
3468 ++ * key_data_len = (buflen <= PAGE_SIZE)
3469 ++ * ? buflen : actual length of key data
3470 ++ *
3471 ++ * This prevents allocating arbitrary large buffer which can
3472 ++ * be much larger than the actual key length. In the latter case,
3473 ++ * at least 2 passes of this loop is required.
3474 + */
3475 +- key_data = kmalloc(buflen, GFP_KERNEL);
3476 ++ key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
3477 ++ for (;;) {
3478 ++ if (key_data_len) {
3479 ++ key_data = kvmalloc(key_data_len, GFP_KERNEL);
3480 ++ if (!key_data) {
3481 ++ ret = -ENOMEM;
3482 ++ goto key_put_out;
3483 ++ }
3484 ++ }
3485 +
3486 +- if (!key_data) {
3487 +- ret = -ENOMEM;
3488 +- goto key_put_out;
3489 +- }
3490 +- ret = __keyctl_read_key(key, key_data, buflen);
3491 ++ ret = __keyctl_read_key(key, key_data, key_data_len);
3492 ++
3493 ++ /*
3494 ++ * Read methods will just return the required length without
3495 ++ * any copying if the provided length isn't large enough.
3496 ++ */
3497 ++ if (ret <= 0 || ret > buflen)
3498 ++ break;
3499 ++
3500 ++ /*
3501 ++ * The key may change (unlikely) in between 2 consecutive
3502 ++ * __keyctl_read_key() calls. In this case, we reallocate
3503 ++ * a larger buffer and redo the key read when
3504 ++ * key_data_len < ret <= buflen.
3505 ++ */
3506 ++ if (ret > key_data_len) {
3507 ++ if (unlikely(key_data))
3508 ++ __kvzfree(key_data, key_data_len);
3509 ++ key_data_len = ret;
3510 ++ continue; /* Allocate buffer */
3511 ++ }
3512 +
3513 +- /*
3514 +- * Read methods will just return the required length without
3515 +- * any copying if the provided length isn't large enough.
3516 +- */
3517 +- if (ret > 0 && ret <= buflen) {
3518 + if (copy_to_user(buffer, key_data, ret))
3519 + ret = -EFAULT;
3520 ++ break;
3521 + }
3522 +- kzfree(key_data);
3523 ++ __kvzfree(key_data, key_data_len);
3524 +
3525 + key_put_out:
3526 + key_put(key);
3527 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3528 +index d392c1ec0b28..46670da04707 100644
3529 +--- a/sound/pci/hda/hda_intel.c
3530 ++++ b/sound/pci/hda/hda_intel.c
3531 +@@ -2173,7 +2173,6 @@ static const struct hdac_io_ops pci_hda_io_ops = {
3532 + * should be ignored from the beginning.
3533 + */
3534 + static const struct snd_pci_quirk driver_blacklist[] = {
3535 +- SND_PCI_QUIRK(0x1043, 0x874f, "ASUS ROG Zenith II / Strix", 0),
3536 + SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
3537 + SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
3538 + {}
3539 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3540 +index 3fded87817c6..288ea05dfa3c 100644
3541 +--- a/sound/pci/hda/patch_realtek.c
3542 ++++ b/sound/pci/hda/patch_realtek.c
3543 +@@ -334,6 +334,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
3544 + case 0x10ec0233:
3545 + case 0x10ec0235:
3546 + case 0x10ec0236:
3547 ++ case 0x10ec0245:
3548 + case 0x10ec0255:
3549 + case 0x10ec0256:
3550 + case 0x10ec0257:
3551 +@@ -7264,6 +7265,7 @@ static int patch_alc269(struct hda_codec *codec)
3552 + spec->gen.mixer_nid = 0;
3553 + break;
3554 + case 0x10ec0215:
3555 ++ case 0x10ec0245:
3556 + case 0x10ec0285:
3557 + case 0x10ec0289:
3558 + spec->codec_variant = ALC269_TYPE_ALC215;
3559 +@@ -8344,6 +8346,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
3560 + HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
3561 + HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
3562 + HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
3563 ++ HDA_CODEC_ENTRY(0x10ec0245, "ALC245", patch_alc269),
3564 + HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
3565 + HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
3566 + HDA_CODEC_ENTRY(0x10ec0257, "ALC257", patch_alc269),
3567 +diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
3568 +index 6044b3bbb121..999eb3ba7867 100644
3569 +--- a/sound/soc/intel/atom/sst-atom-controls.c
3570 ++++ b/sound/soc/intel/atom/sst-atom-controls.c
3571 +@@ -974,7 +974,9 @@ static int sst_set_be_modules(struct snd_soc_dapm_widget *w,
3572 + dev_dbg(c->dev, "Enter: widget=%s\n", w->name);
3573 +
3574 + if (SND_SOC_DAPM_EVENT_ON(event)) {
3575 ++ mutex_lock(&drv->lock);
3576 + ret = sst_send_slot_map(drv);
3577 ++ mutex_unlock(&drv->lock);
3578 + if (ret)
3579 + return ret;
3580 + ret = sst_send_pipe_module_params(w, k);
3581 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
3582 +index 7861cf7a4488..c42ee8ef544d 100644
3583 +--- a/sound/soc/soc-dapm.c
3584 ++++ b/sound/soc/soc-dapm.c
3585 +@@ -413,7 +413,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
3586 +
3587 + memset(&template, 0, sizeof(template));
3588 + template.reg = e->reg;
3589 +- template.mask = e->mask << e->shift_l;
3590 ++ template.mask = e->mask;
3591 + template.shift = e->shift_l;
3592 + template.off_val = snd_soc_enum_item_to_val(e, 0);
3593 + template.on_val = template.off_val;
3594 +@@ -539,8 +539,22 @@ static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
3595 + if (data->value == value)
3596 + return false;
3597 +
3598 +- if (data->widget)
3599 +- data->widget->on_val = value;
3600 ++ if (data->widget) {
3601 ++ switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) {
3602 ++ case snd_soc_dapm_switch:
3603 ++ case snd_soc_dapm_mixer:
3604 ++ case snd_soc_dapm_mixer_named_ctl:
3605 ++ data->widget->on_val = value & data->widget->mask;
3606 ++ break;
3607 ++ case snd_soc_dapm_demux:
3608 ++ case snd_soc_dapm_mux:
3609 ++ data->widget->on_val = value >> data->widget->shift;
3610 ++ break;
3611 ++ default:
3612 ++ data->widget->on_val = value;
3613 ++ break;
3614 ++ }
3615 ++ }
3616 +
3617 + data->value = value;
3618 +
3619 +diff --git a/sound/usb/format.c b/sound/usb/format.c
3620 +index 2c44386e5569..eeb56d6fe8aa 100644
3621 +--- a/sound/usb/format.c
3622 ++++ b/sound/usb/format.c
3623 +@@ -220,6 +220,52 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
3624 + return 0;
3625 + }
3626 +
3627 ++/*
3628 ++ * Many Focusrite devices supports a limited set of sampling rates per
3629 ++ * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
3630 ++ * descriptor which has a non-standard bLength = 10.
3631 ++ */
3632 ++static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
3633 ++ struct audioformat *fp,
3634 ++ unsigned int rate)
3635 ++{
3636 ++ struct usb_interface *iface;
3637 ++ struct usb_host_interface *alts;
3638 ++ unsigned char *fmt;
3639 ++ unsigned int max_rate;
3640 ++
3641 ++ iface = usb_ifnum_to_if(chip->dev, fp->iface);
3642 ++ if (!iface)
3643 ++ return true;
3644 ++
3645 ++ alts = &iface->altsetting[fp->altset_idx];
3646 ++ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen,
3647 ++ NULL, UAC_FORMAT_TYPE);
3648 ++ if (!fmt)
3649 ++ return true;
3650 ++
3651 ++ if (fmt[0] == 10) { /* bLength */
3652 ++ max_rate = combine_quad(&fmt[6]);
3653 ++
3654 ++ /* Validate max rate */
3655 ++ if (max_rate != 48000 &&
3656 ++ max_rate != 96000 &&
3657 ++ max_rate != 192000 &&
3658 ++ max_rate != 384000) {
3659 ++
3660 ++ usb_audio_info(chip,
3661 ++ "%u:%d : unexpected max rate: %u\n",
3662 ++ fp->iface, fp->altsetting, max_rate);
3663 ++
3664 ++ return true;
3665 ++ }
3666 ++
3667 ++ return rate <= max_rate;
3668 ++ }
3669 ++
3670 ++ return true;
3671 ++}
3672 ++
3673 + /*
3674 + * Helper function to walk the array of sample rate triplets reported by
3675 + * the device. The problem is that we need to parse whole array first to
3676 +@@ -256,6 +302,11 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
3677 + }
3678 +
3679 + for (rate = min; rate <= max; rate += res) {
3680 ++ /* Filter out invalid rates on Focusrite devices */
3681 ++ if (USB_ID_VENDOR(chip->usb_id) == 0x1235 &&
3682 ++ !focusrite_valid_sample_rate(chip, fp, rate))
3683 ++ goto skip_rate;
3684 ++
3685 + if (fp->rate_table)
3686 + fp->rate_table[nr_rates] = rate;
3687 + if (!fp->rate_min || rate < fp->rate_min)
3688 +@@ -270,6 +321,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
3689 + break;
3690 + }
3691 +
3692 ++skip_rate:
3693 + /* avoid endless loop */
3694 + if (res == 0)
3695 + break;
3696 +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
3697 +index b54f7dab8372..b9ea4a42aee4 100644
3698 +--- a/sound/usb/mixer_quirks.c
3699 ++++ b/sound/usb/mixer_quirks.c
3700 +@@ -1520,11 +1520,15 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
3701 +
3702 + /* use known values for that card: interface#1 altsetting#1 */
3703 + iface = usb_ifnum_to_if(chip->dev, 1);
3704 +- if (!iface || iface->num_altsetting < 2)
3705 +- return -EINVAL;
3706 ++ if (!iface || iface->num_altsetting < 2) {
3707 ++ err = -EINVAL;
3708 ++ goto end;
3709 ++ }
3710 + alts = &iface->altsetting[1];
3711 +- if (get_iface_desc(alts)->bNumEndpoints < 1)
3712 +- return -EINVAL;
3713 ++ if (get_iface_desc(alts)->bNumEndpoints < 1) {
3714 ++ err = -EINVAL;
3715 ++ goto end;
3716 ++ }
3717 + ep = get_endpoint(alts, 0)->bEndpointAddress;
3718 +
3719 + err = snd_usb_ctl_msg(chip->dev,
3720 +diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
3721 +index f93b355756e6..2dfc0abf2e37 100644
3722 +--- a/sound/usb/usx2y/usbusx2yaudio.c
3723 ++++ b/sound/usb/usx2y/usbusx2yaudio.c
3724 +@@ -689,6 +689,8 @@ static int usX2Y_rate_set(struct usX2Ydev *usX2Y, int rate)
3725 + us->submitted = 2*NOOF_SETRATE_URBS;
3726 + for (i = 0; i < NOOF_SETRATE_URBS; ++i) {
3727 + struct urb *urb = us->urb[i];
3728 ++ if (!urb)
3729 ++ continue;
3730 + if (urb->status) {
3731 + if (!err)
3732 + err = -ENODEV;
3733 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
3734 +index ccd5319d1284..04fc04b4ab67 100644
3735 +--- a/tools/objtool/check.c
3736 ++++ b/tools/objtool/check.c
3737 +@@ -2062,14 +2062,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
3738 + !strcmp(insn->sec->name, ".altinstr_aux"))
3739 + return true;
3740 +
3741 ++ if (!insn->func)
3742 ++ return false;
3743 ++
3744 ++ /*
3745 ++ * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
3746 ++ * __builtin_unreachable(). The BUG() macro has an unreachable() after
3747 ++ * the UD2, which causes GCC's undefined trap logic to emit another UD2
3748 ++ * (or occasionally a JMP to UD2).
3749 ++ */
3750 ++ if (list_prev_entry(insn, list)->dead_end &&
3751 ++ (insn->type == INSN_BUG ||
3752 ++ (insn->type == INSN_JUMP_UNCONDITIONAL &&
3753 ++ insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
3754 ++ return true;
3755 ++
3756 + /*
3757 + * Check if this (or a subsequent) instruction is related to
3758 + * CONFIG_UBSAN or CONFIG_KASAN.
3759 + *
3760 + * End the search at 5 instructions to avoid going into the weeds.
3761 + */
3762 +- if (!insn->func)
3763 +- return false;
3764 + for (i = 0; i < 5; i++) {
3765 +
3766 + if (is_kasan_insn(insn) || is_ubsan_insn(insn))
3767 +diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
3768 +index c3343820916a..7cbbbdd932f1 100644
3769 +--- a/tools/objtool/orc_dump.c
3770 ++++ b/tools/objtool/orc_dump.c
3771 +@@ -78,7 +78,7 @@ int orc_dump(const char *_objname)
3772 + char *name;
3773 + size_t nr_sections;
3774 + Elf64_Addr orc_ip_addr = 0;
3775 +- size_t shstrtab_idx;
3776 ++ size_t shstrtab_idx, strtab_idx = 0;
3777 + Elf *elf;
3778 + Elf_Scn *scn;
3779 + GElf_Shdr sh;
3780 +@@ -139,6 +139,8 @@ int orc_dump(const char *_objname)
3781 +
3782 + if (!strcmp(name, ".symtab")) {
3783 + symtab = data;
3784 ++ } else if (!strcmp(name, ".strtab")) {
3785 ++ strtab_idx = i;
3786 + } else if (!strcmp(name, ".orc_unwind")) {
3787 + orc = data->d_buf;
3788 + orc_size = sh.sh_size;
3789 +@@ -150,7 +152,7 @@ int orc_dump(const char *_objname)
3790 + }
3791 + }
3792 +
3793 +- if (!symtab || !orc || !orc_ip)
3794 ++ if (!symtab || !strtab_idx || !orc || !orc_ip)
3795 + return 0;
3796 +
3797 + if (orc_size % sizeof(*orc) != 0) {
3798 +@@ -171,21 +173,29 @@ int orc_dump(const char *_objname)
3799 + return -1;
3800 + }
3801 +
3802 +- scn = elf_getscn(elf, sym.st_shndx);
3803 +- if (!scn) {
3804 +- WARN_ELF("elf_getscn");
3805 +- return -1;
3806 +- }
3807 +-
3808 +- if (!gelf_getshdr(scn, &sh)) {
3809 +- WARN_ELF("gelf_getshdr");
3810 +- return -1;
3811 +- }
3812 +-
3813 +- name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
3814 +- if (!name || !*name) {
3815 +- WARN_ELF("elf_strptr");
3816 +- return -1;
3817 ++ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
3818 ++ scn = elf_getscn(elf, sym.st_shndx);
3819 ++ if (!scn) {
3820 ++ WARN_ELF("elf_getscn");
3821 ++ return -1;
3822 ++ }
3823 ++
3824 ++ if (!gelf_getshdr(scn, &sh)) {
3825 ++ WARN_ELF("gelf_getshdr");
3826 ++ return -1;
3827 ++ }
3828 ++
3829 ++ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
3830 ++ if (!name) {
3831 ++ WARN_ELF("elf_strptr");
3832 ++ return -1;
3833 ++ }
3834 ++ } else {
3835 ++ name = elf_strptr(elf, strtab_idx, sym.st_name);
3836 ++ if (!name) {
3837 ++ WARN_ELF("elf_strptr");
3838 ++ return -1;
3839 ++ }
3840 + }
3841 +
3842 + printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
3843 +diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
3844 +index 7956ea3be667..eed5d5b81226 100755
3845 +--- a/tools/testing/selftests/kmod/kmod.sh
3846 ++++ b/tools/testing/selftests/kmod/kmod.sh
3847 +@@ -502,18 +502,23 @@ function test_num()
3848 + fi
3849 + }
3850 +
3851 +-function get_test_count()
3852 ++function get_test_data()
3853 + {
3854 + test_num $1
3855 +- TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
3856 ++ local field_num=$(echo $1 | sed 's/^0*//')
3857 ++ echo $ALL_TESTS | awk '{print $'$field_num'}'
3858 ++}
3859 ++
3860 ++function get_test_count()
3861 ++{
3862 ++ TEST_DATA=$(get_test_data $1)
3863 + LAST_TWO=${TEST_DATA#*:*}
3864 + echo ${LAST_TWO%:*}
3865 + }
3866 +
3867 + function get_test_enabled()
3868 + {
3869 +- test_num $1
3870 +- TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}')
3871 ++ TEST_DATA=$(get_test_data $1)
3872 + echo ${TEST_DATA#*:*:}
3873 + }
3874 +
3875 +diff --git a/tools/vm/Makefile b/tools/vm/Makefile
3876 +index 20f6cf04377f..9860622cbb15 100644
3877 +--- a/tools/vm/Makefile
3878 ++++ b/tools/vm/Makefile
3879 +@@ -1,6 +1,8 @@
3880 + # SPDX-License-Identifier: GPL-2.0
3881 + # Makefile for vm tools
3882 + #
3883 ++include ../scripts/Makefile.include
3884 ++
3885 + TARGETS=page-types slabinfo page_owner_sort
3886 +
3887 + LIB_DIR = ../lib/api