Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sun, 25 Aug 2019 17:37:12
Message-Id: 1566754610.b62a0b001e2e98264ba2cbe01a779da0ea1a1a35.mpagano@gentoo
commit:     b62a0b001e2e98264ba2cbe01a779da0ea1a1a35
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 25 17:36:50 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 25 17:36:50 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b62a0b00

Linux patch 4.19.68

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1067_linux-4.19.68.patch | 2580 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2584 insertions(+)

diff --git a/0000_README b/0000_README
index 75aee25..dd97210 100644
--- a/0000_README
+++ b/0000_README
@@ -311,6 +311,10 @@ Patch: 1066_linux-4.19.67.patch
 From: https://www.kernel.org
 Desc: Linux 4.19.67

+Patch: 1067_linux-4.19.68.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.68
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1067_linux-4.19.68.patch b/1067_linux-4.19.68.patch
new file mode 100644
index 0000000..a4515f2
--- /dev/null
+++ b/1067_linux-4.19.68.patch
@@ -0,0 +1,2580 @@
+diff --git a/Makefile b/Makefile
+index b6aa6e8d4411..6f164b04d953 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 67
++SUBLEVEL = 68
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index 7ed320895d1f..f52a2968a3b6 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
+ #define alloc_screen_info(x...) &screen_info
+-#define free_screen_info(x...)
++
++static inline void free_screen_info(efi_system_table_t *sys_table_arg,
++ struct screen_info *si)
++{
++}
+
+ /* redeclare as 'hidden' so the compiler will generate relative references */
+ extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index ea423db39364..2214a403f39b 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ PMD_TYPE_SECT)
+
+ #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
+-#define pud_sect(pud) (0)
+-#define pud_table(pud) (1)
++static inline bool pud_sect(pud_t pud) { return false; }
++static inline bool pud_table(pud_t pud) { return true; }
+ #else
+#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
+ PUD_TYPE_SECT)
+diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
+index 57e962290df3..7eff8afa035f 100644
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+- struct plt_entry trampoline;
++ struct plt_entry trampoline, *dst;
+ struct module *mod;
+
+ /*
+@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ * is added in the future, but for now, the pr_err() below
+ * deals with a theoretical issue only.
+ */
++ dst = mod->arch.ftrace_trampoline;
+ trampoline = get_plt_entry(addr);
+- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+- &trampoline)) {
+- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
+- &(struct plt_entry){})) {
++ if (!plt_entries_equal(dst, &trampoline)) {
++ if (!plt_entries_equal(dst, &(struct plt_entry){})) {
+ pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+ return -EINVAL;
+ }
+
+ /* point the trampoline to our ftrace entry point */
+ module_disable_ro(mod);
+- *mod->arch.ftrace_trampoline = trampoline;
++ *dst = trampoline;
+ module_enable_ro(mod, true);
+
+- /* update trampoline before patching in the branch */
+- smp_wmb();
++ /*
++ * Ensure updated trampoline is visible to instruction
++ * fetch before we patch in the branch.
++ */
++ __flush_icache_range((unsigned long)&dst[0],
++ (unsigned long)&dst[1]);
+ }
+- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
++ addr = (unsigned long)dst;
+ #else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+ #endif /* CONFIG_ARM64_MODULE_PLTS */
+diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
+index 933adbc0f654..0311fe52c8ff 100644
+--- a/arch/arm64/kernel/return_address.c
++++ b/arch/arm64/kernel/return_address.c
+@@ -11,6 +11,7 @@
+
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+
+ #include <asm/stack_pointer.h>
+ #include <asm/stacktrace.h>
+@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
+ return 0;
+ }
+ }
++NOKPROBE_SYMBOL(save_return_addr);
+
+ void *return_address(unsigned int level)
+ {
+@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 4989f7ea1e59..bb482ec044b6 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -18,6 +18,7 @@
+ #include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+
+ return 0;
+ }
++NOKPROBE_SYMBOL(unwind_frame);
+
+ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ break;
+ }
+ }
++NOKPROBE_SYMBOL(walk_stackframe);
+
+ #ifdef CONFIG_STACKTRACE
+ struct stack_trace_data {
+diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
+index 7a5173ea2276..4c2e96ef306e 100644
+--- a/arch/arm64/kvm/regmap.c
++++ b/arch/arm64/kvm/regmap.c
+@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
+ switch (spsr_idx) {
+ case KVM_SPSR_SVC:
+ write_sysreg_el1(v, spsr);
++ break;
+ case KVM_SPSR_ABT:
+ write_sysreg(v, spsr_abt);
++ break;
+ case KVM_SPSR_UND:
+ write_sysreg(v, spsr_und);
++ break;
+ case KVM_SPSR_IRQ:
+ write_sysreg(v, spsr_irq);
++ break;
+ case KVM_SPSR_FIQ:
+ write_sysreg(v, spsr_fiq);
++ break;
+ }
+ }
+diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
+index dd6b05bff75b..d911a8c2314d 100644
+--- a/arch/riscv/include/asm/switch_to.h
++++ b/arch/riscv/include/asm/switch_to.h
+@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
+
+ static inline void __fstate_clean(struct pt_regs *regs)
+ {
+- regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
++ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+ }
+
+ static inline void fstate_save(struct task_struct *task,
+diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
+index d9ff3b42da7c..2569ffc061f9 100644
+--- a/arch/sh/kernel/hw_breakpoint.c
++++ b/arch/sh/kernel/hw_breakpoint.c
+@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
++ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index a285fbd0fd9b..15580e4fc766 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -515,6 +515,7 @@ void cpu_reset(void)
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
++ "isync\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index c92c10d55374..5bece9752ed6 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
+ hpriv->phys[port] = NULL;
+ rc = 0;
+ break;
++ case -EPROBE_DEFER:
++ /* Do not complain yet */
++ break;
+
+ default:
+ dev_err(dev,
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index 173e6f2dd9af..eefda51f97d3 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+ unsigned int ret;
+ struct rm_feature_desc *desc;
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 33481368740e..113152425a95 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
+ continue;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
++ if (div > GENERATED_MAX_DIV + 1)
++ div = GENERATED_MAX_DIV + 1;
+
+ clk_generated_best_diff(req, parent, parent_rate, div,
+ &best_diff, &best_rate);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index f4b013e9352d..24485bee9b49 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
+
+ /* Reset module */
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+
+ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+ udelay(35);
+@@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
+
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+ return 0;
+ }
+
+diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
+index 87892471eb96..bad8099832d4 100644
+--- a/drivers/clk/sprd/Kconfig
++++ b/drivers/clk/sprd/Kconfig
+@@ -2,6 +2,7 @@ config SPRD_COMMON_CLK
+ tristate "Clock support for Spreadtrum SoCs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ default ARCH_SPRD
++ select REGMAP_MMIO
+
+ if SPRD_COMMON_CLK
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index f5fb93795a69..65cecfdd9b45 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
++ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index bf6cad6c9178..7a3e5a8f6439 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -46,6 +46,7 @@ config DRM_DUMB_VGA_DAC
+ config DRM_LVDS_ENCODER
+ tristate "Transparent parallel to LVDS encoder support"
+ depends on OF
++ select DRM_KMS_HELPER
+ select DRM_PANEL_BRIDGE
+ help
+ Support for transparent parallel to LVDS encoders that don't require
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+index 0ddb6eec7b11..df228436a03d 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+- } while (retry > 1 &&
++ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
++ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+ }
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index ed9a3a1e50ef..dbfd2c006f74 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1284,7 +1284,8 @@ static int add_gpu_components(struct device *dev,
+ if (!np)
+ return 0;
+
+- drm_of_component_match_add(dev, matchptr, compare_of, np);
++ if (of_device_is_available(np))
++ drm_of_component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index 6e1a4a4fc0c1..ab9da597106f 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+
+ /* Locate the boot interface, to receive the LED change events */
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
++ struct hid_device *boot_hid;
++ struct hid_input *boot_hid_input;
+
+- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
++ if (unlikely(boot_interface == NULL))
++ return -ENODEV;
++
++ boot_hid = usb_get_intfdata(boot_interface);
++ boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index a746017fac17..5a949ca42b1d 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
+ spin_unlock_irq(&list->hiddev->list_lock);
+
+ mutex_lock(&hiddev->existancelock);
++ /*
++ * recheck exist with existance lock held to
++ * avoid opening a disconnected device
++ */
++ if (!list->hiddev->exist) {
++ res = -ENODEV;
++ goto bail_unlock;
++ }
+ if (!list->hiddev->open++)
+ if (list->hiddev->exist) {
+ struct hid_device *hid = hiddev->hid;
+@@ -313,6 +321,10 @@ bail_normal_power:
+ hid_hw_power(hid, PM_HINT_NORMAL);
+bail_unlock:
+ mutex_unlock(&hiddev->existancelock);
++
++ spin_lock_irq(&list->hiddev->list_lock);
++ list_del(&list->node);
++ spin_unlock_irq(&list->hiddev->list_lock);
+bail:
+ file->private_data = NULL;
+ vfree(list);
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index ce9af43fa2de..49c1956e6a67 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
+ if (ret)
+ return ret;
+
+- regval = ret & MAX9611_TEMP_MASK;
++ regval &= MAX9611_TEMP_MASK;
+
+ if ((regval > MAX9611_TEMP_MAX_POS &&
+ regval < MAX9611_TEMP_MIN_NEG) ||
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index ef459f2f2eeb..7586c1dd73f1 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
+ if (has_smi)
+ cq_size *= 2;
+
++ port_priv->pd = ib_alloc_pd(device, 0);
++ if (IS_ERR(port_priv->pd)) {
++ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
++ ret = PTR_ERR(port_priv->pd);
++ goto error3;
++ }
++
+ port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(port_priv->cq)) {
+ dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+ ret = PTR_ERR(port_priv->cq);
+- goto error3;
+- }
+-
+- port_priv->pd = ib_alloc_pd(device, 0);
+- if (IS_ERR(port_priv->pd)) {
+- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+- ret = PTR_ERR(port_priv->pd);
+ goto error4;
+ }
+
+@@ -3236,11 +3236,11 @@ error8:
+error7:
+ destroy_mad_qp(&port_priv->qp_info[0]);
+error6:
+- ib_dealloc_pd(port_priv->pd);
+-error4:
+ ib_free_cq(port_priv->cq);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
++error4:
++ ib_dealloc_pd(port_priv->pd);
+error3:
+ kfree(port_priv);
+
+@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
+ destroy_workqueue(port_priv->wq);
+ destroy_mad_qp(&port_priv->qp_info[1]);
+ destroy_mad_qp(&port_priv->qp_info[0]);
+- ib_dealloc_pd(port_priv->pd);
+ ib_free_cq(port_priv->cq);
++ ib_dealloc_pd(port_priv->pd);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
+ /* XXX: Handle deallocation of MAD registration tables */
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index c34a6852d691..a18f3f8ad77f 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -49,6 +49,7 @@
+ #include <linux/sched.h>
+ #include <linux/semaphore.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+
+ #include <linux/uaccess.h>
+
+@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
+
+ if (get_user(id, arg))
+ return -EFAULT;
++ if (id >= IB_UMAD_MAX_AGENTS)
++ return -EINVAL;
+
+ mutex_lock(&file->port->file_mutex);
+ mutex_lock(&file->mutex);
+
+- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
++ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
++ if (!__get_agent(file, id)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 9bab4fb65c68..bd1fdadf7ba0 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
+-{
+- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
+-}
+
+ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+ {
+ return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+ }
+
+-static bool use_umr(struct mlx5_ib_dev *dev, int order)
+-{
+- return order <= mr_cache_max_order(dev) &&
+- umr_can_modify_entity_size(dev);
+-}
+-
+ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ {
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+@@ -1305,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ {
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+- bool populate_mtts = false;
++ bool use_umr;
+ struct ib_umem *umem;
+ int page_shift;
+ int npages;
+@@ -1338,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ if (err < 0)
+ return ERR_PTR(err);
+
+- if (use_umr(dev, order)) {
++ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
++ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
++ !MLX5_CAP_GEN(dev->mdev, atomic));
++
++ if (order <= mr_cache_max_order(dev) && use_umr) {
+ mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+ page_shift, order, access_flags);
+ if (PTR_ERR(mr) == -EAGAIN) {
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
+ mr = NULL;
+ }
+- populate_mtts = false;
+ } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
+ goto error;
+ }
+- populate_mtts = true;
++ use_umr = false;
+ }
+
+ if (!mr) {
+- if (!umr_can_modify_entity_size(dev))
+- populate_mtts = true;
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+- page_shift, access_flags, populate_mtts);
++ page_shift, access_flags, !use_umr);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
+
+@@ -1378,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ update_odp_mr(mr);
+ #endif
+
+- if (!populate_mtts) {
++ if (use_umr) {
+ int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
+index 78073259c9a1..c431df7401b4 100644
+--- a/drivers/input/joystick/iforce/iforce-usb.c
++++ b/drivers/input/joystick/iforce/iforce-usb.c
+@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
+ return -ENODEV;
+
+ epirq = &interface->endpoint[0].desc;
++ if (!usb_endpoint_is_int_in(epirq))
++ return -ENODEV;
++
+ epout = &interface->endpoint[1].desc;
++ if (!usb_endpoint_is_int_out(epout))
++ return -ENODEV;
+
+ if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
+ goto fail;
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 10a039148234..538986e5ac5b 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -161,7 +161,8 @@ struct trackpoint_data {
+ #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
+ int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
+ #else
+-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
++static inline int trackpoint_detect(struct psmouse *psmouse,
++ bool set_properties)
+ {
+ return -ENOSYS;
+ }
+diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
+index 75b500651e4e..b1cf0c971274 100644
+--- a/drivers/input/tablet/kbtab.c
++++ b/drivers/input/tablet/kbtab.c
+@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
++ if (!usb_endpoint_is_int_in(endpoint))
++ return -ENODEV;
++
+ kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!kbtab || !input_dev)
+@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
+ input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
+
+- endpoint = &intf->cur_altsetting->endpoint[0].desc;
+-
+ usb_fill_int_urb(kbtab->irq, dev,
+ usb_rcvintpipe(dev, endpoint->bEndpointAddress),
+ kbtab->data, 8,
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 3a1d30304f7e..66b4800bcdd8 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
+ NULL,
+ };
+
+-static int iommu_init_pci(struct amd_iommu *iommu)
++static int __init iommu_init_pci(struct amd_iommu *iommu)
+ {
+ int cap_ptr = iommu->cap_ptr;
+ u32 range, misc, low, high;
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index ee30e8965d1b..9ba73e11757d 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)
+
+ if (!its_alloc_vpe_table(vpe_id)) {
+ its_vpe_id_free(vpe_id);
+- its_free_pending_table(vpe->vpt_page);
++ its_free_pending_table(vpt_page);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
+index 4760307ab43f..cef8f5e2e8fc 100644
+--- a/drivers/irqchip/irq-imx-gpcv2.c
++++ b/drivers/irqchip/irq-imx-gpcv2.c
+@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
+ .irq_unmask = imx_gpcv2_irq_unmask,
+ .irq_set_wake = imx_gpcv2_irq_set_wake,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_set_type = irq_chip_set_type_parent,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ #endif
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 7d480c930eaf..7e426e4d1352 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -130,6 +130,7 @@ struct mapped_device {
+ };
+
+ int md_in_flight(struct mapped_device *md);
++void disable_discard(struct mapped_device *md);
+ void disable_write_same(struct mapped_device *md);
+ void disable_write_zeroes(struct mapped_device *md);
+
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 6e547b8dd298..264b84e274aa 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
+ }
+
+ if (unlikely(error == BLK_STS_TARGET)) {
+- if (req_op(clone) == REQ_OP_WRITE_SAME &&
+- !clone->q->limits.max_write_same_sectors)
++ if (req_op(clone) == REQ_OP_DISCARD &&
++ !clone->q->limits.max_discard_sectors)
++ disable_discard(tio->md);
++ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
++ !clone->q->limits.max_write_same_sectors)
+ disable_write_same(tio->md);
+- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+- !clone->q->limits.max_write_zeroes_sectors)
++ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
++ !clone->q->limits.max_write_zeroes_sectors)
+ disable_write_zeroes(tio->md);
+ }
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 42768fe92b41..c9860e3b04dd 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
+ }
+ }
+
++void disable_discard(struct mapped_device *md)
++{
++ struct queue_limits *limits = dm_get_queue_limits(md);
++
++ /* device doesn't really support DISCARD, disable it */
++ limits->max_discard_sectors = 0;
++ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
++}
++
+ void disable_write_same(struct mapped_device *md)
+ {
+ struct queue_limits *limits = dm_get_queue_limits(md);
+@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
+ dm_endio_fn endio = tio->ti->type->end_io;
+
+ if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+- !bio->bi_disk->queue->limits.max_write_same_sectors)
++ if (bio_op(bio) == REQ_OP_DISCARD &&
++ !bio->bi_disk->queue->limits.max_discard_sectors)
++ disable_discard(md);
++ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
++ !bio->bi_disk->queue->limits.max_write_same_sectors)
+ disable_write_same(md);
+- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
++ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
++ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ disable_write_zeroes(md);
+ }
+
+diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
+index 7fdac277e382..9c77bfe4334f 100644
+--- a/drivers/mmc/host/sdhci-of-arasan.c
++++ b/drivers/mmc/host/sdhci-of-arasan.c
+@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+- dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+ goto unreg_clk;
+ }
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index be0b785becd0..8f14f85b8e95 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
+done:
+ bond_dev->vlan_features = vlan_features;
+ bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_GSO_UDP_L4;
+ bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(bond_dev, gso_max_size);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 33baa17fa9d5..cf01e73d1bcc 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3058,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+ /* if VF indicate to PF this function is going down (PF will delete sp
+ * elements and clear initializations
+ */
+- if (IS_VF(bp))
++ if (IS_VF(bp)) {
++ bnx2x_clear_vlan_info(bp);
+ bnx2x_vfpf_close_vf(bp);
+- else if (unload_mode != UNLOAD_RECOVERY)
++ } else if (unload_mode != UNLOAD_RECOVERY) {
+ /* if this is a normal/close unload need to clean up chip*/
+ bnx2x_chip_cleanup(bp, unload_mode, keep_link);
+- else {
++ } else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index 0e508e5defce..ee5159ef837e 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
+ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+ int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+
++void bnx2x_clear_vlan_info(struct bnx2x *bp);
++
+ /**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 2c9af0f420e5..68c62e32e882 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ return rc;
+ }
+
++void bnx2x_clear_vlan_info(struct bnx2x *bp)
++{
++ struct bnx2x_vlan_entry *vlan;
++
++ /* Mark that hw forgot all entries */
++ list_for_each_entry(vlan, &bp->vlan_reg, link)
++ vlan->hw = false;
++
++ bp->vlan_cnt = 0;
++}
++
+ static int bnx2x_del_all_vlans(struct bnx2x *bp)
+ {
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+- struct bnx2x_vlan_entry *vlan;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
+ if (rc)
+ return rc;
+
+- /* Mark that hw forgot all entries */
+- list_for_each_entry(vlan, &bp->vlan_reg, link)
+- vlan->hw = false;
+- bp->vlan_cnt = 0;
++ bnx2x_clear_vlan_info(bp);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index f5cd9539980f..45d9a5f8fa1b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
+ if (err) {
+ en_err(priv, "Failed to allocate RSS indirection QP\n");
+- goto rss_err;
++ goto qp_alloc_err;
+ }
+
+ rss_map->indir_qp->event = mlx4_en_sqp_event;
+@@ -1244,6 +1244,7 @@ indir_err:
+ MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
++qp_alloc_err:
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
+rss_err:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index 45cdde694d20..a4be04debe67 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+ return &arfs_t->rules_hash[bucket_idx];
+ }
+
+-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+-{
+- return (skb->protocol == htons(ETH_P_IP)) ?
+- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+-}
+-
+ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+ u8 ip_proto, __be16 etype)
+ {
+@@ -599,31 +593,9 @@ out:
+ arfs_may_expire_flow(priv);
+ }
+
+-/* return L4 destination port from ip4/6 packets */
+-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+-{
+- char *transport_header;
+-
+- transport_header = skb_transport_header(skb);
+- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+- return ((struct tcphdr *)transport_header)->dest;
+- return ((struct udphdr *)transport_header)->dest;
+-}
+-
+-/* return L4 source port from ip4/6 packets */
+-static __be16 arfs_get_src_port(const struct sk_buff *skb)
+-{
+- char *transport_header;
+-
+- transport_header = skb_transport_header(skb);
+- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+- return ((struct tcphdr *)transport_header)->source;
+- return ((struct udphdr *)transport_header)->source;
+-}
+-
+ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ struct arfs_table *arfs_t,
+- const struct sk_buff *skb,
++ const struct flow_keys *fk,
+ u16 rxq, u32 flow_id)
+ {
+ struct arfs_rule *rule;
+@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+ tuple = &rule->tuple;
+- tuple->etype = skb->protocol;
++ tuple->etype = fk->basic.n_proto;
++ tuple->ip_proto = fk->basic.ip_proto;
+ if (tuple->etype == htons(ETH_P_IP)) {
+- tuple->src_ipv4 = ip_hdr(skb)->saddr;
+- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
++ tuple->src_ipv4 = fk->addrs.v4addrs.src;
++ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
+ } else {
+- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
++ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
++ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ }
+- tuple->ip_proto = arfs_get_ip_proto(skb);
+- tuple->src_port = arfs_get_src_port(skb);
+- tuple->dst_port = arfs_get_dst_port(skb);
++ tuple->src_port = fk->ports.src;
++ tuple->dst_port = fk->ports.dst;
+
+ rule->flow_id = flow_id;
+ rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ return rule;
+ }
+
+-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+- const struct sk_buff *skb)
++static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
+ {
+- if (tuple->etype == htons(ETH_P_IP) &&
+- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+- return true;
+- if (tuple->etype == htons(ETH_P_IPV6) &&
+- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+- sizeof(struct in6_addr))) &&
+- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+- sizeof(struct in6_addr))))
+- return true;
++ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
++ return false;
++ if (tuple->etype != fk->basic.n_proto)
++ return false;
++ if (tuple->etype == htons(ETH_P_IP))
++ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
++ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
++ if (tuple->etype == htons(ETH_P_IPV6))
++ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
++ sizeof(struct in6_addr)) &&
++ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
++ sizeof(struct in6_addr));
+ return false;
+ }
+
+ static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+- const struct sk_buff *skb)
++ const struct flow_keys *fk)
+ {
+ struct arfs_rule *arfs_rule;
+ struct hlist_head *head;
+- __be16 src_port = arfs_get_src_port(skb);
+- __be16 dst_port = arfs_get_dst_port(skb);
+
+- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
++ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
+ hlist_for_each_entry(arfs_rule, head, hlist) {
+- if (arfs_rule->tuple.src_port == src_port &&
+- arfs_rule->tuple.dst_port == dst_port &&
+- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
++ if (arfs_cmp(&arfs_rule->tuple, fk))
+ return arfs_rule;
+- }
+ }
+
+ return NULL;
+@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_table *arfs_t;
+ struct arfs_rule *arfs_rule;
++ struct flow_keys fk;
++
++ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
++ return -EPROTONOSUPPORT;
+
+- if (skb->protocol != htons(ETH_P_IP) &&
+- skb->protocol != htons(ETH_P_IPV6))
++ if (fk.basic.n_proto != htons(ETH_P_IP) &&
++ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
++ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
+- arfs_rule = arfs_find_rule(arfs_t, skb);
++ arfs_rule = arfs_find_rule(arfs_t, &fk);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index) {
+ spin_unlock_bh(&arfs->arfs_lock);
+@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ }
+ arfs_rule->rxq = rxq_index;
+ } else {
+- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+- rxq_index, flow_id);
++ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
+ if (!arfs_rule) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 792bb8bc0cd3..2b9350f4c752 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
++ return -EOPNOTSUPP;
++
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index dc30f11f4766..3feb49badda9 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_GSO_UDP_L4;
+ team->dev->hard_header_len = max_hard_header_len;
+
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index f4247b275e09..b7a0df95d4b0 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
+ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
+ {
+ int i;
+- __u8 tmp;
++ __u8 tmp = 0;
+ __le16 retdatai;
+ int ret;
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index d5081ffdc8f0..1c849106b793 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
++ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
+ if (net_ratelimit())
+@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+
+ if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
+ /* Failure in xenvif_set_skb_gso is fatal. */
++ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ kfree_skb(nskb);
+ break;
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index 1c64fd8e9234..72bdda4ccebf 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -263,7 +263,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
+ export->pwm = pwm;
+ mutex_init(&export->lock);
+
+- export->child.class = parent->class;
+ export->child.release = pwm_export_release;
+ export->child.parent = parent;
+ export->child.devt = MKDEV(0, 0);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index c43eccdea65d..f570b8c5d857 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
++ if (cmd)
++ cmd->result = 0;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+- c2->error_data.status == 0))
++ c2->error_data.status == 0)) {
++ cmd->result = 0;
+ return hpsa_cmd_free_and_done(h, c, cmd);
++ }
+
+ /*
+ * Any RAID offload error results in retry which will use
+@@ -5617,6 +5621,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+ }
+ c = cmd_tagged_alloc(h, cmd);
+
++ /*
++ * This is necessary because the SML doesn't zero out this field during
++ * error recovery.
++ */
++ cmd->result = 0;
++
+ /*
+ * Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index f84f9bf15027..ddce32fe0513 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+ ql_log(ql_log_warn, vha, 0xd049,
+ "Failed to allocate ct_sns request.\n");
+ kfree(fcport);
+- fcport = NULL;
++ return NULL;
+ }
+ INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_LIST_HEAD(&fcport->gnl_entry);
+diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
+index 2edf3ee91300..caf4d4df4bd3 100644
+--- a/drivers/staging/comedi/drivers/dt3000.c
++++ b/drivers/staging/comedi/drivers/dt3000.c
+@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
+ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ unsigned int flags)
+ {
+- int divider, base, prescale;
++ unsigned int divider, base, prescale;
+
+- /* This function needs improvment */
++ /* This function needs improvement */
+ /* Don't know if divider==0 works. */
+
+ for (prescale = 0; prescale < 16; prescale++) {
+@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ divider = (*nanosec) / base;
+ break;
+ case CMDF_ROUND_UP:
+- divider = (*nanosec) / base;
++ divider = DIV_ROUND_UP(*nanosec, base);
+ break;
+ }
+ if (divider < 65536) {
+@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ }
+
+ prescale = 15;
+- base = timer_base * (1 << prescale);
++ base = timer_base * (prescale + 1);
+ divider = 65535;
+ *nanosec = divider * base;
+ return (prescale << 16) | (divider);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 5b442bc68a76..59675cc7aa01 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1333,10 +1333,6 @@ made_compressed_probe:
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
+
+- minor = acm_alloc_minor(acm);
+- if (minor < 0)
+- goto alloc_fail1;
+-
+ ctrlsize = usb_endpoint_maxp(epctrl);
+ readsize = usb_endpoint_maxp(epread) *
+ (quirks == SINGLE_RX_URB ? 1 : 2);
+@@ -1344,6 +1340,13 @@ made_compressed_probe:
+ acm->writesize = usb_endpoint_maxp(epwrite) * 20;
+ acm->control = control_interface;
+ acm->data = data_interface;
++
++ usb_get_intf(acm->control); /* undone in destruct() */
++
++ minor = acm_alloc_minor(acm);
++ if (minor < 0)
++ goto alloc_fail1;
++
+ acm->minor = minor;
+ acm->dev = usb_dev;
+ if (h.usb_cdc_acm_descriptor)
+@@ -1490,7 +1493,6 @@ skip_countries:
+ usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ usb_set_intfdata(data_interface, acm);
+
+- usb_get_intf(control_interface);
+ tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
+ &control_interface->dev);
+ if (IS_ERR(tty_dev)) {
+diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
+index 65de6f73b672..558890ada0e5 100644
+--- a/drivers/usb/core/file.c
++++ b/drivers/usb/core/file.c
+@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
+ intf->minor = minor;
+ break;
+ }
+- up_write(&minor_rwsem);
+- if (intf->minor < 0)
++ if (intf->minor < 0) {
++ up_write(&minor_rwsem);
+ return -EXFULL;
++ }
+
+ /* create a usb class device for this usb interface */
+ snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
+@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
+ MKDEV(USB_MAJOR, minor), class_driver,
+ "%s", kbasename(name));
+ if (IS_ERR(intf->usb_dev)) {
+- down_write(&minor_rwsem);
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
+- up_write(&minor_rwsem);
+ retval = PTR_ERR(intf->usb_dev);
+ }
++ up_write(&minor_rwsem);
+ return retval;
+ }
+ EXPORT_SYMBOL_GPL(usb_register_dev);
+@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
+ return;
+
+ dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
++ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
+
+ down_write(&minor_rwsem);
+ usb_minors[intf->minor] = NULL;
+ up_write(&minor_rwsem);
+
+- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
+ intf->usb_dev = NULL;
+ intf->minor = -1;
+ destroy_usb_class();
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 4020ce8db6ce..0d3fd2083165 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ (struct usb_cdc_dmm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_TYPE:
+- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
++ if (elength < sizeof(struct usb_cdc_mdlm_desc))
+ goto next_desc;
+ if (desc)
+ return -EINVAL;
+ desc = (struct usb_cdc_mdlm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
++ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
+ goto next_desc;
+ if (detail)
+ return -EINVAL;
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index fea02c7ad4f4..a5254e82d628 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -19,6 +19,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/sizes.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/sys_soc.h>
+ #include <linux/uaccess.h>
+ #include <linux/usb/ch9.h>
+@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ if (usb3->forced_b_device)
+ return -EBUSY;
+
+- if (!strncmp(buf, "host", strlen("host")))
++ if (sysfs_streq(buf, "host"))
+ new_mode_is_host = true;
+- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
++ else if (sysfs_streq(buf, "peripheral"))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e0a4749ba565..56f572cb08f8 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+
++ /* Motorola devices */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
+
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
+ .driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 73427d8e0116..e5694133ebe5 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
+ {
+ int err;
+ u16 old_value;
+- pci_power_t new_state, old_state;
++ pci_power_t new_state;
+
+ err = pci_read_config_word(dev, offset, &old_value);
+ if (err)
+ goto out;
+
+- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+
+ new_value &= PM_OK_BITS;
1426 +diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
1427 +index ac6c383d6314..19855659f650 100644
1428 +--- a/fs/btrfs/backref.c
1429 ++++ b/fs/btrfs/backref.c
1430 +@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
1431 + goto out;
1432 + }
1433 +
1434 +- trans = btrfs_attach_transaction(root);
1435 ++ trans = btrfs_join_transaction_nostart(root);
1436 + if (IS_ERR(trans)) {
1437 + if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1438 + ret = PTR_ERR(trans);
1439 +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1440 +index f1ca53a3ff0b..26317bca5649 100644
1441 +--- a/fs/btrfs/transaction.c
1442 ++++ b/fs/btrfs/transaction.c
1443 +@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
1444 + [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
1445 + [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
1446 + __TRANS_ATTACH |
1447 +- __TRANS_JOIN),
1448 ++ __TRANS_JOIN |
1449 ++ __TRANS_JOIN_NOSTART),
1450 + [TRANS_STATE_UNBLOCKED] = (__TRANS_START |
1451 + __TRANS_ATTACH |
1452 + __TRANS_JOIN |
1453 +- __TRANS_JOIN_NOLOCK),
1454 ++ __TRANS_JOIN_NOLOCK |
1455 ++ __TRANS_JOIN_NOSTART),
1456 + [TRANS_STATE_COMPLETED] = (__TRANS_START |
1457 + __TRANS_ATTACH |
1458 + __TRANS_JOIN |
1459 +- __TRANS_JOIN_NOLOCK),
1460 ++ __TRANS_JOIN_NOLOCK |
1461 ++ __TRANS_JOIN_NOSTART),
1462 + };
1463 +
1464 + void btrfs_put_transaction(struct btrfs_transaction *transaction)
1465 +@@ -531,7 +534,8 @@ again:
1466 + ret = join_transaction(fs_info, type);
1467 + if (ret == -EBUSY) {
1468 + wait_current_trans(fs_info);
1469 +- if (unlikely(type == TRANS_ATTACH))
1470 ++ if (unlikely(type == TRANS_ATTACH ||
1471 ++ type == TRANS_JOIN_NOSTART))
1472 + ret = -ENOENT;
1473 + }
1474 + } while (ret == -EBUSY);
1475 +@@ -647,6 +651,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
1476 + BTRFS_RESERVE_NO_FLUSH, true);
1477 + }
1478 +
1479 ++/*
1480 ++ * Similar to a regular join, but it never starts a transaction: neither
1481 ++ * when none is running nor after waiting for the current one to finish.
1482 ++ */
1483 ++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
1484 ++{
1485 ++ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
1486 ++ BTRFS_RESERVE_NO_FLUSH, true);
1487 ++}
1488 ++
1489 + /*
1490 + * btrfs_attach_transaction() - catch the running transaction
1491 + *
1492 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
1493 +index 4cbb1b55387d..c1d34cc70472 100644
1494 +--- a/fs/btrfs/transaction.h
1495 ++++ b/fs/btrfs/transaction.h
1496 +@@ -97,11 +97,13 @@ struct btrfs_transaction {
1497 + #define __TRANS_JOIN (1U << 11)
1498 + #define __TRANS_JOIN_NOLOCK (1U << 12)
1499 + #define __TRANS_DUMMY (1U << 13)
1500 ++#define __TRANS_JOIN_NOSTART (1U << 14)
1501 +
1502 + #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
1503 + #define TRANS_ATTACH (__TRANS_ATTACH)
1504 + #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
1505 + #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
1506 ++#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
1507 +
1508 + #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
1509 +
1510 +@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
1511 + int min_factor);
1512 + struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
1513 + struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
1514 ++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
1515 + struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
1516 + struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
1517 + struct btrfs_root *root);
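
The new TRANS_JOIN_NOSTART type lets btrfs_check_shared() piggy-back on a transaction that is already running without ever starting one: start_transaction() now maps join_transaction()'s -EBUSY to -ENOENT for this type, the same convention TRANS_ATTACH uses, and the caller treats -ENOENT as "walk without a transaction". A rough user-space model of that contract (struct txn and join_nostart() are hypothetical stand-ins):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct txn { bool running; };

    /* Model of the "nostart" join: attach to a running transaction,
     * but never create one.  -ENOENT mirrors what start_transaction()
     * returns for TRANS_JOIN_NOSTART when nothing is running. */
    static int join_nostart(struct txn *t)
    {
        if (!t->running)
            return -ENOENT;
        return 0;
    }

    int main(void)
    {
        struct txn t = { .running = false };

        /* Caller pattern from btrfs_check_shared(): -ENOENT is not an
         * error, it just means "proceed without a transaction handle". */
        if (join_nostart(&t) == -ENOENT)
            printf("no transaction running, proceeding without one\n");

        t.running = true;
        if (join_nostart(&t) == 0)
            printf("joined the already-running transaction\n");
        return 0;
    }
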
1518 +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
1519 +index 3a24ce3deb01..c146e12a8601 100644
1520 +--- a/fs/ocfs2/xattr.c
1521 ++++ b/fs/ocfs2/xattr.c
1522 +@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1523 + u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
1524 + int low_bucket = 0, bucket, high_bucket;
1525 + struct ocfs2_xattr_bucket *search;
1526 +- u32 last_hash;
1527 + u64 blkno, lower_blkno = 0;
1528 +
1529 + search = ocfs2_xattr_bucket_new(inode);
1530 +@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
1531 + if (xh->xh_count)
1532 + xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
1533 +
1534 +- last_hash = le32_to_cpu(xe->xe_name_hash);
1535 +-
1536 + /* record lower_blkno which may be the insert place. */
1537 + lower_blkno = blkno;
1538 +
1539 +diff --git a/fs/seq_file.c b/fs/seq_file.c
1540 +index 1dea7a8a5255..05e58b56f620 100644
1541 +--- a/fs/seq_file.c
1542 ++++ b/fs/seq_file.c
1543 +@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
1544 + }
1545 + if (seq_has_overflowed(m))
1546 + goto Eoverflow;
1547 ++ p = m->op->next(m, p, &m->index);
1548 + if (pos + m->count > offset) {
1549 + m->from = offset - pos;
1550 + m->count -= m->from;
1551 +@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
1552 + }
1553 + pos += m->count;
1554 + m->count = 0;
1555 +- p = m->op->next(m, p, &m->index);
1556 + if (pos == offset)
1557 + break;
1558 + }
1559 +diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
1560 +index c64bea7a52be..e9f20b813a69 100644
1561 +--- a/include/asm-generic/getorder.h
1562 ++++ b/include/asm-generic/getorder.h
1563 +@@ -7,24 +7,6 @@
1564 + #include <linux/compiler.h>
1565 + #include <linux/log2.h>
1566 +
1567 +-/*
1568 +- * Runtime evaluation of get_order()
1569 +- */
1570 +-static inline __attribute_const__
1571 +-int __get_order(unsigned long size)
1572 +-{
1573 +- int order;
1574 +-
1575 +- size--;
1576 +- size >>= PAGE_SHIFT;
1577 +-#if BITS_PER_LONG == 32
1578 +- order = fls(size);
1579 +-#else
1580 +- order = fls64(size);
1581 +-#endif
1582 +- return order;
1583 +-}
1584 +-
1585 + /**
1586 + * get_order - Determine the allocation order of a memory size
1587 + * @size: The size for which to get the order
1588 +@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
1589 + * to hold an object of the specified size.
1590 + *
1591 + * The result is undefined if the size is 0.
1592 +- *
1593 +- * This function may be used to initialise variables with compile time
1594 +- * evaluations of constants.
1595 + */
1596 +-#define get_order(n) \
1597 +-( \
1598 +- __builtin_constant_p(n) ? ( \
1599 +- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
1600 +- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
1601 +- ilog2((n) - 1) - PAGE_SHIFT + 1) \
1602 +- ) : \
1603 +- __get_order(n) \
1604 +-)
1605 ++static inline __attribute_const__ int get_order(unsigned long size)
1606 ++{
1607 ++ if (__builtin_constant_p(size)) {
1608 ++ if (!size)
1609 ++ return BITS_PER_LONG - PAGE_SHIFT;
1610 ++
1611 ++ if (size < (1UL << PAGE_SHIFT))
1612 ++ return 0;
1613 ++
1614 ++ return ilog2((size) - 1) - PAGE_SHIFT + 1;
1615 ++ }
1616 ++
1617 ++ size--;
1618 ++ size >>= PAGE_SHIFT;
1619 ++#if BITS_PER_LONG == 32
1620 ++ return fls(size);
1621 ++#else
1622 ++ return fls64(size);
1623 ++#endif
1624 ++}
1625 +
1626 + #endif /* __ASSEMBLY__ */
1627 +
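
The rewrite folds __get_order() and the constant-folding macro into one inline function without changing the arithmetic: the order is the base-2 logarithm, rounded up, of the number of pages needed. A user-space mirror of the runtime branch, assuming PAGE_SHIFT is 12 (4 KiB pages) and using a shift loop in place of fls()/fls64():

    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12  /* assumes 4 KiB pages */

    /* User-space mirror of the runtime branch above: order is the
     * number of pages, rounded up, expressed as a power of two. */
    static int get_order_demo(unsigned long size)
    {
        int order = 0;

        size--;
        size >>= DEMO_PAGE_SHIFT;
        while (size) {          /* equivalent of fls()/fls64() */
            size >>= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        printf("get_order(4096)  = %d\n", get_order_demo(4096));  /* 0 */
        printf("get_order(4097)  = %d\n", get_order_demo(4097));  /* 1 */
        printf("get_order(8192)  = %d\n", get_order_demo(8192));  /* 1 */
        printf("get_order(65536) = %d\n", get_order_demo(65536)); /* 4 */
        return 0;
    }
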
1628 +diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
1629 +index fbf5cfc9b352..fd965ffbb92e 100644
1630 +--- a/include/drm/i915_pciids.h
1631 ++++ b/include/drm/i915_pciids.h
1632 +@@ -386,6 +386,7 @@
1633 + INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
1634 + INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
1635 + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
1636 ++ INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
1637 + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
1638 +
1639 + /* CFL H */
1640 +diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
1641 +index 90ac450745f1..561fefc2a980 100644
1642 +--- a/include/kvm/arm_vgic.h
1643 ++++ b/include/kvm/arm_vgic.h
1644 +@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
1645 +
1646 + void kvm_vgic_load(struct kvm_vcpu *vcpu);
1647 + void kvm_vgic_put(struct kvm_vcpu *vcpu);
1648 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
1649 +
1650 + #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
1651 + #define vgic_initialized(k) ((k)->arch.vgic.initialized)
1652 +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
1653 +index 4e3625109b28..64d54acc9928 100644
1654 +--- a/kernel/sched/cpufreq_schedutil.c
1655 ++++ b/kernel/sched/cpufreq_schedutil.c
1656 +@@ -40,6 +40,7 @@ struct sugov_policy {
1657 + struct task_struct *thread;
1658 + bool work_in_progress;
1659 +
1660 ++ bool limits_changed;
1661 + bool need_freq_update;
1662 + };
1663 +
1664 +@@ -90,8 +91,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
1665 + !cpufreq_this_cpu_can_update(sg_policy->policy))
1666 + return false;
1667 +
1668 +- if (unlikely(sg_policy->need_freq_update))
1669 ++ if (unlikely(sg_policy->limits_changed)) {
1670 ++ sg_policy->limits_changed = false;
1671 ++ sg_policy->need_freq_update = true;
1672 + return true;
1673 ++ }
1674 +
1675 + delta_ns = time - sg_policy->last_freq_update_time;
1676 +
1677 +@@ -405,7 +409,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
1678 + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
1679 + {
1680 + if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
1681 +- sg_policy->need_freq_update = true;
1682 ++ sg_policy->limits_changed = true;
1683 + }
1684 +
1685 + static void sugov_update_single(struct update_util_data *hook, u64 time,
1686 +@@ -425,7 +429,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
1687 + if (!sugov_should_update_freq(sg_policy, time))
1688 + return;
1689 +
1690 +- busy = sugov_cpu_is_busy(sg_cpu);
1691 ++ /* Limits may have changed, don't skip frequency update */
1692 ++ busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
1693 +
1694 + util = sugov_get_util(sg_cpu);
1695 + max = sg_cpu->max;
1696 +@@ -798,6 +803,7 @@ static int sugov_start(struct cpufreq_policy *policy)
1697 + sg_policy->last_freq_update_time = 0;
1698 + sg_policy->next_freq = 0;
1699 + sg_policy->work_in_progress = false;
1700 ++ sg_policy->limits_changed = false;
1701 + sg_policy->need_freq_update = false;
1702 + sg_policy->cached_raw_freq = 0;
1703 +
1704 +@@ -849,7 +855,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
1705 + mutex_unlock(&sg_policy->work_lock);
1706 + }
1707 +
1708 +- sg_policy->need_freq_update = true;
1709 ++ sg_policy->limits_changed = true;
1710 + }
1711 +
1712 + static struct cpufreq_governor schedutil_gov = {
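
Splitting limits_changed from need_freq_update separates the cross-CPU notification (sugov_limits() and the deadline hook only raise limits_changed) from the local decision to act on it: sugov_should_update_freq() consumes the flag and arms need_freq_update, which then keeps the "busy CPU" heuristic in sugov_update_single() from vetoing the update. A compressed model of that handshake (struct gov and the helpers are illustrative; the rate limiting is elided):

    #include <stdbool.h>
    #include <stdio.h>

    struct gov {
        bool limits_changed;    /* set remotely when limits move */
        bool need_freq_update;  /* consumed locally by the update path */
    };

    static bool should_update_freq(struct gov *g)
    {
        if (g->limits_changed) {
            g->limits_changed = false;
            g->need_freq_update = true;
            return true;        /* bypass the rate-limit check */
        }
        return false;           /* rate limiting elided in this sketch */
    }

    static void update_single(struct gov *g)
    {
        bool busy;

        if (!should_update_freq(g))
            return;
        /* Limits may have changed: don't let "busy" veto the update */
        busy = !g->need_freq_update && true /* sugov_cpu_is_busy() */;
        printf("freq update runs, busy veto %s\n", busy ? "on" : "off");
        g->need_freq_update = false;
    }

    int main(void)
    {
        struct gov g = { 0 };

        g.limits_changed = true;    /* e.g. sugov_limits() fired */
        update_single(&g);
        return 0;
    }
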
1713 +diff --git a/mm/kmemleak.c b/mm/kmemleak.c
1714 +index 6c94b6865ac2..5eeabece0c17 100644
1715 +--- a/mm/kmemleak.c
1716 ++++ b/mm/kmemleak.c
1717 +@@ -126,7 +126,7 @@
1718 + /* GFP bitmask for kmemleak internal allocations */
1719 + #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
1720 + __GFP_NORETRY | __GFP_NOMEMALLOC | \
1721 +- __GFP_NOWARN | __GFP_NOFAIL)
1722 ++ __GFP_NOWARN)
1723 +
1724 + /* scanning area inside a memory block */
1725 + struct kmemleak_scan_area {
1726 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1727 +index 7e7cc0cd89fe..ecde75f2189b 100644
1728 +--- a/mm/memcontrol.c
1729 ++++ b/mm/memcontrol.c
1730 +@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1731 + css_put(&prev->css);
1732 + }
1733 +
1734 +-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1735 ++static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1736 ++ struct mem_cgroup *dead_memcg)
1737 + {
1738 +- struct mem_cgroup *memcg = dead_memcg;
1739 + struct mem_cgroup_reclaim_iter *iter;
1740 + struct mem_cgroup_per_node *mz;
1741 + int nid;
1742 + int i;
1743 +
1744 +- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1745 +- for_each_node(nid) {
1746 +- mz = mem_cgroup_nodeinfo(memcg, nid);
1747 +- for (i = 0; i <= DEF_PRIORITY; i++) {
1748 +- iter = &mz->iter[i];
1749 +- cmpxchg(&iter->position,
1750 +- dead_memcg, NULL);
1751 +- }
1752 ++ for_each_node(nid) {
1753 ++ mz = mem_cgroup_nodeinfo(from, nid);
1754 ++ for (i = 0; i <= DEF_PRIORITY; i++) {
1755 ++ iter = &mz->iter[i];
1756 ++ cmpxchg(&iter->position,
1757 ++ dead_memcg, NULL);
1758 + }
1759 + }
1760 + }
1761 +
1762 ++static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1763 ++{
1764 ++ struct mem_cgroup *memcg = dead_memcg;
1765 ++ struct mem_cgroup *last;
1766 ++
1767 ++ do {
1768 ++ __invalidate_reclaim_iterators(memcg, dead_memcg);
1769 ++ last = memcg;
1770 ++ } while ((memcg = parent_mem_cgroup(memcg)));
1771 ++
1772 ++ /*
1773 ++ * When cgroup1 non-hierarchy mode is used,
1774 ++ * parent_mem_cgroup() does not walk all the way up to the
1775 ++ * cgroup root (root_mem_cgroup). So we have to handle
1776 ++ * dead_memcg from cgroup root separately.
1777 ++ */
1778 ++ if (last != root_mem_cgroup)
1779 ++ __invalidate_reclaim_iterators(root_mem_cgroup,
1780 ++ dead_memcg);
1781 ++}
1782 ++
1783 + /**
1784 + * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1785 + * @memcg: hierarchy root
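
The restructured invalidation keeps the cmpxchg() idiom: each per-node, per-priority cursor is cleared only if it still points at the dying memcg, so a concurrent reclaimer that already advanced it is left alone; the new outer loop then visits root_mem_cgroup explicitly because parent_mem_cgroup() stops short of it in cgroup1 non-hierarchy mode. The compare-and-swap part in miniature, using C11 atomics in user space:

    #include <stdatomic.h>
    #include <stdio.h>

    struct memcg { const char *name; };

    int main(void)
    {
        struct memcg dead = { "dead" }, other = { "other" };
        _Atomic(struct memcg *) iter_position = &dead;
        struct memcg *expected = &dead;

        /* Clear the cursor only if it still points at the dying group */
        atomic_compare_exchange_strong(&iter_position, &expected, NULL);
        printf("cursor now %p (cleared)\n", (void *)atomic_load(&iter_position));

        /* A cursor that already moved on is left untouched */
        atomic_store(&iter_position, &other);
        expected = &dead;
        atomic_compare_exchange_strong(&iter_position, &expected, NULL);
        printf("cursor now %s (untouched)\n", atomic_load(&iter_position)->name);
        return 0;
    }
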
1786 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1787 +index 62f945ea3e36..70298b635b59 100644
1788 +--- a/mm/mempolicy.c
1789 ++++ b/mm/mempolicy.c
1790 +@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
1791 + },
1792 + };
1793 +
1794 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
1795 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
1796 + unsigned long flags);
1797 +
1798 + struct queue_pages {
1799 +@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
1800 + }
1801 +
1802 + /*
1803 +- * queue_pages_pmd() has three possible return values:
1804 +- * 1 - pages are placed on the right node or queued successfully.
1805 +- * 0 - THP was split.
1806 +- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
1807 +- * page was already on a node that does not follow the policy.
1808 ++ * queue_pages_pmd() has four possible return values:
1809 ++ * 0 - pages are placed on the right node or queued successfully.
1810 ++ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
1811 ++ * specified.
1812 ++ * 2 - THP was split.
1813 ++ * -EIO - is a migration entry, or only MPOL_MF_STRICT was specified and an
1814 ++ * existing page was already on a node that does not follow the
1815 ++ * policy.
1816 + */
1817 + static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
1818 + unsigned long end, struct mm_walk *walk)
1819 +@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
1820 + if (is_huge_zero_page(page)) {
1821 + spin_unlock(ptl);
1822 + __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
1823 ++ ret = 2;
1824 + goto out;
1825 + }
1826 +- if (!queue_pages_required(page, qp)) {
1827 +- ret = 1;
1828 ++ if (!queue_pages_required(page, qp))
1829 + goto unlock;
1830 +- }
1831 +
1832 +- ret = 1;
1833 + flags = qp->flags;
1834 + /* go to thp migration */
1835 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1836 +- if (!vma_migratable(walk->vma)) {
1837 +- ret = -EIO;
1838 ++ if (!vma_migratable(walk->vma) ||
1839 ++ migrate_page_add(page, qp->pagelist, flags)) {
1840 ++ ret = 1;
1841 + goto unlock;
1842 + }
1843 +-
1844 +- migrate_page_add(page, qp->pagelist, flags);
1845 + } else
1846 + ret = -EIO;
1847 + unlock:
1848 +@@ -479,6 +479,13 @@ out:
1849 + /*
1850 + * Scan through pages checking if pages follow certain conditions,
1851 + * and move them to the pagelist if they do.
1852 ++ *
1853 ++ * queue_pages_pte_range() has three possible return values:
1854 ++ * 0 - pages are placed on the right node or queued successfully.
1855 ++ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
1856 ++ * specified.
1857 ++ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
1858 ++ * on a node that does not follow the policy.
1859 + */
1860 + static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1861 + unsigned long end, struct mm_walk *walk)
1862 +@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1863 + struct queue_pages *qp = walk->private;
1864 + unsigned long flags = qp->flags;
1865 + int ret;
1866 ++ bool has_unmovable = false;
1867 + pte_t *pte;
1868 + spinlock_t *ptl;
1869 +
1870 + ptl = pmd_trans_huge_lock(pmd, vma);
1871 + if (ptl) {
1872 + ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
1873 +- if (ret > 0)
1874 +- return 0;
1875 +- else if (ret < 0)
1876 ++ if (ret != 2)
1877 + return ret;
1878 + }
1879 ++ /* THP was split, fall through to pte walk */
1880 +
1881 + if (pmd_trans_unstable(pmd))
1882 + return 0;
1883 +@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
1884 + if (!queue_pages_required(page, qp))
1885 + continue;
1886 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1887 +- if (!vma_migratable(vma))
1888 ++ /* MPOL_MF_STRICT must be specified if we get here */
1889 ++ if (!vma_migratable(vma)) {
1890 ++ has_unmovable = true;
1891 + break;
1892 +- migrate_page_add(page, qp->pagelist, flags);
1893 ++ }
1894 ++
1895 ++ /*
1896 ++ * Do not abort immediately since there may be
1897 ++ * temporarily off-LRU pages in the range. Still
1898 ++ * need to migrate other LRU pages.
1899 ++ */
1900 ++ if (migrate_page_add(page, qp->pagelist, flags))
1901 ++ has_unmovable = true;
1902 + } else
1903 + break;
1904 + }
1905 + pte_unmap_unlock(pte - 1, ptl);
1906 + cond_resched();
1907 ++
1908 ++ if (has_unmovable)
1909 ++ return 1;
1910 ++
1911 + return addr != end ? -EIO : 0;
1912 + }
1913 +
1914 +@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
1915 + *
1916 + * If pages found in a given range are on a set of nodes (determined by
1917 + * @nodes and @flags,) it's isolated and queued to the pagelist which is
1918 +- * passed via @private.)
1919 ++ * passed via @private.
1920 ++ *
1921 ++ * queue_pages_range() has three possible return values:
1922 ++ * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
1923 ++ * specified.
1924 ++ * 0 - pages queued successfully or no misplaced page found.
1925 ++ * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
1926 + */
1927 + static int
1928 + queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
1929 +@@ -926,7 +953,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
1930 + /*
1931 + * page migration, thp tail pages can be passed.
1932 + */
1933 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
1934 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
1935 + unsigned long flags)
1936 + {
1937 + struct page *head = compound_head(page);
1938 +@@ -939,8 +966,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
1939 + mod_node_page_state(page_pgdat(head),
1940 + NR_ISOLATED_ANON + page_is_file_cache(head),
1941 + hpage_nr_pages(head));
1942 ++ } else if (flags & MPOL_MF_STRICT) {
1943 ++ /*
1944 ++ * A non-movable page may reach here. Also, there may be
1945 ++ * temporarily off-LRU pages or non-LRU movable pages.
1946 ++ * Treat them as unmovable pages since they can't be
1947 ++ * isolated, so they can't be moved at the moment. It
1948 ++ * should return -EIO for this case too.
1949 ++ */
1950 ++ return -EIO;
1951 + }
1952 + }
1953 ++
1954 ++ return 0;
1955 + }
1956 +
1957 + /* page allocation callback for NUMA node migration */
1958 +@@ -1143,9 +1181,10 @@ static struct page *new_page(struct page *page, unsigned long start)
1959 + }
1960 + #else
1961 +
1962 +-static void migrate_page_add(struct page *page, struct list_head *pagelist,
1963 ++static int migrate_page_add(struct page *page, struct list_head *pagelist,
1964 + unsigned long flags)
1965 + {
1966 ++ return -EIO;
1967 + }
1968 +
1969 + int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1970 +@@ -1168,6 +1207,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1971 + struct mempolicy *new;
1972 + unsigned long end;
1973 + int err;
1974 ++ int ret;
1975 + LIST_HEAD(pagelist);
1976 +
1977 + if (flags & ~(unsigned long)MPOL_MF_VALID)
1978 +@@ -1229,10 +1269,15 @@ static long do_mbind(unsigned long start, unsigned long len,
1979 + if (err)
1980 + goto mpol_out;
1981 +
1982 +- err = queue_pages_range(mm, start, end, nmask,
1983 ++ ret = queue_pages_range(mm, start, end, nmask,
1984 + flags | MPOL_MF_INVERT, &pagelist);
1985 +- if (!err)
1986 +- err = mbind_range(mm, start, end, new);
1987 ++
1988 ++ if (ret < 0) {
1989 ++ err = -EIO;
1990 ++ goto up_out;
1991 ++ }
1992 ++
1993 ++ err = mbind_range(mm, start, end, new);
1994 +
1995 + if (!err) {
1996 + int nr_failed = 0;
1997 +@@ -1245,13 +1290,14 @@ static long do_mbind(unsigned long start, unsigned long len,
1998 + putback_movable_pages(&pagelist);
1999 + }
2000 +
2001 +- if (nr_failed && (flags & MPOL_MF_STRICT))
2002 ++ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
2003 + err = -EIO;
2004 + } else
2005 + putback_movable_pages(&pagelist);
2006 +
2007 ++up_out:
2008 + up_write(&mm->mmap_sem);
2009 +- mpol_out:
2010 ++mpol_out:
2011 + mpol_put(new);
2012 + return err;
2013 + }
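
Taken together, the mempolicy hunks thread a three-way contract from migrate_page_add() up to do_mbind(): 0 means everything was queued, 1 means an unmovable page was seen while MPOL_MF_MOVE*/MPOL_MF_STRICT were both set (the walk continues and the migrations are still attempted before -EIO is reported), and -EIO aborts immediately. A condensed user-space sketch of the do_mbind() side (all scaffolding hypothetical; mbind_range() and the migration loop are elided):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for queue_pages_range(): <0 hard error, 0 ok,
     * >0 "unmovable page seen, keep going". */
    static int queue_pages_range_demo(int scenario) { return scenario; }

    static long do_mbind_demo(int scenario)
    {
        int nr_failed = 0;      /* failed migrations, elided here */
        int ret, err;

        ret = queue_pages_range_demo(scenario);
        if (ret < 0)
            return -EIO;        /* the "goto up_out" path above */

        err = 0;                /* mbind_range() elided */
        if ((ret > 0) || (nr_failed /* && MPOL_MF_STRICT */))
            err = -EIO;
        return err;
    }

    int main(void)
    {
        printf("all queued:     %ld\n", do_mbind_demo(0));    /*  0 */
        printf("unmovable seen: %ld\n", do_mbind_demo(1));    /* -5 */
        printf("hard error:     %ld\n", do_mbind_demo(-EIO)); /* -5 */
        return 0;
    }
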
2014 +diff --git a/mm/rmap.c b/mm/rmap.c
2015 +index f048c2651954..1bd94ea62f7f 100644
2016 +--- a/mm/rmap.c
2017 ++++ b/mm/rmap.c
2018 +@@ -1467,7 +1467,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2019 + /*
2020 + * No need to invalidate here it will synchronize on
2021 + * against the special swap migration pte.
2022 ++ *
2023 ++ * The assignment to subpage above was computed from a
2024 ++ * swap PTE which results in an invalid pointer.
2025 ++ * Since only PAGE_SIZE pages can currently be
2026 ++ * migrated, just set it to page. This will need to be
2027 ++ * changed when hugepage migrations to device private
2028 ++ * memory are supported.
2029 + */
2030 ++ subpage = page;
2031 + goto discard;
2032 + }
2033 +
2034 +diff --git a/mm/usercopy.c b/mm/usercopy.c
2035 +index 14faadcedd06..51411f9c4068 100644
2036 +--- a/mm/usercopy.c
2037 ++++ b/mm/usercopy.c
2038 +@@ -151,7 +151,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
2039 + bool to_user)
2040 + {
2041 + /* Reject if object wraps past end of memory. */
2042 +- if (ptr + n < ptr)
2043 ++ if (ptr + (n - 1) < ptr)
2044 + usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
2045 +
2046 + /* Reject if NULL or ZERO-allocation. */
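
The usercopy off-by-one matters only at the very top of the address space: an n-byte object spanning [ptr, ptr + n - 1] whose last byte is the last byte of memory makes ptr + n wrap to 0, so the old test rejected a perfectly bounded copy; the corrected test checks the address of the last byte instead and still catches a genuine wrap. Worked out in user space (assuming n >= 1):

    #include <limits.h>
    #include <stdio.h>

    static int wraps_old(unsigned long ptr, unsigned long n)
    {
        return ptr + n < ptr;
    }

    static int wraps_new(unsigned long ptr, unsigned long n)
    {
        return ptr + (n - 1) < ptr;
    }

    int main(void)
    {
        /* 16-byte object whose last byte is the last byte of memory */
        unsigned long ptr = ULONG_MAX - 15, n = 16;

        printf("old check rejects valid copy: %d\n", wraps_old(ptr, n)); /* 1 */
        printf("new check rejects valid copy: %d\n", wraps_new(ptr, n)); /* 0 */

        /* a genuinely wrapping object is still caught */
        printf("new check catches real wrap:  %d\n", wraps_new(ptr, n + 1)); /* 1 */
        return 0;
    }
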
2047 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2048 +index 0bb4d712b80c..995b3842ba7c 100644
2049 +--- a/net/bridge/netfilter/ebtables.c
2050 ++++ b/net/bridge/netfilter/ebtables.c
2051 +@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
2052 + return 0;
2053 + }
2054 +
2055 ++static int ebt_compat_init_offsets(unsigned int number)
2056 ++{
2057 ++ if (number > INT_MAX)
2058 ++ return -EINVAL;
2059 ++
2060 ++ /* also count the base chain policies */
2061 ++ number += NF_BR_NUMHOOKS;
2062 ++
2063 ++ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
2064 ++}
2065 +
2066 + static int compat_table_info(const struct ebt_table_info *info,
2067 + struct compat_ebt_replace *newinfo)
2068 + {
2069 + unsigned int size = info->entries_size;
2070 + const void *entries = info->entries;
2071 ++ int ret;
2072 +
2073 + newinfo->entries_size = size;
2074 +- if (info->nentries) {
2075 +- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
2076 +- info->nentries);
2077 +- if (ret)
2078 +- return ret;
2079 +- }
2080 ++ ret = ebt_compat_init_offsets(info->nentries);
2081 ++ if (ret)
2082 ++ return ret;
2083 +
2084 + return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
2085 + entries, newinfo);
2086 +@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
2087 +
2088 + xt_compat_lock(NFPROTO_BRIDGE);
2089 +
2090 +- if (tmp.nentries) {
2091 +- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2092 +- if (ret < 0)
2093 +- goto out_unlock;
2094 +- }
2095 ++ ret = ebt_compat_init_offsets(tmp.nentries);
2096 ++ if (ret < 0)
2097 ++ goto out_unlock;
2098 +
2099 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2100 + if (ret < 0)
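
Both compat paths now go through one helper that sizes the offset table even for an empty ruleset, because the NF_BR_NUMHOOKS base-chain policies need slots of their own, and the INT_MAX guard keeps the unsigned addition from overflowing a signed count downstream. The guard in miniature (user-space model; NF_BR_NUMHOOKS is 6 for the bridge family):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    #define NF_BR_NUMHOOKS 6

    static int init_offsets_demo(unsigned int number, unsigned int *slots)
    {
        if (number > INT_MAX)
            return -EINVAL;
        /* also count the base chain policies */
        *slots = number + NF_BR_NUMHOOKS;
        return 0;
    }

    int main(void)
    {
        unsigned int slots;

        if (!init_offsets_demo(0, &slots))
            printf("empty ruleset still gets %u slots\n", slots);  /* 6 */
        printf("oversized ruleset -> %d\n",
               init_offsets_demo(UINT_MAX, &slots));               /* -22 */
        return 0;
    }
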
2101 +diff --git a/net/dsa/switch.c b/net/dsa/switch.c
2102 +index 142b294d3446..b0b9413fa5bf 100644
2103 +--- a/net/dsa/switch.c
2104 ++++ b/net/dsa/switch.c
2105 +@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
2106 + {
2107 + int port;
2108 +
2109 ++ if (!ds->ops->port_mdb_add)
2110 ++ return;
2111 ++
2112 + for_each_set_bit(port, bitmap, ds->num_ports)
2113 + ds->ops->port_mdb_add(ds, port, mdb);
2114 + }
2115 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2116 +index 27eff89fad01..c6073d17c324 100644
2117 +--- a/net/netfilter/nf_conntrack_core.c
2118 ++++ b/net/netfilter/nf_conntrack_core.c
2119 +@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
2120 + * table location, we assume id gets exposed to userspace.
2121 + *
2122 + * Following nf_conn items do not change throughout lifetime
2123 +- * of the nf_conn after it has been committed to main hash table:
2124 ++ * of the nf_conn:
2125 + *
2126 + * 1. nf_conn address
2127 +- * 2. nf_conn->ext address
2128 +- * 3. nf_conn->master address (normally NULL)
2129 +- * 4. tuple
2130 +- * 5. the associated net namespace
2131 ++ * 2. nf_conn->master address (normally NULL)
2132 ++ * 3. the associated net namespace
2133 ++ * 4. the original direction tuple
2134 + */
2135 + u32 nf_ct_get_id(const struct nf_conn *ct)
2136 + {
2137 +@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
2138 + net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
2139 +
2140 + a = (unsigned long)ct;
2141 +- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
2142 +- c = (unsigned long)ct->ext;
2143 +- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
2144 ++ b = (unsigned long)ct->master;
2145 ++ c = (unsigned long)nf_ct_net(ct);
2146 ++ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
2147 ++ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
2148 + &ct_id_seed);
2149 + #ifdef CONFIG_64BIT
2150 + return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
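
The conntrack id is now derived only from values that hold for the entry's whole lifetime: ct->ext was dropped because the extension area can be reallocated (moving its address and silently changing the id), and hashing the full tuplehash also covered the reply direction, which NAT may rewrite. A sketch of the shape of the computation with a toy mixer standing in for siphash (not a real hash; the field layout is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct tuple { uint32_t src, dst; };  /* stand-in for the original tuple */

    /* Toy mixer standing in for siphash(): the point is only *what*
     * gets hashed, i.e. fields that never change for the entry's life. */
    static uint32_t mix(uint32_t h, uint64_t v)
    {
        h ^= (uint32_t)(v ^ (v >> 32));
        return h * 2654435761u;
    }

    static uint32_t ct_id_demo(const void *ct, const void *master,
                               const void *net, const struct tuple *orig)
    {
        uint32_t h = 0;

        h = mix(h, (uintptr_t)ct);      /* 1. nf_conn address */
        h = mix(h, (uintptr_t)master);  /* 2. master (normally NULL) */
        h = mix(h, (uintptr_t)net);     /* 3. net namespace */
        h = mix(h, ((uint64_t)orig->src << 32) | orig->dst); /* 4. orig tuple */
        return h;
    }

    int main(void)
    {
        struct tuple t = { 0x0a000001, 0x0a000002 };
        int ct, net;    /* dummy objects just to have stable addresses */

        printf("id = %#x\n", ct_id_demo(&ct, NULL, &net, &t));
        return 0;
    }
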
2151 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2152 +index 93b5a4200585..7204e7bbebb0 100644
2153 +--- a/net/packet/af_packet.c
2154 ++++ b/net/packet/af_packet.c
2155 +@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2156 +
2157 + mutex_lock(&po->pg_vec_lock);
2158 +
2159 ++ /* The packet_sendmsg() check on tx_ring.pg_vec was lockless, so
2160 ++ * we must confirm it again under the protection of pg_vec_lock.
2161 ++ */
2162 ++ if (unlikely(!po->tx_ring.pg_vec)) {
2163 ++ err = -EBUSY;
2164 ++ goto out;
2165 ++ }
2166 + if (likely(saddr == NULL)) {
2167 + dev = packet_cached_dev_get(po);
2168 + proto = po->num;
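
This is the classic lockless-check/locked-recheck pattern: packet_sendmsg() peeked at tx_ring.pg_vec without pg_vec_lock, so a concurrent PACKET_TX_RING teardown could free the ring before tpacket_snd() acquired the mutex; re-testing under the lock closes the window. The pattern in miniature with a pthread mutex (all names illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *pg_vec;    /* NULL once the ring is torn down */

    static int tpacket_snd_demo(void)
    {
        int err = 0;

        pthread_mutex_lock(&pg_vec_lock);
        /* The earlier lockless check may be stale by now:
         * confirm it under the protection of pg_vec_lock. */
        if (!pg_vec) {
            err = -EBUSY;
            goto out;
        }
        /* ... transmit via the ring ... */
    out:
        pthread_mutex_unlock(&pg_vec_lock);
        return err;
    }

    int main(void)
    {
        static int ring;

        pg_vec = &ring;
        printf("ring present:   %d\n", tpacket_snd_demo());  /*   0 */
        pg_vec = NULL;      /* concurrent teardown */
        printf("ring torn down: %d\n", tpacket_snd_demo());  /* -16 */
        return 0;
    }
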
2169 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
2170 +index 3131b4154c74..28adac31f0ff 100644
2171 +--- a/net/sctp/sm_sideeffect.c
2172 ++++ b/net/sctp/sm_sideeffect.c
2173 +@@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
2174 + */
2175 + if (net->sctp.pf_enable &&
2176 + (transport->state == SCTP_ACTIVE) &&
2177 +- (asoc->pf_retrans < transport->pathmaxrxt) &&
2178 ++ (transport->error_count < transport->pathmaxrxt) &&
2179 + (transport->error_count > asoc->pf_retrans)) {
2180 +
2181 + sctp_assoc_control_transport(asoc, transport,
2182 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
2183 +index 0da57938a6c5..87061a4bb44b 100644
2184 +--- a/net/sctp/stream.c
2185 ++++ b/net/sctp/stream.c
2186 +@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
2187 + nstr_list[i] = htons(str_list[i]);
2188 +
2189 + if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
2190 ++ kfree(nstr_list);
2191 + retval = -EAGAIN;
2192 + goto out;
2193 + }
2194 +diff --git a/net/tipc/addr.c b/net/tipc/addr.c
2195 +index b88d48d00913..0f1eaed1bd1b 100644
2196 +--- a/net/tipc/addr.c
2197 ++++ b/net/tipc/addr.c
2198 +@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
2199 + tipc_set_node_id(net, node_id);
2200 + }
2201 + tn->trial_addr = addr;
2202 ++ tn->addr_trial_end = jiffies;
2203 + pr_info("32-bit node address hash set to %x\n", addr);
2204 + }
2205 +
2206 +diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
2207 +index dad5583451af..3b2861f47709 100644
2208 +--- a/scripts/Kconfig.include
2209 ++++ b/scripts/Kconfig.include
2210 +@@ -20,7 +20,7 @@ success = $(if-success,$(1),y,n)
2211 +
2212 + # $(cc-option,<flag>)
2213 + # Return y if the compiler supports <flag>, n otherwise
2214 +-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
2215 ++cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
2216 +
2217 + # $(ld-option,<flag>)
2218 + # Return y if the linker supports <flag>, n otherwise
2219 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2220 +index 7d4af0d0accb..51884c7b8069 100644
2221 +--- a/scripts/Makefile.modpost
2222 ++++ b/scripts/Makefile.modpost
2223 +@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
2224 + $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
2225 + $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
2226 + $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
2227 +- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
2228 ++ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
2229 + $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
2230 + $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
2231 + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
2232 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2233 +index 579984ecdec3..bb2bd33b00ec 100644
2234 +--- a/sound/pci/hda/hda_generic.c
2235 ++++ b/sound/pci/hda/hda_generic.c
2236 +@@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
2237 + }
2238 + EXPORT_SYMBOL_GPL(snd_hda_gen_free);
2239 +
2240 ++/**
2241 ++ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
2242 ++ * @codec: the HDA codec
2243 ++ *
2244 ++ * This can be put as patch_ops reboot_notify function.
2245 ++ */
2246 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec)
2247 ++{
2248 ++ /* Make the codec enter D3 to avoid spurious noises from the internal
2249 ++ * speaker during (and after) reboot
2250 ++ */
2251 ++ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2252 ++ snd_hda_codec_write(codec, codec->core.afg, 0,
2253 ++ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2254 ++ msleep(10);
2255 ++}
2256 ++EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
2257 ++
2258 + #ifdef CONFIG_PM
2259 + /**
2260 + * snd_hda_gen_check_power_status - check the loopback power save state
2261 +@@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
2262 + .init = snd_hda_gen_init,
2263 + .free = snd_hda_gen_free,
2264 + .unsol_event = snd_hda_jack_unsol_event,
2265 ++ .reboot_notify = snd_hda_gen_reboot_notify,
2266 + #ifdef CONFIG_PM
2267 + .check_power_status = snd_hda_gen_check_power_status,
2268 + #endif
2269 +@@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
2270 +
2271 + err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
2272 + if (err < 0)
2273 +- return err;
2274 ++ goto error;
2275 +
2276 + err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
2277 + if (err < 0)
2278 +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
2279 +index 10123664fa61..ce9c293717b9 100644
2280 +--- a/sound/pci/hda/hda_generic.h
2281 ++++ b/sound/pci/hda/hda_generic.h
2282 +@@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
2283 + struct auto_pin_cfg *cfg);
2284 + int snd_hda_gen_build_controls(struct hda_codec *codec);
2285 + int snd_hda_gen_build_pcms(struct hda_codec *codec);
2286 ++void snd_hda_gen_reboot_notify(struct hda_codec *codec);
2287 +
2288 + /* standard jack event callbacks */
2289 + void snd_hda_gen_hp_automute(struct hda_codec *codec,
2290 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2291 +index 81cea34aff1c..7a3e34b120b3 100644
2292 +--- a/sound/pci/hda/hda_intel.c
2293 ++++ b/sound/pci/hda/hda_intel.c
2294 +@@ -2655,6 +2655,9 @@ static const struct pci_device_id azx_ids[] = {
2295 + /* AMD, X370 & co */
2296 + { PCI_DEVICE(0x1022, 0x1457),
2297 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2298 ++ /* AMD, X570 & co */
2299 ++ { PCI_DEVICE(0x1022, 0x1487),
2300 ++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2301 + /* AMD Stoney */
2302 + { PCI_DEVICE(0x1022, 0x157a),
2303 + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
2304 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2305 +index b70fbfa80546..6f17b256fcd0 100644
2306 +--- a/sound/pci/hda/patch_conexant.c
2307 ++++ b/sound/pci/hda/patch_conexant.c
2308 +@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
2309 + {
2310 + struct conexant_spec *spec = codec->spec;
2311 +
2312 +- switch (codec->core.vendor_id) {
2313 +- case 0x14f12008: /* CX8200 */
2314 +- case 0x14f150f2: /* CX20722 */
2315 +- case 0x14f150f4: /* CX20724 */
2316 +- break;
2317 +- default:
2318 +- return;
2319 +- }
2320 +-
2321 + /* Turn the problematic codec into D3 to avoid spurious noises
2322 + from the internal speaker during (and after) reboot */
2323 + cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
2324 +-
2325 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2326 +- snd_hda_codec_write(codec, codec->core.afg, 0,
2327 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2328 +- msleep(10);
2329 ++ snd_hda_gen_reboot_notify(codec);
2330 + }
2331 +
2332 + static void cx_auto_free(struct hda_codec *codec)
2333 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2334 +index dc1989686f09..9b5caf099bfb 100644
2335 +--- a/sound/pci/hda/patch_realtek.c
2336 ++++ b/sound/pci/hda/patch_realtek.c
2337 +@@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
2338 + alc_shutup(codec);
2339 + }
2340 +
2341 +-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
2342 +-static void alc_d3_at_reboot(struct hda_codec *codec)
2343 +-{
2344 +- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
2345 +- snd_hda_codec_write(codec, codec->core.afg, 0,
2346 +- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
2347 +- msleep(10);
2348 +-}
2349 +-
2350 + #define alc_free snd_hda_gen_free
2351 +
2352 + #ifdef CONFIG_PM
2353 +@@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
2354 + struct alc_spec *spec = codec->spec;
2355 +
2356 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
2357 +- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
2358 ++ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
2359 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
2360 + codec->power_save_node = 0; /* avoid click noises */
2361 + snd_hda_apply_pincfgs(codec, pincfgs);
2362 +@@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2363 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
2364 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
2365 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2366 ++ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2367 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
2368 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
2369 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2370 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2371 +index 7e1c6c2dc99e..4b3e1c48ca2f 100644
2372 +--- a/sound/usb/mixer.c
2373 ++++ b/sound/usb/mixer.c
2374 +@@ -83,6 +83,7 @@ struct mixer_build {
2375 + unsigned char *buffer;
2376 + unsigned int buflen;
2377 + DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
2378 ++ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
2379 + struct usb_audio_term oterm;
2380 + const struct usbmix_name_map *map;
2381 + const struct usbmix_selector_map *selector_map;
2382 +@@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2383 + return -EINVAL;
2384 + if (!desc->bNrInPins)
2385 + return -EINVAL;
2386 ++ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
2387 ++ return -EINVAL;
2388 +
2389 + switch (state->mixer->protocol) {
2390 + case UAC_VERSION_1:
2391 +@@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
2392 + * parse the source unit recursively until it reaches to a terminal
2393 + * or a branched unit.
2394 + */
2395 +-static int check_input_term(struct mixer_build *state, int id,
2396 ++static int __check_input_term(struct mixer_build *state, int id,
2397 + struct usb_audio_term *term)
2398 + {
2399 + int protocol = state->mixer->protocol;
2400 + int err;
2401 + void *p1;
2402 ++ unsigned char *hdr;
2403 +
2404 + memset(term, 0, sizeof(*term));
2405 +- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
2406 +- unsigned char *hdr = p1;
2407 ++ for (;;) {
2408 ++ /* a loop in the terminal chain? */
2409 ++ if (test_and_set_bit(id, state->termbitmap))
2410 ++ return -EINVAL;
2411 ++
2412 ++ p1 = find_audio_control_unit(state, id);
2413 ++ if (!p1)
2414 ++ break;
2415 ++
2416 ++ hdr = p1;
2417 + term->id = id;
2418 +
2419 + if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
2420 +@@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,
2421 +
2422 + /* call recursively to verify that the
2423 + * referenced clock entity is valid */
2424 +- err = check_input_term(state, d->bCSourceID, term);
2425 ++ err = __check_input_term(state, d->bCSourceID, term);
2426 + if (err < 0)
2427 + return err;
2428 +
2429 +@@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
2430 + case UAC2_CLOCK_SELECTOR: {
2431 + struct uac_selector_unit_descriptor *d = p1;
2432 + /* call recursively to retrieve the channel info */
2433 +- err = check_input_term(state, d->baSourceID[0], term);
2434 ++ err = __check_input_term(state, d->baSourceID[0], term);
2435 + if (err < 0)
2436 + return err;
2437 + term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
2438 +@@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,
2439 +
2440 + /* call recursively to verify that the
2441 + * referenced clock entity is valid */
2442 +- err = check_input_term(state, d->bCSourceID, term);
2443 ++ err = __check_input_term(state, d->bCSourceID, term);
2444 + if (err < 0)
2445 + return err;
2446 +
2447 +@@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
2448 + case UAC3_CLOCK_SELECTOR: {
2449 + struct uac_selector_unit_descriptor *d = p1;
2450 + /* call recursively to retrieve the channel info */
2451 +- err = check_input_term(state, d->baSourceID[0], term);
2452 ++ err = __check_input_term(state, d->baSourceID[0], term);
2453 + if (err < 0)
2454 + return err;
2455 + term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
2456 +@@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
2457 + return -EINVAL;
2458 +
2459 + /* call recursively to retrieve the channel info */
2460 +- err = check_input_term(state, d->baSourceID[0], term);
2461 ++ err = __check_input_term(state, d->baSourceID[0], term);
2462 + if (err < 0)
2463 + return err;
2464 +
2465 +@@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
2466 + return -ENODEV;
2467 + }
2468 +
2469 ++
2470 ++static int check_input_term(struct mixer_build *state, int id,
2471 ++ struct usb_audio_term *term)
2472 ++{
2473 ++ memset(term, 0, sizeof(*term));
2474 ++ memset(state->termbitmap, 0, sizeof(state->termbitmap));
2475 ++ return __check_input_term(state, id, term);
2476 ++}
2477 ++
2478 + /*
2479 + * Feature Unit
2480 + */
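
A crafted descriptor set can make unit A name unit B as its source and vice versa, so the old while loop chasing bSourceID/bCSourceID references never terminated. The fix is standard cycle detection with a visited bitmap: check_input_term() clears termbitmap, and __check_input_term() bails with -EINVAL the moment an id repeats. The idea in miniature (user-space sketch; the array-based graph is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_IDS 256

    /* source[id] = the unit this id reads from; 0 terminates the chain */
    static int source[MAX_IDS];

    static int walk_chain(int id, unsigned char *visited)
    {
        for (;;) {
            if (visited[id])    /* a loop in the terminal chain? */
                return -EINVAL;
            visited[id] = 1;
            if (!source[id])
                return id;      /* reached a terminal */
            id = source[id];
        }
    }

    int main(void)
    {
        unsigned char visited[MAX_IDS] = { 0 };
        unsigned char visited2[MAX_IDS] = { 0 };

        source[1] = 2;          /* 1 -> 2 -> terminal */
        printf("sane chain ends at:  %d\n", walk_chain(1, visited));

        source[3] = 4;          /* 3 -> 4 -> 3 -> ... */
        source[4] = 3;
        printf("cyclic chain yields: %d\n", walk_chain(3, visited2)); /* -22 */
        return 0;
    }
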
2481 +diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
2482 +index a94bd6850a0b..54c34c107cab 100644
2483 +--- a/tools/perf/util/header.c
2484 ++++ b/tools/perf/util/header.c
2485 +@@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
2486 + data->file.path);
2487 + }
2488 +
2489 ++ if (f_header.attr_size == 0) {
2490 ++ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
2491 ++ "Was the 'perf record' command properly terminated?\n",
2492 ++ data->file.path);
2493 ++ return -EINVAL;
2494 ++ }
2495 ++
2496 + nr_attrs = f_header.attrs.size / f_header.attr_size;
2497 + lseek(fd, f_header.attrs.offset, SEEK_SET);
2498 +
2499 +@@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
2500 + size += sizeof(struct perf_event_header);
2501 + size += ids * sizeof(u64);
2502 +
2503 +- ev = malloc(size);
2504 ++ ev = zalloc(size);
2505 +
2506 + if (ev == NULL)
2507 + return -ENOMEM;
2508 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
2509 +index 02bac8abd206..d982650deb33 100644
2510 +--- a/virt/kvm/arm/arm.c
2511 ++++ b/virt/kvm/arm/arm.c
2512 +@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
2513 + void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
2514 + {
2515 + kvm_timer_schedule(vcpu);
2516 ++ /*
2517 ++ * If we're about to block (most likely because we've just hit a
2518 ++ * WFI), we need to sync back the state of the GIC CPU interface
2519 ++ * so that we have the latest PMR and group enables. This ensures
2520 ++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
2521 ++ * whether we have pending interrupts.
2522 ++ */
2523 ++ preempt_disable();
2524 ++ kvm_vgic_vmcr_sync(vcpu);
2525 ++ preempt_enable();
2526 ++
2527 + kvm_vgic_v4_enable_doorbell(vcpu);
2528 + }
2529 +
2530 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
2531 +index 69b892abd7dc..57281c1594d0 100644
2532 +--- a/virt/kvm/arm/vgic/vgic-v2.c
2533 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
2534 +@@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
2535 + kvm_vgic_global_state.vctrl_base + GICH_APR);
2536 + }
2537 +
2538 +-void vgic_v2_put(struct kvm_vcpu *vcpu)
2539 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
2540 + {
2541 + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2542 +
2543 + cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
2544 ++}
2545 ++
2546 ++void vgic_v2_put(struct kvm_vcpu *vcpu)
2547 ++{
2548 ++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
2549 ++
2550 ++ vgic_v2_vmcr_sync(vcpu);
2551 + cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
2552 + }
2553 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
2554 +index 3f2350a4d4ab..5c55995a1a16 100644
2555 +--- a/virt/kvm/arm/vgic/vgic-v3.c
2556 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
2557 +@@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
2558 + __vgic_v3_activate_traps(vcpu);
2559 + }
2560 +
2561 +-void vgic_v3_put(struct kvm_vcpu *vcpu)
2562 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
2563 + {
2564 + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
2565 +
2566 + if (likely(cpu_if->vgic_sre))
2567 + cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
2568 ++}
2569 ++
2570 ++void vgic_v3_put(struct kvm_vcpu *vcpu)
2571 ++{
2572 ++ vgic_v3_vmcr_sync(vcpu);
2573 +
2574 + kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
2575 +
2576 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
2577 +index c5165e3b80cb..250cd72c95a5 100644
2578 +--- a/virt/kvm/arm/vgic/vgic.c
2579 ++++ b/virt/kvm/arm/vgic/vgic.c
2580 +@@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
2581 + vgic_v3_put(vcpu);
2582 + }
2583 +
2584 ++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
2585 ++{
2586 ++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
2587 ++ return;
2588 ++
2589 ++ if (kvm_vgic_global_state.type == VGIC_V2)
2590 ++ vgic_v2_vmcr_sync(vcpu);
2591 ++ else
2592 ++ vgic_v3_vmcr_sync(vcpu);
2593 ++}
2594 ++
2595 + int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
2596 + {
2597 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
2598 +diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
2599 +index a90024718ca4..d5e454279925 100644
2600 +--- a/virt/kvm/arm/vgic/vgic.h
2601 ++++ b/virt/kvm/arm/vgic/vgic.h
2602 +@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
2603 + void vgic_v2_init_lrs(void);
2604 + void vgic_v2_load(struct kvm_vcpu *vcpu);
2605 + void vgic_v2_put(struct kvm_vcpu *vcpu);
2606 ++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
2607 +
2608 + void vgic_v2_save_state(struct kvm_vcpu *vcpu);
2609 + void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
2610 +@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
2611 +
2612 + void vgic_v3_load(struct kvm_vcpu *vcpu);
2613 + void vgic_v3_put(struct kvm_vcpu *vcpu);
2614 ++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
2615 +
2616 + bool vgic_has_its(struct kvm *kvm);
2617 + int kvm_vgic_register_its_device(void);