
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Thu, 05 Mar 2020 16:23:32 +0000
Message-Id: 1583425387.836621b120225f053a815f0660ead626b1a5ebb4.mpagano@gentoo
commit:     836621b120225f053a815f0660ead626b1a5ebb4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  5 16:23:07 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  5 16:23:07 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=836621b1

Linux patch 4.19.108

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1107_linux-4.19.108.patch | 2999 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3003 insertions(+)

diff --git a/0000_README b/0000_README
index 7d48aad..65259b7 100644
--- a/0000_README
+++ b/0000_README
@@ -467,6 +467,10 @@ Patch:  1106_linux-4.19.107.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.107
 
+Patch:  1107_linux-4.19.108.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.108
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
diff --git a/1107_linux-4.19.108.patch b/1107_linux-4.19.108.patch
new file mode 100644
index 0000000..f4ed81e
--- /dev/null
+++ b/1107_linux-4.19.108.patch
@@ -0,0 +1,2999 @@
+diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
+index 54128c50d508..b01c91893481 100644
+--- a/Documentation/networking/nf_flowtable.txt
++++ b/Documentation/networking/nf_flowtable.txt
+@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
+
+ table inet x {
+ flowtable f {
+- hook ingress priority 0 devices = { eth0, eth1 };
++ hook ingress priority 0; devices = { eth0, eth1 };
+ }
+ chain y {
+ type filter hook forward priority 0; policy accept;
+diff --git a/Makefile b/Makefile
+index 69e2527a6968..313f0c8dd66f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 107
++SUBLEVEL = 108
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+index 4dedfcb0fcb3..ac42d3c6bda0 100644
+--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
++++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
+@@ -45,7 +45,7 @@
+ /* DAC */
+ format = "i2s";
+ mclk-fs = <256>;
+- frame-inversion = <1>;
++ frame-inversion;
+ cpu {
+ sound-dai = <&sti_uni_player2>;
+ };
+diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
+index 0bef238d2c0c..0d5f9c8f5bda 100644
+--- a/arch/mips/kernel/vpe.c
++++ b/arch/mips/kernel/vpe.c
+@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
+ {
+ list_del(&v->list);
+ if (v->load_addr)
+- release_progmem(v);
++ release_progmem(v->load_addr);
+ kfree(v);
+ }
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 0219693bf08e..3f0565e1a7a8 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1298,6 +1298,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
+ control->pause_filter_count, old);
+ }
+
++/*
++ * The default MMIO mask is a single bit (excluding the present bit),
++ * which could conflict with the memory encryption bit. Check for
++ * memory encryption support and override the default MMIO mask if
++ * memory encryption is enabled.
++ */
++static __init void svm_adjust_mmio_mask(void)
++{
++ unsigned int enc_bit, mask_bit;
++ u64 msr, mask;
++
++ /* If there is no memory encryption support, use existing mask */
++ if (cpuid_eax(0x80000000) < 0x8000001f)
++ return;
++
++ /* If memory encryption is not enabled, use existing mask */
++ rdmsrl(MSR_K8_SYSCFG, msr);
++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++ return;
++
++ enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
++ mask_bit = boot_cpu_data.x86_phys_bits;
++
++ /* Increment the mask bit if it is the same as the encryption bit */
++ if (enc_bit == mask_bit)
++ mask_bit++;
++
++ /*
++ * If the mask bit location is below 52, then some bits above the
++ * physical addressing limit will always be reserved, so use the
++ * rsvd_bits() function to generate the mask. This mask, along with
++ * the present bit, will be used to generate a page fault with
++ * PFER.RSV = 1.
++ *
++ * If the mask bit location is 52 (or above), then clear the mask.
++ */
++ mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
++
++ kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
++}
++
+ static __init int svm_hardware_setup(void)
+ {
+ int cpu;
+@@ -1352,6 +1393,8 @@ static __init int svm_hardware_setup(void)
+ }
+ }
+
++ svm_adjust_mmio_mask();
++
+ for_each_possible_cpu(cpu) {
+ r = svm_cpu_init(cpu);
+ if (r)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 9c48484dbe23..a81d7d9ce9d6 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -13724,6 +13724,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
+ else
+ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+
++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+ }
+
+@@ -13753,6 +13754,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ case x86_intercept_outs:
+ return vmx_check_intercept_io(vcpu, info);
+
++ case x86_intercept_lgdt:
++ case x86_intercept_lidt:
++ case x86_intercept_lldt:
++ case x86_intercept_ltr:
++ case x86_intercept_sgdt:
++ case x86_intercept_sidt:
++ case x86_intercept_sldt:
++ case x86_intercept_str:
++ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
++ return X86EMUL_CONTINUE;
++
++ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
++ break;
++
+ /* TODO: check more intercepts... */
+ default:
+ break;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ade694f94a49..2cb379e261c0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8693,12 +8693,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.apf.msr_val = 0;
+-
+- vcpu_load(vcpu);
+- kvm_mmu_unload(vcpu);
+- vcpu_put(vcpu);
+-
+ kvm_arch_vcpu_free(vcpu);
+ }
+
+diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
+index 95600309ce42..23cde3d8e8fb 100644
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -129,12 +129,11 @@ void __init acpi_watchdog_init(void)
+ gas = &entries[i].register_region;
+
+ res.start = gas->address;
++ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ res.flags = IORESOURCE_MEM;
+- res.end = res.start + ALIGN(gas->access_width, 4) - 1;
+ } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ res.flags = IORESOURCE_IO;
+- res.end = res.start + gas->access_width - 1;
+ } else {
+ pr_warn("Unsupported address space: %u\n",
+ gas->space_id);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index af44db2dfb68..fec679433f72 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -735,10 +735,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ msg = ssif_info->curr_msg;
+ if (msg) {
++ if (data) {
++ if (len > IPMI_MAX_MSG_LENGTH)
++ len = IPMI_MAX_MSG_LENGTH;
++ memcpy(msg->rsp, data, len);
++ } else {
++ len = 0;
++ }
+ msg->rsp_size = len;
+- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+- memcpy(msg->rsp, data, msg->rsp_size);
+ ssif_info->curr_msg = NULL;
+ }
+
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 8122d0e0d4c4..06a981c72246 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -600,7 +600,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ {
+ struct devfreq *devfreq;
+ struct devfreq_governor *governor;
+- static atomic_t devfreq_no = ATOMIC_INIT(-1);
+ int err = 0;
+
+ if (!dev || !profile || !governor_name) {
+@@ -661,8 +660,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ }
+ devfreq->max_freq = devfreq->scaling_max_freq;
+
+- dev_set_name(&devfreq->dev, "devfreq%d",
+- atomic_inc_return(&devfreq_no));
++ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+ err = device_register(&devfreq->dev);
+ if (err) {
+ mutex_unlock(&devfreq->lock);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+index bb5a47a45790..5c76a815396d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+@@ -97,6 +97,7 @@ struct amdgpu_gmc {
+ uint32_t srbm_soft_reset;
+ bool prt_warning;
+ uint64_t stolen_size;
++ uint32_t sdpif_register;
+ /* apertures */
+ u64 shared_aperture_start;
+ u64 shared_aperture_end;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index ede27dab675f..8b25940c1367 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -992,6 +992,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
+ }
+ }
+
++/**
++ * gmc_v9_0_restore_registers - restores regs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * This restores register values, saved at suspend.
++ */
++static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
++{
++ if (adev->asic_type == CHIP_RAVEN)
++ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
++}
++
+ /**
+ * gmc_v9_0_gart_enable - gart enable
+ *
+@@ -1080,6 +1093,20 @@ static int gmc_v9_0_hw_init(void *handle)
+ return r;
+ }
+
++/**
++ * gmc_v9_0_save_registers - saves regs
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * This saves potential register values that should be
++ * restored upon resume
++ */
++static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
++{
++ if (adev->asic_type == CHIP_RAVEN)
++ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
++}
++
+ /**
+ * gmc_v9_0_gart_disable - gart disable
+ *
+@@ -1112,9 +1139,16 @@ static int gmc_v9_0_hw_fini(void *handle)
+
+ static int gmc_v9_0_suspend(void *handle)
+ {
++ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+- return gmc_v9_0_hw_fini(adev);
++ r = gmc_v9_0_hw_fini(adev);
++ if (r)
++ return r;
++
++ gmc_v9_0_save_registers(adev);
++
++ return 0;
+ }
+
+ static int gmc_v9_0_resume(void *handle)
+@@ -1122,6 +1156,7 @@ static int gmc_v9_0_resume(void *handle)
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
++ gmc_v9_0_restore_registers(adev);
+ r = gmc_v9_0_hw_init(adev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+index b6f74bf4af02..27bb8c1ab858 100644
+--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
++++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+@@ -7376,6 +7376,8 @@
+ #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
+ #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
+
++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
++#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+
+ // addressBlock: dce_dc_fmt4_dispdec
+ // base address: 0x2000
+diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
+index 51ed99a37803..6053f5a93f58 100644
+--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
++++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
+@@ -95,12 +95,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
+ dmabuf_obj = container_of(pos,
+ struct intel_vgpu_dmabuf_obj, list);
+ if (dmabuf_obj == obj) {
++ list_del(pos);
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ idr_remove(&vgpu->object_idr,
+ dmabuf_obj->dmabuf_id);
+ kfree(dmabuf_obj->info);
+ kfree(dmabuf_obj);
+- list_del(pos);
+ break;
+ }
+ }
+diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
+index c628be05fbfe..69bba88906cd 100644
+--- a/drivers/gpu/drm/i915/gvt/vgpu.c
++++ b/drivers/gpu/drm/i915/gvt/vgpu.c
+@@ -556,9 +556,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+
+ intel_vgpu_reset_mmio(vgpu, dmlr);
+ populate_pvinfo_page(vgpu);
+- intel_vgpu_reset_display(vgpu);
+
+ if (dmlr) {
++ intel_vgpu_reset_display(vgpu);
+ intel_vgpu_reset_cfg_space(vgpu);
+ /* only reset the failsafe mode when dmlr reset */
+ vgpu->failsafe = false;
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index dbfd2c006f74..6f81de85fb86 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -492,6 +492,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ if (ret)
+ goto err_msm_uninit;
+
++ if (!dev->dma_parms) {
++ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
++ GFP_KERNEL);
++ if (!dev->dma_parms)
++ return -ENOMEM;
++ }
++ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
++
+ msm_gem_shrinker_init(ddev);
+
+ switch (get_mdp_ver(pdev)) {
+diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
+index 3cd7229b6e54..895f49b565ee 100644
+--- a/drivers/hid/hid-alps.c
++++ b/drivers/hid/hid-alps.c
+@@ -734,7 +734,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ if (data->has_sp) {
+ input2 = input_allocate_device();
+ if (!input2) {
+- input_free_device(input2);
++ ret = -ENOMEM;
+ goto exit;
+ }
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index e723156057a6..2c85d075daee 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1566,7 +1566,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+
+ rsize = ((report->size - 1) >> 3) + 1;
+
+- if (rsize > HID_MAX_BUFFER_SIZE)
++ if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
++ rsize = HID_MAX_BUFFER_SIZE - 1;
++ else if (rsize > HID_MAX_BUFFER_SIZE)
+ rsize = HID_MAX_BUFFER_SIZE;
+
+ if (csize < rsize) {
+diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
+index 2ce1eb0c9212..f2e23f81601e 100644
+--- a/drivers/hid/hid-ite.c
++++ b/drivers/hid/hid-ite.c
+@@ -44,8 +44,9 @@ static const struct hid_device_id ite_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+ /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
++ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++ USB_VENDOR_ID_SYNAPTICS,
++ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ite_devices);
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index da000195b79a..c34ef95d7cef 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -954,9 +954,9 @@ void hiddev_disconnect(struct hid_device *hid)
+ hiddev->exist = 0;
+
+ if (hiddev->open) {
+- mutex_unlock(&hiddev->existancelock);
+ hid_hw_close(hiddev->hid);
+ wake_up_interruptible(&hiddev->wait);
++ mutex_unlock(&hiddev->existancelock);
+ } else {
+ mutex_unlock(&hiddev->existancelock);
+ kfree(hiddev);
+diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
+index f5e1941e65b5..a1cdcfc74acf 100644
+--- a/drivers/i2c/busses/i2c-altera.c
++++ b/drivers/i2c/busses/i2c-altera.c
+@@ -182,7 +182,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
+ /* SCL Low Time */
+ writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
+ /* SDA Hold Time, 300ns */
+- writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
++ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
+
+ /* Mask all master interrupt bits */
+ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 30132c3957cd..41ca9ff7b5da 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -82,25 +82,6 @@
+ #define JZ4780_I2C_STA_TFNF BIT(1)
+ #define JZ4780_I2C_STA_ACT BIT(0)
+
+-static const char * const jz4780_i2c_abrt_src[] = {
+- "ABRT_7B_ADDR_NOACK",
+- "ABRT_10ADDR1_NOACK",
+- "ABRT_10ADDR2_NOACK",
+- "ABRT_XDATA_NOACK",
+- "ABRT_GCALL_NOACK",
+- "ABRT_GCALL_READ",
+- "ABRT_HS_ACKD",
+- "SBYTE_ACKDET",
+- "ABRT_HS_NORSTRT",
+- "SBYTE_NORSTRT",
+- "ABRT_10B_RD_NORSTRT",
+- "ABRT_MASTER_DIS",
+- "ARB_LOST",
+- "SLVFLUSH_TXFIFO",
+- "SLV_ARBLOST",
+- "SLVRD_INTX",
+-};
+-
+ #define JZ4780_I2C_INTST_IGC BIT(11)
+ #define JZ4780_I2C_INTST_ISTT BIT(10)
+ #define JZ4780_I2C_INTST_ISTP BIT(9)
+@@ -538,21 +519,8 @@ done:
+
+ static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
+ {
+- int i;
+-
+- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
+- dev_err(&i2c->adap.dev, "device addr=%x\n",
+- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
+- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
+- i2c->cmd, i2c->cmd_buf[i2c->cmd]);
+- dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
+- i2c->cmd, i2c->data_buf[i2c->cmd]);
+-
+- for (i = 0; i < 16; i++) {
+- if (src & BIT(i))
+- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
+- i, jz4780_i2c_abrt_src[i]);
+- }
++ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
++ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
+ }
+
+ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index bf7b69449b43..f9b73336a39e 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -208,7 +208,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+
+ static struct its_collection *valid_col(struct its_collection *col)
+ {
+- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
++ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ return NULL;
+
+ return col;
+diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
+index 8c744578122a..a0d87ed9da69 100644
+--- a/drivers/macintosh/therm_windtunnel.c
++++ b/drivers/macintosh/therm_windtunnel.c
+@@ -300,9 +300,11 @@ static int control_loop(void *dummy)
+ /* i2c probing and setup */
+ /************************************************************************/
+
+-static int
+-do_attach( struct i2c_adapter *adapter )
++static void do_attach(struct i2c_adapter *adapter)
+ {
++ struct i2c_board_info info = { };
++ struct device_node *np;
++
+ /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
+ static const unsigned short scan_ds1775[] = {
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+@@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
+ I2C_CLIENT_END
+ };
+
+- if( strncmp(adapter->name, "uni-n", 5) )
+- return 0;
+-
+- if( !x.running ) {
+- struct i2c_board_info info;
++ if (x.running || strncmp(adapter->name, "uni-n", 5))
++ return;
+
+- memset(&info, 0, sizeof(struct i2c_board_info));
+- strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
++ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
++ if (np) {
++ of_node_put(np);
++ } else {
++ strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
+ i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
++ }
+
+- strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
++ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
++ if (np) {
++ of_node_put(np);
++ } else {
++ strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
+ i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
+-
+- if( x.thermostat && x.fan ) {
+- x.running = 1;
+- x.poll_task = kthread_run(control_loop, NULL, "g4fand");
+- }
+ }
+- return 0;
+ }
+
+ static int
+@@ -404,8 +405,8 @@ out:
+ enum chip { ds1775, adm1030 };
+
+ static const struct i2c_device_id therm_windtunnel_id[] = {
+- { "therm_ds1775", ds1775 },
+- { "therm_adm1030", adm1030 },
++ { "MAC,ds1775", ds1775 },
++ { "MAC,adm1030", adm1030 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
+@@ -414,6 +415,7 @@ static int
+ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+ {
+ struct i2c_adapter *adapter = cl->adapter;
++ int ret = 0;
+
+ if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE) )
+@@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+
+ switch (id->driver_data) {
+ case adm1030:
+- return attach_fan( cl );
++ ret = attach_fan(cl);
++ break;
+ case ds1775:
+- return attach_thermostat(cl);
++ ret = attach_thermostat(cl);
++ break;
+ }
+- return 0;
++
++ if (!x.running && x.thermostat && x.fan) {
++ x.running = 1;
++ x.poll_task = kthread_run(control_loop, NULL, "g4fand");
++ }
++
++ return ret;
+ }
+
+static struct i2c_driver g4fan_driver = {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 92261c946e2a..3afc0e59a2bd 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -201,6 +201,11 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+ u16 command_id, bool capture)
+ {
++ if (unlikely(!queue->comp_ctx)) {
++ pr_err("Completion context is NULL\n");
++ return NULL;
++ }
++
+ if (unlikely(command_id >= queue->q_depth)) {
+ pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, queue->q_depth);
+@@ -842,6 +847,24 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
+ 0);
+ }
+
++static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
++{
++ struct ena_admin_feature_rss_flow_hash_control *hash_key =
++ (ena_dev->rss).hash_key;
++
++ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
++ /* The key is stored in the device in u32 array
++ * as well as the API requires the key to be passed in this
++ * format. Thus the size of our array should be divided by 4
++ */
++ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
++}
++
++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
++{
++ return ena_dev->rss.hash_func;
++}
++
+ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+@@ -2075,15 +2098,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+
+ switch (func) {
+ case ENA_ADMIN_TOEPLITZ:
+- if (key_len > sizeof(hash_key->key)) {
+- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
+- key_len, sizeof(hash_key->key));
+- return -EINVAL;
++ if (key) {
++ if (key_len != sizeof(hash_key->key)) {
++ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
++ key_len, sizeof(hash_key->key));
++ return -EINVAL;
++ }
++ memcpy(hash_key->key, key, key_len);
++ rss->hash_init_val = init_val;
++ hash_key->keys_num = key_len >> 2;
+ }
+-
+- memcpy(hash_key->key, key, key_len);
+- rss->hash_init_val = init_val;
+- hash_key->keys_num = key_len >> 2;
+ break;
+ case ENA_ADMIN_CRC32:
+ rss->hash_init_val = init_val;
+@@ -2120,7 +2144,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
+ if (unlikely(rc))
+ return rc;
+
+- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
++ /* ffs() returns 1 in case the lsb is set */
++ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
++ if (rss->hash_func)
++ rss->hash_func--;
++
+ if (func)
+ *func = rss->hash_func;
+
+@@ -2408,6 +2436,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
+ if (unlikely(rc))
+ goto err_hash_key;
+
++ ena_com_hash_key_fill_default_key(ena_dev);
++
+ rc = ena_com_hash_ctrl_init(ena_dev);
+ if (unlikely(rc))
+ goto err_hash_ctrl;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
+index 7b784f8a06a6..7272fb0d858d 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
+@@ -42,6 +42,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ #include <linux/wait.h>
++#include <linux/netdevice.h>
+
+ #include "ena_common_defs.h"
+ #include "ena_admin_defs.h"
+@@ -631,6 +632,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
+ */
+ void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+
++/* ena_com_get_current_hash_function - Get RSS hash function
++ * @ena_dev: ENA communication layer struct
++ *
++ * Return the current hash function.
++ * @return: 0 or one of the ena_admin_hash_functions values.
++ */
++int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
++
+ /* ena_com_fill_hash_function - Fill RSS hash function
+ * @ena_dev: ENA communication layer struct
+ * @func: The hash function (Toeplitz or crc)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index eb9e07fa427e..66f992510e0e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -649,6 +649,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
+ return ENA_HASH_KEY_SIZE;
+ }
+
++static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
++{
++ struct ena_com_dev *ena_dev = adapter->ena_dev;
++ int i, rc;
++
++ if (!indir)
++ return 0;
++
++ rc = ena_com_indirect_table_get(ena_dev, indir);
++ if (rc)
++ return rc;
++
++ /* Our internal representation of the indices is: even indices
++ * for Tx and uneven indices for Rx. We need to convert the Rx
++ * indices to be consecutive
++ */
++ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
++ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
++
++ return rc;
++}
++
+ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+ {
+@@ -657,11 +679,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 func;
+ int rc;
+
+- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
++ rc = ena_indirection_table_get(adapter, indir);
+ if (rc)
+ return rc;
+
++ /* We call this function in order to check if the device
++ * supports getting/setting the hash function.
++ */
+ rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
++ if (rc) {
++ if (rc == -EOPNOTSUPP) {
++ key = NULL;
++ hfunc = NULL;
++ rc = 0;
++ }
++
++ return rc;
++ }
++
+ if (rc)
+ return rc;
+
+@@ -670,7 +706,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ func = ETH_RSS_HASH_TOP;
+ break;
+ case ENA_ADMIN_CRC32:
+- func = ETH_RSS_HASH_XOR;
++ func = ETH_RSS_HASH_CRC32;
+ break;
+ default:
+ netif_err(adapter, drv, netdev,
+@@ -713,10 +749,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ }
+
+ switch (hfunc) {
++ case ETH_RSS_HASH_NO_CHANGE:
++ func = ena_com_get_current_hash_function(ena_dev);
++ break;
+ case ETH_RSS_HASH_TOP:
+ func = ENA_ADMIN_TOEPLITZ;
+ break;
+- case ETH_RSS_HASH_XOR:
++ case ETH_RSS_HASH_CRC32:
+ func = ENA_ADMIN_CRC32;
+ break;
+ default:
+@@ -817,6 +856,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
+ .get_channels = ena_get_channels,
+ .get_tunable = ena_get_tunable,
+ .set_tunable = ena_set_tunable,
++ .get_ts_info = ethtool_op_get_ts_info,
+ };
+
+ void ena_set_ethtool_ops(struct net_device *netdev)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 9afb19ebba58..8736718b1735 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2847,8 +2847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
+- adapter->keep_alive_timeout);
++ keep_alive_expired = adapter->last_keep_alive_jiffies +
++ adapter->keep_alive_timeout;
+ if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
+ netif_err(adapter, drv, adapter->netdev,
+ "Keep alive watchdog timeout.\n");
+@@ -2950,7 +2950,7 @@ static void ena_timer_service(struct timer_list *t)
+ }
+
+ /* Reset the timer */
+- mod_timer(&adapter->timer_service, jiffies + HZ);
++ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+ }
+
+ static int ena_calc_io_queue_num(struct pci_dev *pdev,
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 7c7ae56c52cf..f4783effe5c0 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -113,6 +113,8 @@
+
+ #define ENA_IO_TXQ_IDX(q) (2 * (q))
+ #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
++#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
++#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
+
+ #define ENA_MGMNT_IRQ_IDX 0
+ #define ENA_IO_IRQ_FIRST_IDX 1
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+index 50dd6bf176d0..3a489b2b99c9 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+@@ -2034,7 +2034,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+- XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
++ XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
+ if (!ndev)
+ return -ENOMEM;
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 8cc34b0bedc3..15dcfb6704e5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -399,8 +399,10 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
+ dx_buff->len,
+ DMA_TO_DEVICE);
+
+- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
++ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
++ ret = 0;
+ goto exit;
++ }
+
+ first = dx_buff;
+ dx_buff->len_pkt = skb->len;
+@@ -530,10 +532,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+ if (likely(frags)) {
+ err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
+- if (err >= 0) {
+- ++ring->stats.tx.packets;
+- ring->stats.tx.bytes += skb->len;
+- }
+ } else {
+ err = NETDEV_TX_BUSY;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index b3c7994d73eb..b03e5fd4327e 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -162,9 +162,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
+ }
+ }
+
+- if (unlikely(buff->is_eop))
+- dev_kfree_skb_any(buff->skb);
++ if (unlikely(buff->is_eop)) {
++ ++self->stats.rx.packets;
++ self->stats.tx.bytes += buff->skb->len;
+
++ dev_kfree_skb_any(buff->skb);
++ }
+ buff->pa = 0U;
+ buff->eop_index = 0xffffU;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head);
+diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
+index 3cdf63e35b53..4054cf9db818 100644
+--- a/drivers/net/ethernet/mscc/ocelot_board.c
++++ b/drivers/net/ethernet/mscc/ocelot_board.c
+@@ -105,6 +105,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+ if (err != 4)
+ break;
+
++ /* At this point the IFH was read correctly, so it is safe to
++ * presume that there is no error. The err needs to be reset
++ * otherwise a frame could come in CPU queue between the while
++ * condition and the check for error later on. And in that case
++ * the new frame is just removed and not processed.
++ */
++ err = 0;
++
+ ocelot_parse_ifh(ifh, &info);
+
+ dev = ocelot->ports[info.port]->dev;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
+index d242a5724069..dc3be8a4acf4 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede.h
++++ b/drivers/net/ethernet/qlogic/qede/qede.h
+@@ -162,6 +162,8 @@ struct qede_rdma_dev {
+ struct list_head entry;
+ struct list_head rdma_event_list;
+ struct workqueue_struct *rdma_wq;
++ struct kref refcnt;
++ struct completion event_comp;
+ };
+
+ struct qede_ptp;
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+index 1900bf7e67d1..cd12fb919ad5 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+@@ -57,6 +57,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
+ static int qede_rdma_create_wq(struct qede_dev *edev)
+ {
+ INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
++ kref_init(&edev->rdma_info.refcnt);
++ init_completion(&edev->rdma_info.event_comp);
++
+ edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ if (!edev->rdma_info.rdma_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+@@ -81,8 +84,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev)
+ }
+ }
+
++static void qede_rdma_complete_event(struct kref *ref)
++{
++ struct qede_rdma_dev *rdma_dev =
++ container_of(ref, struct qede_rdma_dev, refcnt);
++
++ /* no more events will be added after this */
++ complete(&rdma_dev->event_comp);
++}
++
+ static void qede_rdma_destroy_wq(struct qede_dev *edev)
+ {
++ /* Avoid race with add_event flow, make sure it finishes before
++ * we start accessing the list and cleaning up the work
++ */
++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
++ wait_for_completion(&edev->rdma_info.event_comp);
++
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
+ }
+@@ -287,15 +305,24 @@ static void qede_rdma_add_event(struct qede_dev *edev,
+ if (!edev->rdma_info.qedr_dev)
+ return;
+
++ /* We don't want the cleanup flow to start while we're allocating and
++ * scheduling the work
++ */
++ if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
++ return; /* already being destroyed */
++
+ event_node = qede_rdma_get_free_event_node(edev);
+ if (!event_node)
+- return;
++ goto out;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ queue_work(edev->rdma_info.rdma_wq, &event_node->work);
++
++out:
++ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
+ }
+
+ void qede_rdma_dev_event_open(struct qede_dev *edev)
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index dbfd3a0c97d3..77a9a753d979 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -110,7 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
+
+ init_waitqueue_head(&net_device->wait_drain);
+ net_device->destroy = false;
+- net_device->tx_disable = false;
++ net_device->tx_disable = true;
+
+ net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+ net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 7ab576d8b622..bdb55db4523b 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -984,6 +984,7 @@ static int netvsc_attach(struct net_device *ndev,
+ }
+
+ /* In any case device is now ready */
++ nvdev->tx_disable = false;
+ netif_device_attach(ndev);
+
+ /* Note: enable and attach happen when sub-channels setup */
+@@ -2336,6 +2337,8 @@ static int netvsc_probe(struct hv_device *dev,
+ else
+ net->max_mtu = ETH_DATA_LEN;
+
++ nvdev->tx_disable = false;
++
+ ret = register_netdevice(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
+index 46fe1ae919a3..51ce3ea17fb3 100644
+--- a/drivers/net/phy/mdio-bcm-iproc.c
++++ b/drivers/net/phy/mdio-bcm-iproc.c
+@@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++int iproc_mdio_resume(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
++
++ /* restore the mii clock configuration */
++ iproc_mdio_config_clk(priv->base);
++
++ return 0;
++}
++
++static const struct dev_pm_ops iproc_mdio_pm_ops = {
++ .resume = iproc_mdio_resume
++};
++#endif /* CONFIG_PM_SLEEP */
++
+ static const struct of_device_id iproc_mdio_of_match[] = {
+ { .compatible = "brcm,iproc-mdio", },
+ { /* sentinel */ },
+@@ -198,6 +215,9 @@ static struct platform_driver iproc_mdio_driver = {
+ .driver = {
+ .name = "iproc-mdio",
+ .of_match_table = iproc_mdio_of_match,
++#ifdef CONFIG_PM_SLEEP
++ .pm = &iproc_mdio_pm_ops,
++#endif
+ },
+ .probe = iproc_mdio_probe,
+ .remove = iproc_mdio_remove,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 13c8788e3b6b..a04f8577d9f2 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -63,7 +63,6 @@ enum qmi_wwan_flags {
+
+ enum qmi_wwan_quirks {
+ QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
+- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
+ };
+
+ struct qmimux_hdr {
+@@ -853,16 +852,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
+ .data = QMI_WWAN_QUIRK_DTR,
+ };
+
+-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+- .description = "WWAN/QMI device",
+- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+- .bind = qmi_wwan_bind,
+- .unbind = qmi_wwan_unbind,
+- .manage_power = qmi_wwan_manage_power,
+- .rx_fixup = qmi_wwan_rx_fixup,
+- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+-};
+-
+ #define HUAWEI_VENDOR_ID 0x12D1
+
+ /* map QMI/wwan function by a fixed interface number */
+@@ -883,14 +872,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+ #define QMI_GOBI_DEVICE(vend, prod) \
+ QMI_FIXED_INTF(vend, prod, 0)
+
+-/* Quectel does not use fixed interface numbers on at least some of their
+- * devices. We need to check the number of endpoints to ensure that we bind to
+- * the correct interface.
++/* Many devices have QMI and DIAG functions which are distinguishable
++ * from other vendor specific functions by class, subclass and
++ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
++ * and is silently rejected when probed.
++ *
++ * This makes it possible to match dynamically numbered QMI functions
++ * as seen on e.g. many Quectel modems.
+ */
+-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
++#define QMI_MATCH_FF_FF_FF(vend, prod) \
+ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+
+ static const struct usb_device_id products[] = {
+ /* 1. CDC ECM like devices match on the control interface */
+@@ -996,10 +989,10 @@ static const struct usb_device_id products[] = {
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
++ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+@@ -1298,6 +1291,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
++ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
+ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+@@ -1389,7 +1383,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ {
+ struct usb_device_id *id = (struct usb_device_id *)prod;
+ struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+- const struct driver_info *info;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality. Which, if required, can be
+@@ -1425,12 +1418,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ * different. Ignore the current interface if the number of endpoints
+ * equals the number for the diag interface (two).
+ */
+- info = (void *)id->driver_info;
+-
+- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+- if (desc->bNumEndpoints == 2)
+- return -ENODEV;
+- }
++ if (desc->bNumEndpoints == 2)
++ return -ENODEV;
+
+ return usbnet_probe(intf, id);
+ }
1219 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1220 +index 4f5571123f70..24da49615135 100644
1221 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1222 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1223 +@@ -3283,6 +3283,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1224 + spin_lock_init(&trans_pcie->reg_lock);
1225 + mutex_init(&trans_pcie->mutex);
1226 + init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1227 ++
1228 ++ trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
1229 ++ WQ_HIGHPRI | WQ_UNBOUND, 1);
1230 ++ if (!trans_pcie->rba.alloc_wq) {
1231 ++ ret = -ENOMEM;
1232 ++ goto out_free_trans;
1233 ++ }
1234 ++ INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
1235 ++
1236 + trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
1237 + if (!trans_pcie->tso_hdr_page) {
1238 + ret = -ENOMEM;
1239 +@@ -3485,10 +3494,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1240 + trans_pcie->inta_mask = CSR_INI_SET_MASK;
1241 + }
1242 +
1243 +- trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
1244 +- WQ_HIGHPRI | WQ_UNBOUND, 1);
1245 +- INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
1246 +-
1247 + #ifdef CONFIG_IWLWIFI_PCIE_RTPM
1248 + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
1249 + #else
1250 +@@ -3501,6 +3506,8 @@ out_free_ict:
1251 + iwl_pcie_free_ict(trans);
1252 + out_no_pci:
1253 + free_percpu(trans_pcie->tso_hdr_page);
1254 ++ destroy_workqueue(trans_pcie->rba.alloc_wq);
1255 ++out_free_trans:
1256 + iwl_trans_free(trans);
1257 + return ERR_PTR(ret);
1258 + }
1259 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
1260 +index e39bb5c42c9a..7e526014b638 100644
1261 +--- a/drivers/net/wireless/marvell/mwifiex/main.h
1262 ++++ b/drivers/net/wireless/marvell/mwifiex/main.h
1263 +@@ -1294,19 +1294,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
1264 + return pos;
1265 + }
1266 +
1267 +-/* This function return interface number with the same bss_type.
1268 +- */
1269 +-static inline u8
1270 +-mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
1271 +-{
1272 +- u8 i, num = 0;
1273 +-
1274 +- for (i = 0; i < adapter->priv_num; i++)
1275 +- if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
1276 +- num++;
1277 +- return num;
1278 +-}
1279 +-
1280 + /*
1281 + * This function returns the correct private structure pointer based
1282 + * upon the BSS type and BSS number.
1283 +diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
1284 +index 6058c48d56dc..b6b7bbe168eb 100644
1285 +--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
1286 ++++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
1287 +@@ -897,7 +897,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1288 + u8 *peer, *pos, *end;
1289 + u8 i, action, basic;
1290 + u16 cap = 0;
1291 +- int ie_len = 0;
1292 ++ int ies_len = 0;
1293 +
1294 + if (len < (sizeof(struct ethhdr) + 3))
1295 + return;
1296 +@@ -919,7 +919,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1297 + pos = buf + sizeof(struct ethhdr) + 4;
1298 + /* payload 1+ category 1 + action 1 + dialog 1 */
1299 + cap = get_unaligned_le16(pos);
1300 +- ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
1301 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
1302 + pos += 2;
1303 + break;
1304 +
1305 +@@ -929,7 +929,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1306 + /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
1307 + pos = buf + sizeof(struct ethhdr) + 6;
1308 + cap = get_unaligned_le16(pos);
1309 +- ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
1310 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
1311 + pos += 2;
1312 + break;
1313 +
1314 +@@ -937,7 +937,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1315 + if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
1316 + return;
1317 + pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
1318 +- ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
1319 ++ ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
1320 + break;
1321 + default:
1322 + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
1323 +@@ -950,33 +950,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1324 +
1325 + sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
1326 +
1327 +- for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
1328 +- if (pos + 2 + pos[1] > end)
1329 ++ for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
1330 ++ u8 ie_len = pos[1];
1331 ++
1332 ++ if (pos + 2 + ie_len > end)
1333 + break;
1334 +
1335 + switch (*pos) {
1336 + case WLAN_EID_SUPP_RATES:
1337 +- if (pos[1] > 32)
1338 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
1339 + return;
1340 +- sta_ptr->tdls_cap.rates_len = pos[1];
1341 +- for (i = 0; i < pos[1]; i++)
1342 ++ sta_ptr->tdls_cap.rates_len = ie_len;
1343 ++ for (i = 0; i < ie_len; i++)
1344 + sta_ptr->tdls_cap.rates[i] = pos[i + 2];
1345 + break;
1346 +
1347 + case WLAN_EID_EXT_SUPP_RATES:
1348 +- if (pos[1] > 32)
1349 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
1350 + return;
1351 + basic = sta_ptr->tdls_cap.rates_len;
1352 +- if (pos[1] > 32 - basic)
1353 ++ if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
1354 + return;
1355 +- for (i = 0; i < pos[1]; i++)
1356 ++ for (i = 0; i < ie_len; i++)
1357 + sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
1358 +- sta_ptr->tdls_cap.rates_len += pos[1];
1359 ++ sta_ptr->tdls_cap.rates_len += ie_len;
1360 + break;
1361 + case WLAN_EID_HT_CAPABILITY:
1362 +- if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
1363 +- return;
1364 +- if (pos[1] != sizeof(struct ieee80211_ht_cap))
1365 ++ if (ie_len != sizeof(struct ieee80211_ht_cap))
1366 + return;
1367 + /* copy the ie's value into ht_capb*/
1368 + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
1369 +@@ -984,59 +984,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1370 + sta_ptr->is_11n_enabled = 1;
1371 + break;
1372 + case WLAN_EID_HT_OPERATION:
1373 +- if (pos > end -
1374 +- sizeof(struct ieee80211_ht_operation) - 2)
1375 +- return;
1376 +- if (pos[1] != sizeof(struct ieee80211_ht_operation))
1377 ++ if (ie_len != sizeof(struct ieee80211_ht_operation))
1378 + return;
1379 + /* copy the ie's value into ht_oper*/
1380 + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
1381 + sizeof(struct ieee80211_ht_operation));
1382 + break;
1383 + case WLAN_EID_BSS_COEX_2040:
1384 +- if (pos > end - 3)
1385 +- return;
1386 +- if (pos[1] != 1)
1387 ++ if (ie_len != sizeof(pos[2]))
1388 + return;
1389 + sta_ptr->tdls_cap.coex_2040 = pos[2];
1390 + break;
1391 + case WLAN_EID_EXT_CAPABILITY:
1392 +- if (pos > end - sizeof(struct ieee_types_header))
1393 +- return;
1394 +- if (pos[1] < sizeof(struct ieee_types_header))
1395 ++ if (ie_len < sizeof(struct ieee_types_header))
1396 + return;
1397 +- if (pos[1] > 8)
1398 ++ if (ie_len > 8)
1399 + return;
1400 + memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
1401 + sizeof(struct ieee_types_header) +
1402 +- min_t(u8, pos[1], 8));
1403 ++ min_t(u8, ie_len, 8));
1404 + break;
1405 + case WLAN_EID_RSN:
1406 +- if (pos > end - sizeof(struct ieee_types_header))
1407 ++ if (ie_len < sizeof(struct ieee_types_header))
1408 + return;
1409 +- if (pos[1] < sizeof(struct ieee_types_header))
1410 +- return;
1411 +- if (pos[1] > IEEE_MAX_IE_SIZE -
1412 ++ if (ie_len > IEEE_MAX_IE_SIZE -
1413 + sizeof(struct ieee_types_header))
1414 + return;
1415 + memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
1416 + sizeof(struct ieee_types_header) +
1417 +- min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
1418 ++ min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
1419 + sizeof(struct ieee_types_header)));
1420 + break;
1421 + case WLAN_EID_QOS_CAPA:
1422 +- if (pos > end - 3)
1423 +- return;
1424 +- if (pos[1] != 1)
1425 ++ if (ie_len != sizeof(pos[2]))
1426 + return;
1427 + sta_ptr->tdls_cap.qos_info = pos[2];
1428 + break;
1429 + case WLAN_EID_VHT_OPERATION:
1430 + if (priv->adapter->is_hw_11ac_capable) {
1431 +- if (pos > end -
1432 +- sizeof(struct ieee80211_vht_operation) - 2)
1433 +- return;
1434 +- if (pos[1] !=
1435 ++ if (ie_len !=
1436 + sizeof(struct ieee80211_vht_operation))
1437 + return;
1438 + /* copy the ie's value into vhtoper*/
1439 +@@ -1046,10 +1032,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1440 + break;
1441 + case WLAN_EID_VHT_CAPABILITY:
1442 + if (priv->adapter->is_hw_11ac_capable) {
1443 +- if (pos > end -
1444 +- sizeof(struct ieee80211_vht_cap) - 2)
1445 +- return;
1446 +- if (pos[1] != sizeof(struct ieee80211_vht_cap))
1447 ++ if (ie_len != sizeof(struct ieee80211_vht_cap))
1448 + return;
1449 + /* copy the ie's value into vhtcap*/
1450 + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
1451 +@@ -1059,9 +1042,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
1452 + break;
1453 + case WLAN_EID_AID:
1454 + if (priv->adapter->is_hw_11ac_capable) {
1455 +- if (pos > end - 4)
1456 +- return;
1457 +- if (pos[1] != 2)
1458 ++ if (ie_len != sizeof(u16))
1459 + return;
1460 + sta_ptr->tdls_cap.aid =
1461 + get_unaligned_le16((pos + 2));
1462 +diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
1463 +index d0207f8e68b7..dcef73eb4120 100644
1464 +--- a/drivers/nfc/pn544/i2c.c
1465 ++++ b/drivers/nfc/pn544/i2c.c
1466 +@@ -236,6 +236,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
1467 +
1468 + out:
1469 + gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity);
1470 ++ usleep_range(10000, 15000);
1471 + }
1472 +
1473 + static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
1474 +diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
1475 +index c6e710a713d3..527b87959742 100644
1476 +--- a/drivers/pwm/pwm-omap-dmtimer.c
1477 ++++ b/drivers/pwm/pwm-omap-dmtimer.c
1478 +@@ -259,7 +259,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
1479 + if (!timer_pdev) {
1480 + dev_err(&pdev->dev, "Unable to find Timer pdev\n");
1481 + ret = -ENODEV;
1482 +- goto put;
1483 ++ goto err_find_timer_pdev;
1484 + }
1485 +
1486 + timer_pdata = dev_get_platdata(&timer_pdev->dev);
1487 +@@ -267,7 +267,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
1488 + dev_dbg(&pdev->dev,
1489 + "dmtimer pdata structure NULL, deferring probe\n");
1490 + ret = -EPROBE_DEFER;
1491 +- goto put;
1492 ++ goto err_platdata;
1493 + }
1494 +
1495 + pdata = timer_pdata->timer_ops;
1496 +@@ -286,19 +286,19 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
1497 + !pdata->write_counter) {
1498 + dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
1499 + ret = -EINVAL;
1500 +- goto put;
1501 ++ goto err_platdata;
1502 + }
1503 +
1504 + if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
1505 + dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
1506 + ret = -ENODEV;
1507 +- goto put;
1508 ++ goto err_timer_property;
1509 + }
1510 +
1511 + dm_timer = pdata->request_by_node(timer);
1512 + if (!dm_timer) {
1513 + ret = -EPROBE_DEFER;
1514 +- goto put;
1515 ++ goto err_request_timer;
1516 + }
1517 +
1518 + omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
1519 +@@ -355,7 +355,14 @@ err_pwmchip_add:
1520 + err_alloc_omap:
1521 +
1522 + pdata->free(dm_timer);
1523 +-put:
1524 ++err_request_timer:
1525 ++
1526 ++err_timer_property:
1527 ++err_platdata:
1528 ++
1529 ++ put_device(&timer_pdev->dev);
1530 ++err_find_timer_pdev:
1531 ++
1532 + of_node_put(timer);
1533 +
1534 + return ret;
1535 +@@ -375,6 +382,8 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
1536 +
1537 + omap->pdata->free(omap->dm_timer);
1538 +
1539 ++ put_device(&omap->dm_timer_pdev->dev);
1540 ++
1541 + mutex_destroy(&omap->mutex);
1542 +
1543 + return 0;
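
The relabeled error path follows the usual kernel unwind idiom: one label per acquired resource, placed in reverse acquisition order, so each failure site skips exactly the cleanups that do not apply to it. A compact userspace sketch of the same shape (hypothetical resources):

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
    char *a = malloc(16);           /* resource 1 */
    if (!a)
        return -1;

    char *b = malloc(32);           /* resource 2 */
    if (!b)
        goto err_b;                 /* undo only resource 1 */

    FILE *f = fopen("/dev/null", "w");  /* resource 3 */
    if (!f)
        goto err_f;                 /* undo 2, then 1 */

    fputs("ok", f);

    fclose(f);
    free(b);
    free(a);
    return 0;

err_f:
    free(b);
err_b:
    free(a);
    return -1;
}

int main(void) { return do_work() ? 1 : 0; }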
1544 +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
1545 +index 7e85d238767b..1c799ddd9709 100644
1546 +--- a/drivers/s390/crypto/ap_bus.h
1547 ++++ b/drivers/s390/crypto/ap_bus.h
1548 +@@ -158,7 +158,7 @@ struct ap_card {
1549 + unsigned int functions; /* AP device function bitfield. */
1550 + int queue_depth; /* AP queue depth.*/
1551 + int id; /* AP card number. */
1552 +- atomic_t total_request_count; /* # requests ever for this AP device.*/
1553 ++ atomic64_t total_request_count; /* # requests ever for this AP device.*/
1554 + };
1555 +
1556 + #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
1557 +@@ -175,7 +175,7 @@ struct ap_queue {
1558 + enum ap_state state; /* State of the AP device. */
1559 + int pendingq_count; /* # requests on pendingq list. */
1560 + int requestq_count; /* # requests on requestq list. */
1561 +- int total_request_count; /* # requests ever for this AP device.*/
1562 ++ u64 total_request_count; /* # requests ever for this AP device.*/
1563 + int request_timeout; /* Request timeout in jiffies. */
1564 + struct timer_list timeout; /* Timer for request timeouts. */
1565 + struct list_head pendingq; /* List of message sent to AP queue. */
1566 +diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
1567 +index 63b4cc6cd7e5..e85bfca1ed16 100644
1568 +--- a/drivers/s390/crypto/ap_card.c
1569 ++++ b/drivers/s390/crypto/ap_card.c
1570 +@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
1571 + char *buf)
1572 + {
1573 + struct ap_card *ac = to_ap_card(dev);
1574 +- unsigned int req_cnt;
1575 ++ u64 req_cnt;
1576 +
1577 + req_cnt = 0;
1578 + spin_lock_bh(&ap_list_lock);
1579 +- req_cnt = atomic_read(&ac->total_request_count);
1580 ++ req_cnt = atomic64_read(&ac->total_request_count);
1581 + spin_unlock_bh(&ap_list_lock);
1582 +- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
1583 ++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
1584 + }
1585 +
1586 + static ssize_t request_count_store(struct device *dev,
1587 +@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
1588 + for_each_ap_queue(aq, ac)
1589 + aq->total_request_count = 0;
1590 + spin_unlock_bh(&ap_list_lock);
1591 +- atomic_set(&ac->total_request_count, 0);
1592 ++ atomic64_set(&ac->total_request_count, 0);
1593 +
1594 + return count;
1595 + }
1596 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
1597 +index 576ac08777c5..e1647da122f7 100644
1598 +--- a/drivers/s390/crypto/ap_queue.c
1599 ++++ b/drivers/s390/crypto/ap_queue.c
1600 +@@ -470,12 +470,12 @@ static ssize_t request_count_show(struct device *dev,
1601 + char *buf)
1602 + {
1603 + struct ap_queue *aq = to_ap_queue(dev);
1604 +- unsigned int req_cnt;
1605 ++ u64 req_cnt;
1606 +
1607 + spin_lock_bh(&aq->lock);
1608 + req_cnt = aq->total_request_count;
1609 + spin_unlock_bh(&aq->lock);
1610 +- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
1611 ++ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
1612 + }
1613 +
1614 + static ssize_t request_count_store(struct device *dev,
1615 +@@ -667,7 +667,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1616 + list_add_tail(&ap_msg->list, &aq->requestq);
1617 + aq->requestq_count++;
1618 + aq->total_request_count++;
1619 +- atomic_inc(&aq->card->total_request_count);
1620 ++ atomic64_inc(&aq->card->total_request_count);
1621 + /* Send/receive as many request from the queue as possible. */
1622 + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
1623 + spin_unlock_bh(&aq->lock);
1624 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
1625 +index b2737bfeb8bb..23c24a699cef 100644
1626 +--- a/drivers/s390/crypto/zcrypt_api.c
1627 ++++ b/drivers/s390/crypto/zcrypt_api.c
1628 +@@ -190,8 +190,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
1629 + weight += atomic_read(&zc->load);
1630 + pref_weight += atomic_read(&pref_zc->load);
1631 + if (weight == pref_weight)
1632 +- return atomic_read(&zc->card->total_request_count) >
1633 +- atomic_read(&pref_zc->card->total_request_count);
1634 ++ return atomic64_read(&zc->card->total_request_count) >
1635 ++ atomic64_read(&pref_zc->card->total_request_count);
1636 + return weight > pref_weight;
1637 + }
1638 +
1639 +@@ -719,11 +719,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1640 + spin_unlock(&zcrypt_list_lock);
1641 + }
1642 +
1643 +-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
1644 ++static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1645 + {
1646 + struct zcrypt_card *zc;
1647 + struct zcrypt_queue *zq;
1648 + int card;
1649 ++ u64 cnt;
1650 +
1651 + memset(reqcnt, 0, sizeof(int) * max_adapters);
1652 + spin_lock(&zcrypt_list_lock);
1653 +@@ -735,8 +736,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
1654 + || card >= max_adapters)
1655 + continue;
1656 + spin_lock(&zq->queue->lock);
1657 +- reqcnt[card] = zq->queue->total_request_count;
1658 ++ cnt = zq->queue->total_request_count;
1659 + spin_unlock(&zq->queue->lock);
1660 ++ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
1661 + }
1662 + }
1663 + local_bh_enable();
1664 +@@ -907,9 +909,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1665 + return 0;
1666 + }
1667 + case ZCRYPT_PERDEV_REQCNT: {
1668 +- int *reqcnt;
1669 ++ u32 *reqcnt;
1670 +
1671 +- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
1672 ++ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1673 + if (!reqcnt)
1674 + return -ENOMEM;
1675 + zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1676 +@@ -966,7 +968,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1677 + }
1678 + case Z90STAT_PERDEV_REQCNT: {
1679 + /* the old ioctl supports only 64 adapters */
1680 +- int reqcnt[MAX_ZDEV_CARDIDS];
1681 ++ u32 reqcnt[MAX_ZDEV_CARDIDS];
1682 +
1683 + zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1684 + if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
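
Across ap_bus.h, ap_card.c, ap_queue.c and zcrypt_api.c the request counter is widened to 64 bits so it cannot wrap, and only the legacy 32-bit ioctl clamps the value on report. A userspace sketch of that split (C11 atomics standing in for atomic64_t; names hypothetical):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t total_requests;

static void account_request(void)
{
    atomic_fetch_add(&total_requests, 1);   /* 64-bit, never wraps in practice */
}

/* Legacy interface reports a u32: clamp instead of truncating. */
static uint32_t report_requests_u32(void)
{
    uint64_t cnt = atomic_load(&total_requests);

    return (cnt < UINT32_MAX) ? (uint32_t)cnt : UINT32_MAX;
}

int main(void)
{
    account_request();
    printf("%u\n", report_requests_u32());
    return 0;
}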
1685 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
1686 +index aa90004f49e2..eb917e93fa72 100644
1687 +--- a/drivers/s390/net/qeth_l2_main.c
1688 ++++ b/drivers/s390/net/qeth_l2_main.c
1689 +@@ -2148,15 +2148,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
1690 +
1691 + QETH_CARD_TEXT(card, 2, "vniccsch");
1692 +
1693 +- /* do not change anything if BridgePort is enabled */
1694 +- if (qeth_bridgeport_is_in_use(card))
1695 +- return -EBUSY;
1696 +-
1697 + /* check if characteristic and enable/disable are supported */
1698 + if (!(card->options.vnicc.sup_chars & vnicc) ||
1699 + !(card->options.vnicc.set_char_sup & vnicc))
1700 + return -EOPNOTSUPP;
1701 +
1702 ++ if (qeth_bridgeport_is_in_use(card))
1703 ++ return -EBUSY;
1704 ++
1705 + /* set enable/disable command and store wanted characteristic */
1706 + if (state) {
1707 + cmd = IPA_VNICC_ENABLE;
1708 +@@ -2202,14 +2201,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
1709 +
1710 + QETH_CARD_TEXT(card, 2, "vniccgch");
1711 +
1712 +- /* do not get anything if BridgePort is enabled */
1713 +- if (qeth_bridgeport_is_in_use(card))
1714 +- return -EBUSY;
1715 +-
1716 + /* check if characteristic is supported */
1717 + if (!(card->options.vnicc.sup_chars & vnicc))
1718 + return -EOPNOTSUPP;
1719 +
1720 ++ if (qeth_bridgeport_is_in_use(card))
1721 ++ return -EBUSY;
1722 ++
1723 + /* if card is ready, query current VNICC state */
1724 + if (qeth_card_hw_is_reachable(card))
1725 + rc = qeth_l2_vnicc_query_chars(card);
1726 +@@ -2227,15 +2225,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
1727 +
1728 + QETH_CARD_TEXT(card, 2, "vniccsto");
1729 +
1730 +- /* do not change anything if BridgePort is enabled */
1731 +- if (qeth_bridgeport_is_in_use(card))
1732 +- return -EBUSY;
1733 +-
1734 + /* check if characteristic and set_timeout are supported */
1735 + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
1736 + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
1737 + return -EOPNOTSUPP;
1738 +
1739 ++ if (qeth_bridgeport_is_in_use(card))
1740 ++ return -EBUSY;
1741 ++
1742 + /* do we need to do anything? */
1743 + if (card->options.vnicc.learning_timeout == timeout)
1744 + return rc;
1745 +@@ -2264,14 +2261,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
1746 +
1747 + QETH_CARD_TEXT(card, 2, "vniccgto");
1748 +
1749 +- /* do not get anything if BridgePort is enabled */
1750 +- if (qeth_bridgeport_is_in_use(card))
1751 +- return -EBUSY;
1752 +-
1753 + /* check if characteristic and get_timeout are supported */
1754 + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
1755 + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
1756 + return -EOPNOTSUPP;
1757 ++
1758 ++ if (qeth_bridgeport_is_in_use(card))
1759 ++ return -EBUSY;
1760 ++
1761 + /* if card is ready, get timeout. Otherwise, just return stored value */
1762 + *timeout = card->options.vnicc.learning_timeout;
1763 + if (qeth_card_hw_is_reachable(card))
1764 +diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
1765 +index 257e254c6137..0ec6385eb15e 100644
1766 +--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
1767 ++++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
1768 +@@ -47,7 +47,8 @@
1769 + defined(CONFIG_ARCH_TEGRA_124_SOC) || \
1770 + defined(CONFIG_ARCH_TEGRA_132_SOC) || \
1771 + defined(CONFIG_ARCH_TEGRA_210_SOC) || \
1772 +- defined(CONFIG_ARCH_TEGRA_186_SOC)
1773 ++ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
1774 ++ defined(CONFIG_ARCH_TEGRA_194_SOC)
1775 + static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
1776 + {
1777 + if (WARN_ON(!fuse->base))
1778 +diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
1779 +index 1919f91fa756..8d16a41eacae 100644
1780 +--- a/drivers/thermal/broadcom/brcmstb_thermal.c
1781 ++++ b/drivers/thermal/broadcom/brcmstb_thermal.c
1782 +@@ -58,7 +58,7 @@
1783 + #define AVS_TMON_TP_TEST_ENABLE 0x20
1784 +
1785 + /* Default coefficients */
1786 +-#define AVS_TMON_TEMP_SLOPE -487
1787 ++#define AVS_TMON_TEMP_SLOPE 487
1788 + #define AVS_TMON_TEMP_OFFSET 410040
1789 +
1790 + /* HW related temperature constants */
1791 +@@ -117,23 +117,12 @@ struct brcmstb_thermal_priv {
1792 + struct thermal_zone_device *thermal;
1793 + };
1794 +
1795 +-static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
1796 +- int *offset)
1797 +-{
1798 +- *slope = thermal_zone_get_slope(tz);
1799 +- *offset = thermal_zone_get_offset(tz);
1800 +-}
1801 +-
1802 + /* Convert a HW code to a temperature reading (millidegree celsius) */
1803 + static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
1804 + u32 code)
1805 + {
1806 +- const int val = code & AVS_TMON_TEMP_MASK;
1807 +- int slope, offset;
1808 +-
1809 +- avs_tmon_get_coeffs(tz, &slope, &offset);
1810 +-
1811 +- return slope * val + offset;
1812 ++ return (AVS_TMON_TEMP_OFFSET -
1813 ++ (int)((code & AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE));
1814 + }
1815 +
1816 + /*
1817 +@@ -145,20 +134,18 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
1818 + static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
1819 + int temp, bool low)
1820 + {
1821 +- int slope, offset;
1822 +-
1823 + if (temp < AVS_TMON_TEMP_MIN)
1824 +- return AVS_TMON_TEMP_MAX; /* Maximum code value */
1825 +-
1826 +- avs_tmon_get_coeffs(tz, &slope, &offset);
1827 ++ return AVS_TMON_TEMP_MAX; /* Maximum code value */
1828 +
1829 +- if (temp >= offset)
1830 ++ if (temp >= AVS_TMON_TEMP_OFFSET)
1831 + return 0; /* Minimum code value */
1832 +
1833 + if (low)
1834 +- return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
1835 ++ return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp,
1836 ++ AVS_TMON_TEMP_SLOPE));
1837 + else
1838 +- return (u32)((offset - temp) / abs(slope));
1839 ++ return (u32)((AVS_TMON_TEMP_OFFSET - temp) /
1840 ++ AVS_TMON_TEMP_SLOPE);
1841 + }
1842 +
1843 + static int brcmstb_get_temp(void *data, int *temp)
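
With the per-zone coefficient lookup dropped, the conversion is a fixed linear map in both directions, rounding up only for the low trip point so the threshold is never undershot. A standalone sketch using the same constants as the hunk:

#include <stdint.h>
#include <stdio.h>

#define TEMP_SLOPE   487      /* millidegrees per code step */
#define TEMP_OFFSET  410040   /* millidegrees at code 0 */

static int code_to_temp(uint32_t code)
{
    return TEMP_OFFSET - (int)(code * TEMP_SLOPE);
}

static uint32_t temp_to_code(int temp, int low)
{
    if (temp >= TEMP_OFFSET)
        return 0;                      /* minimum code value */
    if (low)                           /* round up: don't undershoot */
        return (uint32_t)((TEMP_OFFSET - temp + TEMP_SLOPE - 1) / TEMP_SLOPE);
    return (uint32_t)((TEMP_OFFSET - temp) / TEMP_SLOPE);
}

int main(void)
{
    printf("%d\n", code_to_temp(temp_to_code(25000, 1)));
    return 0;
}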
1844 +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
1845 +index 06ed20dd01ba..cee0274806c5 100644
1846 +--- a/drivers/tty/sysrq.c
1847 ++++ b/drivers/tty/sysrq.c
1848 +@@ -546,7 +546,6 @@ void __handle_sysrq(int key, bool check_mask)
1849 + */
1850 + orig_log_level = console_loglevel;
1851 + console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
1852 +- pr_info("SysRq : ");
1853 +
1854 + op_p = __sysrq_get_key_op(key);
1855 + if (op_p) {
1856 +@@ -555,14 +554,15 @@ void __handle_sysrq(int key, bool check_mask)
1857 + * should not) and is the invoked operation enabled?
1858 + */
1859 + if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
1860 +- pr_cont("%s\n", op_p->action_msg);
1861 ++ pr_info("%s\n", op_p->action_msg);
1862 + console_loglevel = orig_log_level;
1863 + op_p->handler(key);
1864 + } else {
1865 +- pr_cont("This sysrq operation is disabled.\n");
1866 ++ pr_info("This sysrq operation is disabled.\n");
1867 ++ console_loglevel = orig_log_level;
1868 + }
1869 + } else {
1870 +- pr_cont("HELP : ");
1871 ++ pr_info("HELP : ");
1872 + /* Only print the help msg once per handler */
1873 + for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
1874 + if (sysrq_key_table[i]) {
1875 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1876 +index 124356dc39e1..88c8c158ec25 100644
1877 +--- a/drivers/vhost/net.c
1878 ++++ b/drivers/vhost/net.c
1879 +@@ -1187,10 +1187,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
1880 +
1881 + static struct socket *get_raw_socket(int fd)
1882 + {
1883 +- struct {
1884 +- struct sockaddr_ll sa;
1885 +- char buf[MAX_ADDR_LEN];
1886 +- } uaddr;
1887 + int r;
1888 + struct socket *sock = sockfd_lookup(fd, &r);
1889 +
1890 +@@ -1203,11 +1199,7 @@ static struct socket *get_raw_socket(int fd)
1891 + goto err;
1892 + }
1893 +
1894 +- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
1895 +- if (r < 0)
1896 +- goto err;
1897 +-
1898 +- if (uaddr.sa.sll_family != AF_PACKET) {
1899 ++ if (sock->sk->sk_family != AF_PACKET) {
1900 + r = -EPFNOSUPPORT;
1901 + goto err;
1902 + }
1903 +diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
1904 +index 56ad19608a9b..9d91ed59615d 100644
1905 +--- a/drivers/watchdog/wdat_wdt.c
1906 ++++ b/drivers/watchdog/wdat_wdt.c
1907 +@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
1908 +
1909 + memset(&r, 0, sizeof(r));
1910 + r.start = gas->address;
1911 +- r.end = r.start + gas->access_width - 1;
1912 ++ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
1913 + if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1914 + r.flags = IORESOURCE_MEM;
1915 + } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1916 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
1917 +index 1d377b7f2860..130bdca9e568 100644
1918 +--- a/fs/cifs/cifsacl.c
1919 ++++ b/fs/cifs/cifsacl.c
1920 +@@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
1921 + ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
1922 + *pmode |= (S_IXUGO & (*pbits_to_set));
1923 +
1924 +- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
1925 ++ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
1926 + return;
1927 + }
1928 +
1929 +@@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
1930 + if (mode & S_IXUGO)
1931 + *pace_flags |= SET_FILE_EXEC_RIGHTS;
1932 +
1933 +- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
1934 ++ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
1935 + mode, *pace_flags);
1936 + return;
1937 + }
1938 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1939 +index 6c62ce40608a..975f800b9dd4 100644
1940 +--- a/fs/cifs/connect.c
1941 ++++ b/fs/cifs/connect.c
1942 +@@ -3794,7 +3794,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
1943 + cifs_sb->mnt_gid = pvolume_info->linux_gid;
1944 + cifs_sb->mnt_file_mode = pvolume_info->file_mode;
1945 + cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
1946 +- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
1947 ++ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
1948 + cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
1949 +
1950 + cifs_sb->actimeo = pvolume_info->actimeo;
1951 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1952 +index 26154db6c87f..fbebf241dbf2 100644
1953 +--- a/fs/cifs/inode.c
1954 ++++ b/fs/cifs/inode.c
1955 +@@ -1579,7 +1579,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
1956 + struct TCP_Server_Info *server;
1957 + char *full_path;
1958 +
1959 +- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
1960 ++ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
1961 + mode, inode);
1962 +
1963 + cifs_sb = CIFS_SB(inode->i_sb);
1964 +diff --git a/fs/dax.c b/fs/dax.c
1965 +index f0d932fa39c2..d09701aa6f2f 100644
1966 +--- a/fs/dax.c
1967 ++++ b/fs/dax.c
1968 +@@ -1301,6 +1301,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1969 + lockdep_assert_held(&inode->i_rwsem);
1970 + }
1971 +
1972 ++ if (iocb->ki_flags & IOCB_NOWAIT)
1973 ++ flags |= IOMAP_NOWAIT;
1974 ++
1975 + while (iov_iter_count(iter)) {
1976 + ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1977 + iter, dax_iomap_actor);
1978 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1979 +index cb797489b2d8..d44fc3f579e1 100644
1980 +--- a/fs/ext4/super.c
1981 ++++ b/fs/ext4/super.c
1982 +@@ -2298,7 +2298,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
1983 + {
1984 + struct ext4_sb_info *sbi = EXT4_SB(sb);
1985 + struct flex_groups **old_groups, **new_groups;
1986 +- int size, i;
1987 ++ int size, i, j;
1988 +
1989 + if (!sbi->s_log_groups_per_flex)
1990 + return 0;
1991 +@@ -2319,8 +2319,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
1992 + sizeof(struct flex_groups)),
1993 + GFP_KERNEL);
1994 + if (!new_groups[i]) {
1995 +- for (i--; i >= sbi->s_flex_groups_allocated; i--)
1996 +- kvfree(new_groups[i]);
1997 ++ for (j = sbi->s_flex_groups_allocated; j < i; j++)
1998 ++ kvfree(new_groups[j]);
1999 + kvfree(new_groups);
2000 + ext4_msg(sb, KERN_ERR,
2001 + "not enough memory for %d flex groups", size);
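
The ext4 fix introduces a second index so the unwind frees exactly the slots this call allocated, rather than walking the reused index back past them. The corrected pattern in a standalone sketch (hypothetical sizes):

#include <stdlib.h>

static void **alloc_table(size_t old_n, size_t new_n)
{
    void **groups = calloc(new_n, sizeof(*groups));
    size_t i, j;

    if (!groups)
        return NULL;

    for (i = old_n; i < new_n; i++) {
        groups[i] = malloc(64);
        if (!groups[i]) {
            /* free only what this loop allocated: [old_n, i) */
            for (j = old_n; j < i; j++)
                free(groups[j]);
            free(groups);
            return NULL;
        }
    }
    return groups;
}

int main(void)
{
    void **t = alloc_table(0, 4);

    if (t) {
        for (size_t i = 0; i < 4; i++)
            free(t[i]);
        free(t);
    }
    return 0;
}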
2002 +diff --git a/fs/namei.c b/fs/namei.c
2003 +index c00a7e1da4c0..327844fedf3d 100644
2004 +--- a/fs/namei.c
2005 ++++ b/fs/namei.c
2006 +@@ -1368,7 +1368,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
2007 + nd->path.dentry = parent;
2008 + nd->seq = seq;
2009 + if (unlikely(!path_connected(&nd->path)))
2010 +- return -ENOENT;
2011 ++ return -ECHILD;
2012 + break;
2013 + } else {
2014 + struct mount *mnt = real_mount(nd->path.mnt);
2015 +diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
2016 +index 66ceb12ebc63..2939a6cd7fec 100644
2017 +--- a/include/acpi/actypes.h
2018 ++++ b/include/acpi/actypes.h
2019 +@@ -528,11 +528,12 @@ typedef u64 acpi_integer;
2020 + #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
2021 +
2022 + /*
2023 +- * Algorithm to obtain access bit width.
2024 ++ * Algorithm to obtain access bit or byte width.
2025 + * Can be used with access_width of struct acpi_generic_address and access_size of
2026 + * struct acpi_resource_generic_register.
2027 + */
2028 + #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
2029 ++#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
2030 +
2031 + /*******************************************************************************
2032 + *
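
The new ACPI_ACCESS_BYTE_WIDTH companion maps the GAS access-size encoding (1=byte, 2=word, 3=dword, 4=qword) to a byte count, which is what the wdat_wdt resource fix above consumes. A quick check of both macros over the encoding range:

#include <stdio.h>

#define ACCESS_BIT_WIDTH(size)  (1 << ((size) + 2))  /* 8, 16, 32, 64 */
#define ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))  /* 1, 2, 4, 8    */

int main(void)
{
    for (int size = 1; size <= 4; size++)
        printf("encoding %d -> %d bits, %d bytes\n",
               size, ACCESS_BIT_WIDTH(size), ACCESS_BYTE_WIDTH(size));
    /* resource end = start + ACCESS_BYTE_WIDTH(size) - 1 */
    return 0;
}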
2033 +diff --git a/include/linux/hid.h b/include/linux/hid.h
2034 +index 8b3e5e8a72fb..8506637f070d 100644
2035 +--- a/include/linux/hid.h
2036 ++++ b/include/linux/hid.h
2037 +@@ -495,7 +495,7 @@ struct hid_report_enum {
2038 + };
2039 +
2040 + #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
2041 +-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
2042 ++#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
2043 + #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
2044 + #define HID_OUTPUT_FIFO_SIZE 64
2045 +
2046 +diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
2047 +index 4618cbbe3632..99f8580344d0 100644
2048 +--- a/include/net/flow_dissector.h
2049 ++++ b/include/net/flow_dissector.h
2050 +@@ -5,6 +5,7 @@
2051 + #include <linux/types.h>
2052 + #include <linux/in6.h>
2053 + #include <linux/siphash.h>
2054 ++#include <linux/string.h>
2055 + #include <uapi/linux/if_ether.h>
2056 +
2057 + /**
2058 +@@ -306,4 +307,12 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
2059 + return ((char *)target_container) + flow_dissector->offset[key_id];
2060 + }
2061 +
2062 ++static inline void
2063 ++flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
2064 ++ struct flow_dissector_key_basic *key_basic)
2065 ++{
2066 ++ memset(key_control, 0, sizeof(*key_control));
2067 ++ memset(key_basic, 0, sizeof(*key_basic));
2068 ++}
2069 ++
2070 + #endif
2071 +diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
2072 +index 5f72af35b3ed..ad22079125bf 100644
2073 +--- a/include/uapi/linux/usb/charger.h
2074 ++++ b/include/uapi/linux/usb/charger.h
2075 +@@ -14,18 +14,18 @@
2076 + * ACA (Accessory Charger Adapters)
2077 + */
2078 + enum usb_charger_type {
2079 +- UNKNOWN_TYPE,
2080 +- SDP_TYPE,
2081 +- DCP_TYPE,
2082 +- CDP_TYPE,
2083 +- ACA_TYPE,
2084 ++ UNKNOWN_TYPE = 0,
2085 ++ SDP_TYPE = 1,
2086 ++ DCP_TYPE = 2,
2087 ++ CDP_TYPE = 3,
2088 ++ ACA_TYPE = 4,
2089 + };
2090 +
2091 + /* USB charger state */
2092 + enum usb_charger_state {
2093 +- USB_CHARGER_DEFAULT,
2094 +- USB_CHARGER_PRESENT,
2095 +- USB_CHARGER_ABSENT,
2096 ++ USB_CHARGER_DEFAULT = 0,
2097 ++ USB_CHARGER_PRESENT = 1,
2098 ++ USB_CHARGER_ABSENT = 2,
2099 + };
2100 +
2101 + #endif /* _UAPI__LINUX_USB_CHARGER_H */
2102 +diff --git a/kernel/audit.c b/kernel/audit.c
2103 +index 2a8058764aa6..1f08c38e604a 100644
2104 +--- a/kernel/audit.c
2105 ++++ b/kernel/audit.c
2106 +@@ -1106,13 +1106,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
2107 + audit_log_end(ab);
2108 + }
2109 +
2110 +-static int audit_set_feature(struct sk_buff *skb)
2111 ++static int audit_set_feature(struct audit_features *uaf)
2112 + {
2113 +- struct audit_features *uaf;
2114 + int i;
2115 +
2116 + BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
2117 +- uaf = nlmsg_data(nlmsg_hdr(skb));
2118 +
2119 + /* if there is ever a version 2 we should handle that here */
2120 +
2121 +@@ -1180,6 +1178,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2122 + {
2123 + u32 seq;
2124 + void *data;
2125 ++ int data_len;
2126 + int err;
2127 + struct audit_buffer *ab;
2128 + u16 msg_type = nlh->nlmsg_type;
2129 +@@ -1193,6 +1192,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2130 +
2131 + seq = nlh->nlmsg_seq;
2132 + data = nlmsg_data(nlh);
2133 ++ data_len = nlmsg_len(nlh);
2134 +
2135 + switch (msg_type) {
2136 + case AUDIT_GET: {
2137 +@@ -1216,7 +1216,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2138 + struct audit_status s;
2139 + memset(&s, 0, sizeof(s));
2140 + /* guard against past and future API changes */
2141 +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
2142 ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
2143 + if (s.mask & AUDIT_STATUS_ENABLED) {
2144 + err = audit_set_enabled(s.enabled);
2145 + if (err < 0)
2146 +@@ -1320,7 +1320,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2147 + return err;
2148 + break;
2149 + case AUDIT_SET_FEATURE:
2150 +- err = audit_set_feature(skb);
2151 ++ if (data_len < sizeof(struct audit_features))
2152 ++ return -EINVAL;
2153 ++ err = audit_set_feature(data);
2154 + if (err)
2155 + return err;
2156 + break;
2157 +@@ -1332,6 +1334,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2158 +
2159 + err = audit_filter(msg_type, AUDIT_FILTER_USER);
2160 + if (err == 1) { /* match or error */
2161 ++ char *str = data;
2162 ++
2163 + err = 0;
2164 + if (msg_type == AUDIT_USER_TTY) {
2165 + err = tty_audit_push();
2166 +@@ -1339,26 +1343,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2167 + break;
2168 + }
2169 + audit_log_common_recv_msg(&ab, msg_type);
2170 +- if (msg_type != AUDIT_USER_TTY)
2171 ++ if (msg_type != AUDIT_USER_TTY) {
2172 ++ /* ensure NULL termination */
2173 ++ str[data_len - 1] = '\0';
2174 + audit_log_format(ab, " msg='%.*s'",
2175 + AUDIT_MESSAGE_TEXT_MAX,
2176 +- (char *)data);
2177 +- else {
2178 +- int size;
2179 +-
2180 ++ str);
2181 ++ } else {
2182 + audit_log_format(ab, " data=");
2183 +- size = nlmsg_len(nlh);
2184 +- if (size > 0 &&
2185 +- ((unsigned char *)data)[size - 1] == '\0')
2186 +- size--;
2187 +- audit_log_n_untrustedstring(ab, data, size);
2188 ++ if (data_len > 0 && str[data_len - 1] == '\0')
2189 ++ data_len--;
2190 ++ audit_log_n_untrustedstring(ab, str, data_len);
2191 + }
2192 + audit_log_end(ab);
2193 + }
2194 + break;
2195 + case AUDIT_ADD_RULE:
2196 + case AUDIT_DEL_RULE:
2197 +- if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
2198 ++ if (data_len < sizeof(struct audit_rule_data))
2199 + return -EINVAL;
2200 + if (audit_enabled == AUDIT_LOCKED) {
2201 + audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
2202 +@@ -1366,7 +1368,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2203 + audit_log_end(ab);
2204 + return -EPERM;
2205 + }
2206 +- err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh));
2207 ++ err = audit_rule_change(msg_type, seq, data, data_len);
2208 + break;
2209 + case AUDIT_LIST_RULES:
2210 + err = audit_list_rules_send(skb, seq);
2211 +@@ -1380,7 +1382,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2212 + case AUDIT_MAKE_EQUIV: {
2213 + void *bufp = data;
2214 + u32 sizes[2];
2215 +- size_t msglen = nlmsg_len(nlh);
2216 ++ size_t msglen = data_len;
2217 + char *old, *new;
2218 +
2219 + err = -EINVAL;
2220 +@@ -1456,7 +1458,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2221 +
2222 + memset(&s, 0, sizeof(s));
2223 + /* guard against past and future API changes */
2224 +- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
2225 ++ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
2226 + /* check if new data is valid */
2227 + if ((s.enabled != 0 && s.enabled != 1) ||
2228 + (s.log_passwd != 0 && s.log_passwd != 1))
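
The audit changes capture the payload length once, reject payloads shorter than the fixed header before casting, and force NUL termination before logging user-supplied text. A userspace sketch of those three guards (hypothetical message layout, not the netlink API):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct features_msg {
    uint32_t vers;
    uint32_t mask;
};

static int handle_msg(const uint8_t *data, size_t data_len)
{
    struct features_msg m;

    if (data_len < sizeof(m))
        return -1;                     /* reject short payloads */

    memcpy(&m, data, sizeof(m));       /* no overread possible now */
    return (int)m.vers;
}

static void log_user_string(char *data, size_t data_len)
{
    if (!data_len)
        return;
    data[data_len - 1] = '\0';         /* ensure termination */
    printf("msg='%s'\n", data);
}

int main(void)
{
    uint8_t msg[8] = { 1, 0, 0, 0, 0xff, 0, 0, 0 };
    char text[] = { 'h', 'i' };        /* deliberately unterminated */

    handle_msg(msg, sizeof(msg));
    log_user_string(text, sizeof(text));
    return 0;
}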
2229 +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
2230 +index 425c67e4f568..1c8a48abda80 100644
2231 +--- a/kernel/auditfilter.c
2232 ++++ b/kernel/auditfilter.c
2233 +@@ -452,6 +452,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2234 + bufp = data->buf;
2235 + for (i = 0; i < data->field_count; i++) {
2236 + struct audit_field *f = &entry->rule.fields[i];
2237 ++ u32 f_val;
2238 +
2239 + err = -EINVAL;
2240 +
2241 +@@ -460,12 +461,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2242 + goto exit_free;
2243 +
2244 + f->type = data->fields[i];
2245 +- f->val = data->values[i];
2246 ++ f_val = data->values[i];
2247 +
2248 + /* Support legacy tests for a valid loginuid */
2249 +- if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
2250 ++ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
2251 + f->type = AUDIT_LOGINUID_SET;
2252 +- f->val = 0;
2253 ++ f_val = 0;
2254 + entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
2255 + }
2256 +
2257 +@@ -481,7 +482,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2258 + case AUDIT_SUID:
2259 + case AUDIT_FSUID:
2260 + case AUDIT_OBJ_UID:
2261 +- f->uid = make_kuid(current_user_ns(), f->val);
2262 ++ f->uid = make_kuid(current_user_ns(), f_val);
2263 + if (!uid_valid(f->uid))
2264 + goto exit_free;
2265 + break;
2266 +@@ -490,11 +491,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2267 + case AUDIT_SGID:
2268 + case AUDIT_FSGID:
2269 + case AUDIT_OBJ_GID:
2270 +- f->gid = make_kgid(current_user_ns(), f->val);
2271 ++ f->gid = make_kgid(current_user_ns(), f_val);
2272 + if (!gid_valid(f->gid))
2273 + goto exit_free;
2274 + break;
2275 + case AUDIT_ARCH:
2276 ++ f->val = f_val;
2277 + entry->rule.arch_f = f;
2278 + break;
2279 + case AUDIT_SUBJ_USER:
2280 +@@ -507,11 +509,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2281 + case AUDIT_OBJ_TYPE:
2282 + case AUDIT_OBJ_LEV_LOW:
2283 + case AUDIT_OBJ_LEV_HIGH:
2284 +- str = audit_unpack_string(&bufp, &remain, f->val);
2285 +- if (IS_ERR(str))
2286 ++ str = audit_unpack_string(&bufp, &remain, f_val);
2287 ++ if (IS_ERR(str)) {
2288 ++ err = PTR_ERR(str);
2289 + goto exit_free;
2290 +- entry->rule.buflen += f->val;
2291 +-
2292 ++ }
2293 ++ entry->rule.buflen += f_val;
2294 ++ f->lsm_str = str;
2295 + err = security_audit_rule_init(f->type, f->op, str,
2296 + (void **)&f->lsm_rule);
2297 + /* Keep currently invalid fields around in case they
2298 +@@ -520,68 +524,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
2299 + pr_warn("audit rule for LSM \'%s\' is invalid\n",
2300 + str);
2301 + err = 0;
2302 +- }
2303 +- if (err) {
2304 +- kfree(str);
2305 ++ } else if (err)
2306 + goto exit_free;
2307 +- } else
2308 +- f->lsm_str = str;
2309 + break;
2310 + case AUDIT_WATCH:
2311 +- str = audit_unpack_string(&bufp, &remain, f->val);
2312 +- if (IS_ERR(str))
2313 ++ str = audit_unpack_string(&bufp, &remain, f_val);
2314 ++ if (IS_ERR(str)) {
2315 ++ err = PTR_ERR(str);
2316 + goto exit_free;
2317 +- entry->rule.buflen += f->val;
2318 +-
2319 +- err = audit_to_watch(&entry->rule, str, f->val, f->op);
2320 ++ }
2321 ++ err = audit_to_watch(&entry->rule, str, f_val, f->op);
2322 + if (err) {
2323 + kfree(str);
2324 + goto exit_free;
2325 + }
2326 ++ entry->rule.buflen += f_val;
2327 + break;
2328 + case AUDIT_DIR:
2329 +- str = audit_unpack_string(&bufp, &remain, f->val);
2330 +- if (IS_ERR(str))
2331 ++ str = audit_unpack_string(&bufp, &remain, f_val);
2332 ++ if (IS_ERR(str)) {
2333 ++ err = PTR_ERR(str);
2334 + goto exit_free;
2335 +- entry->rule.buflen += f->val;
2336 +-
2337 ++ }
2338 + err = audit_make_tree(&entry->rule, str, f->op);
2339 + kfree(str);
2340 + if (err)
2341 + goto exit_free;
2342 ++ entry->rule.buflen += f_val;
2343 + break;
2344 + case AUDIT_INODE:
2345 ++ f->val = f_val;
2346 + err = audit_to_inode(&entry->rule, f);
2347 + if (err)
2348 + goto exit_free;
2349 + break;
2350 + case AUDIT_FILTERKEY:
2351 +- if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
2352 ++ if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
2353 + goto exit_free;
2354 +- str = audit_unpack_string(&bufp, &remain, f->val);
2355 +- if (IS_ERR(str))
2356 ++ str = audit_unpack_string(&bufp, &remain, f_val);
2357 ++ if (IS_ERR(str)) {
2358 ++ err = PTR_ERR(str);
2359 + goto exit_free;
2360 +- entry->rule.buflen += f->val;
2361 ++ }
2362 ++ entry->rule.buflen += f_val;
2363 + entry->rule.filterkey = str;
2364 + break;
2365 + case AUDIT_EXE:
2366 +- if (entry->rule.exe || f->val > PATH_MAX)
2367 ++ if (entry->rule.exe || f_val > PATH_MAX)
2368 + goto exit_free;
2369 +- str = audit_unpack_string(&bufp, &remain, f->val);
2370 ++ str = audit_unpack_string(&bufp, &remain, f_val);
2371 + if (IS_ERR(str)) {
2372 + err = PTR_ERR(str);
2373 + goto exit_free;
2374 + }
2375 +- entry->rule.buflen += f->val;
2376 +-
2377 +- audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
2378 ++ audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
2379 + if (IS_ERR(audit_mark)) {
2380 + kfree(str);
2381 + err = PTR_ERR(audit_mark);
2382 + goto exit_free;
2383 + }
2384 ++ entry->rule.buflen += f_val;
2385 + entry->rule.exe = audit_mark;
2386 + break;
2387 ++ default:
2388 ++ f->val = f_val;
2389 ++ break;
2390 + }
2391 + }
2392 +
2393 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2394 +index f4e4095ec7ea..00050a22f6a1 100644
2395 +--- a/kernel/kprobes.c
2396 ++++ b/kernel/kprobes.c
2397 +@@ -523,6 +523,8 @@ static void do_unoptimize_kprobes(void)
2398 + arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
2399 + /* Loop free_list for disarming */
2400 + list_for_each_entry_safe(op, tmp, &freeing_list, list) {
2401 ++ /* Switching from detour code to origin */
2402 ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
2403 + /* Disarm probes if marked disabled */
2404 + if (kprobe_disabled(&op->kp))
2405 + arch_disarm_kprobe(&op->kp);
2406 +@@ -662,6 +664,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
2407 + {
2408 + lockdep_assert_cpus_held();
2409 + arch_unoptimize_kprobe(op);
2410 ++ op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
2411 + if (kprobe_disabled(&op->kp))
2412 + arch_disarm_kprobe(&op->kp);
2413 + }
2414 +@@ -689,7 +692,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
2415 + return;
2416 + }
2417 +
2418 +- op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
2419 + if (!list_empty(&op->list)) {
2420 + /* Dequeue from the optimization queue */
2421 + list_del_init(&op->list);
2422 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2423 +index 7f4f4ab5bfef..86ccaaf0c1bf 100644
2424 +--- a/kernel/sched/fair.c
2425 ++++ b/kernel/sched/fair.c
2426 +@@ -353,6 +353,18 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
2427 + static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
2428 + {
2429 + if (cfs_rq->on_list) {
2430 ++ struct rq *rq = rq_of(cfs_rq);
2431 ++
2432 ++ /*
2433 ++ * With cfs_rq being unthrottled/throttled during an enqueue,
2434 ++ * it can happen the tmp_alone_branch points the a leaf that
2435 ++ * we finally want to del. In this case, tmp_alone_branch moves
2436 ++ * to the prev element but it will point to rq->leaf_cfs_rq_list
2437 ++ * at the end of the enqueue.
2438 ++ */
2439 ++ if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
2440 ++ rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
2441 ++
2442 + list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
2443 + cfs_rq->on_list = 0;
2444 + }
2445 +@@ -363,9 +375,10 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
2446 + SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
2447 + }
2448 +
2449 +-/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
2450 +-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
2451 +- list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
2452 ++/* Iterate through all leaf cfs_rq's on a runqueue */
2453 ++#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
2454 ++ list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
2455 ++ leaf_cfs_rq_list)
2456 +
2457 + /* Do the two (enqueued) entities belong to the same group ? */
2458 + static inline struct cfs_rq *
2459 +@@ -462,8 +475,8 @@ static inline void assert_list_leaf_cfs_rq(struct rq *rq)
2460 + {
2461 + }
2462 +
2463 +-#define for_each_leaf_cfs_rq(rq, cfs_rq) \
2464 +- for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
2465 ++#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
2466 ++ for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
2467 +
2468 + static inline struct sched_entity *parent_entity(struct sched_entity *se)
2469 + {
2470 +@@ -4441,6 +4454,10 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
2471 + /* adjust cfs_rq_clock_task() */
2472 + cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
2473 + cfs_rq->throttled_clock_task;
2474 ++
2475 ++ /* Add cfs_rq with already running entity in the list */
2476 ++ if (cfs_rq->nr_running >= 1)
2477 ++ list_add_leaf_cfs_rq(cfs_rq);
2478 + }
2479 +
2480 + return 0;
2481 +@@ -4452,8 +4469,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
2482 + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2483 +
2484 + /* group is entering throttled state, stop time */
2485 +- if (!cfs_rq->throttle_count)
2486 ++ if (!cfs_rq->throttle_count) {
2487 + cfs_rq->throttled_clock_task = rq_clock_task(rq);
2488 ++ list_del_leaf_cfs_rq(cfs_rq);
2489 ++ }
2490 + cfs_rq->throttle_count++;
2491 +
2492 + return 0;
2493 +@@ -4556,6 +4575,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
2494 + break;
2495 + }
2496 +
2497 ++ assert_list_leaf_cfs_rq(rq);
2498 ++
2499 + if (!se)
2500 + add_nr_running(rq, task_delta);
2501 +
2502 +@@ -7441,10 +7462,27 @@ static inline bool others_have_blocked(struct rq *rq)
2503 +
2504 + #ifdef CONFIG_FAIR_GROUP_SCHED
2505 +
2506 ++static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
2507 ++{
2508 ++ if (cfs_rq->load.weight)
2509 ++ return false;
2510 ++
2511 ++ if (cfs_rq->avg.load_sum)
2512 ++ return false;
2513 ++
2514 ++ if (cfs_rq->avg.util_sum)
2515 ++ return false;
2516 ++
2517 ++ if (cfs_rq->avg.runnable_load_sum)
2518 ++ return false;
2519 ++
2520 ++ return true;
2521 ++}
2522 ++
2523 + static void update_blocked_averages(int cpu)
2524 + {
2525 + struct rq *rq = cpu_rq(cpu);
2526 +- struct cfs_rq *cfs_rq;
2527 ++ struct cfs_rq *cfs_rq, *pos;
2528 + const struct sched_class *curr_class;
2529 + struct rq_flags rf;
2530 + bool done = true;
2531 +@@ -7456,13 +7494,9 @@ static void update_blocked_averages(int cpu)
2532 + * Iterates the task_group tree in a bottom up fashion, see
2533 + * list_add_leaf_cfs_rq() for details.
2534 + */
2535 +- for_each_leaf_cfs_rq(rq, cfs_rq) {
2536 ++ for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
2537 + struct sched_entity *se;
2538 +
2539 +- /* throttled entities do not contribute to load */
2540 +- if (throttled_hierarchy(cfs_rq))
2541 +- continue;
2542 +-
2543 + if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
2544 + update_tg_load_avg(cfs_rq, 0);
2545 +
2546 +@@ -7471,6 +7505,13 @@ static void update_blocked_averages(int cpu)
2547 + if (se && !skip_blocked_update(se))
2548 + update_load_avg(cfs_rq_of(se), se, 0);
2549 +
2550 ++ /*
2551 ++ * There can be a lot of idle CPU cgroups. Don't let fully
2552 ++ * decayed cfs_rqs linger on the list.
2553 ++ */
2554 ++ if (cfs_rq_is_decayed(cfs_rq))
2555 ++ list_del_leaf_cfs_rq(cfs_rq);
2556 ++
2557 + /* Don't need periodic decay once load/util_avg are null */
2558 + if (cfs_rq_has_blocked(cfs_rq))
2559 + done = false;
2560 +@@ -10256,10 +10297,10 @@ const struct sched_class fair_sched_class = {
2561 + #ifdef CONFIG_SCHED_DEBUG
2562 + void print_cfs_stats(struct seq_file *m, int cpu)
2563 + {
2564 +- struct cfs_rq *cfs_rq;
2565 ++ struct cfs_rq *cfs_rq, *pos;
2566 +
2567 + rcu_read_lock();
2568 +- for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
2569 ++ for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
2570 + print_cfs_rq(m, cpu, cfs_rq);
2571 + rcu_read_unlock();
2572 + }
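
for_each_leaf_cfs_rq_safe works because the next pointer is saved before each node is visited, so the current node may be unlinked mid-walk, as update_blocked_averages now does for decayed cfs_rqs. A plain-C sketch of that _safe deletion pattern (singly linked stand-in for list_head):

#include <stdlib.h>

struct node {
    int decayed;
    struct node *next;
};

/* Visit every node, deleting decayed ones while walking. */
static void prune(struct node **head)
{
    struct node **link = head;
    struct node *n, *pos;

    for (n = *head; n; n = pos) {
        pos = n->next;                 /* saved before any deletion */
        if (n->decayed) {
            *link = pos;               /* unlink the current node */
            free(n);
        } else {
            link = &n->next;
        }
    }
}

int main(void)
{
    struct node *b = malloc(sizeof(*b));
    struct node *a = malloc(sizeof(*a));

    a->decayed = 1; a->next = b;
    b->decayed = 0; b->next = NULL;

    prune(&a);
    while (a) {                        /* free the survivors */
        struct node *t = a->next;
        free(a);
        a = t;
    }
    return 0;
}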
2573 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2574 +index e61aa1c68e99..c41f7d1ab5fa 100644
2575 +--- a/kernel/trace/trace.c
2576 ++++ b/kernel/trace/trace.c
2577 +@@ -1549,6 +1549,7 @@ static __init int init_trace_selftests(void)
2578 +
2579 + pr_info("Running postponed tracer tests:\n");
2580 +
2581 ++ tracing_selftest_running = true;
2582 + list_for_each_entry_safe(p, n, &postponed_selftests, list) {
2583 + ret = run_tracer_selftest(p->type);
2584 + /* If the test fails, then warn and remove from available_tracers */
2585 +@@ -1567,6 +1568,7 @@ static __init int init_trace_selftests(void)
2586 + list_del(&p->list);
2587 + kfree(p);
2588 + }
2589 ++ tracing_selftest_running = false;
2590 +
2591 + out:
2592 + mutex_unlock(&trace_types_lock);
2593 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2594 +index 5bb93cf18009..146998357bed 100644
2595 +--- a/mm/huge_memory.c
2596 ++++ b/mm/huge_memory.c
2597 +@@ -173,16 +173,13 @@ static ssize_t enabled_store(struct kobject *kobj,
2598 + {
2599 + ssize_t ret = count;
2600 +
2601 +- if (!memcmp("always", buf,
2602 +- min(sizeof("always")-1, count))) {
2603 ++ if (sysfs_streq(buf, "always")) {
2604 + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
2605 + set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
2606 +- } else if (!memcmp("madvise", buf,
2607 +- min(sizeof("madvise")-1, count))) {
2608 ++ } else if (sysfs_streq(buf, "madvise")) {
2609 + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
2610 + set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
2611 +- } else if (!memcmp("never", buf,
2612 +- min(sizeof("never")-1, count))) {
2613 ++ } else if (sysfs_streq(buf, "never")) {
2614 + clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
2615 + clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
2616 + } else
2617 +@@ -246,32 +243,27 @@ static ssize_t defrag_store(struct kobject *kobj,
2618 + struct kobj_attribute *attr,
2619 + const char *buf, size_t count)
2620 + {
2621 +- if (!memcmp("always", buf,
2622 +- min(sizeof("always")-1, count))) {
2623 ++ if (sysfs_streq(buf, "always")) {
2624 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
2625 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2626 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2627 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2628 +- } else if (!memcmp("defer+madvise", buf,
2629 +- min(sizeof("defer+madvise")-1, count))) {
2630 ++ } else if (sysfs_streq(buf, "defer+madvise")) {
2631 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2632 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
2633 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2634 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2635 +- } else if (!memcmp("defer", buf,
2636 +- min(sizeof("defer")-1, count))) {
2637 ++ } else if (sysfs_streq(buf, "defer")) {
2638 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2639 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2640 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2641 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
2642 +- } else if (!memcmp("madvise", buf,
2643 +- min(sizeof("madvise")-1, count))) {
2644 ++ } else if (sysfs_streq(buf, "madvise")) {
2645 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2646 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
2647 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2648 + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
2649 +- } else if (!memcmp("never", buf,
2650 +- min(sizeof("never")-1, count))) {
2651 ++ } else if (sysfs_streq(buf, "never")) {
2652 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
2653 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
2654 + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
2655 +@@ -2661,7 +2653,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
2656 + unsigned long flags;
2657 + pgoff_t end;
2658 +
2659 +- VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
2660 ++ VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
2661 + VM_BUG_ON_PAGE(!PageLocked(page), page);
2662 + VM_BUG_ON_PAGE(!PageCompound(page), page);
2663 +
2664 +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
2665 +index 0ff3953f64aa..8916c5d9b3b3 100644
2666 +--- a/net/core/fib_rules.c
2667 ++++ b/net/core/fib_rules.c
2668 +@@ -968,7 +968,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
2669 +
2670 + frh = nlmsg_data(nlh);
2671 + frh->family = ops->family;
2672 +- frh->table = rule->table;
2673 ++ frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
2674 + if (nla_put_u32(skb, FRA_TABLE, rule->table))
2675 + goto nla_put_failure;
2676 + if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
2677 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2678 +index 7091568b9f63..5e8979c1f76d 100644
2679 +--- a/net/ipv6/ip6_fib.c
2680 ++++ b/net/ipv6/ip6_fib.c
2681 +@@ -981,8 +981,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
2682 + found++;
2683 + break;
2684 + }
2685 +- if (rt_can_ecmp)
2686 +- fallback_ins = fallback_ins ?: ins;
2687 ++ fallback_ins = fallback_ins ?: ins;
2688 + goto next_iter;
2689 + }
2690 +
2691 +@@ -1025,7 +1024,9 @@ next_iter:
2692 + }
2693 +
2694 + if (fallback_ins && !found) {
2695 +- /* No ECMP-able route found, replace first non-ECMP one */
2696 ++ /* No matching route with the same ECMP capability found,
2697 ++ * replace the first matching route
2698 ++ */
2699 + ins = fallback_ins;
2700 + iter = rcu_dereference_protected(*ins,
2701 + lockdep_is_held(&rt->fib6_table->tb6_lock));
2702 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2703 +index f8fe4c9ead4d..9c36a743ddbc 100644
2704 +--- a/net/ipv6/route.c
2705 ++++ b/net/ipv6/route.c
2706 +@@ -4514,6 +4514,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
2707 + */
2708 + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
2709 + NLM_F_REPLACE);
2710 ++ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
2711 + nhn++;
2712 + }
2713 +
2714 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
2715 +index f101a6460b44..7fa9871b1db9 100644
2716 +--- a/net/mac80211/util.c
2717 ++++ b/net/mac80211/util.c
2718 +@@ -945,16 +945,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
2719 + elem_parse_failed = true;
2720 + break;
2721 + case WLAN_EID_VHT_OPERATION:
2722 +- if (elen >= sizeof(struct ieee80211_vht_operation))
2723 ++ if (elen >= sizeof(struct ieee80211_vht_operation)) {
2724 + elems->vht_operation = (void *)pos;
2725 +- else
2726 +- elem_parse_failed = true;
2727 ++ if (calc_crc)
2728 ++ crc = crc32_be(crc, pos - 2, elen + 2);
2729 ++ break;
2730 ++ }
2731 ++ elem_parse_failed = true;
2732 + break;
2733 + case WLAN_EID_OPMODE_NOTIF:
2734 +- if (elen > 0)
2735 ++ if (elen > 0) {
2736 + elems->opmode_notif = pos;
2737 +- else
2738 +- elem_parse_failed = true;
2739 ++ if (calc_crc)
2740 ++ crc = crc32_be(crc, pos - 2, elen + 2);
2741 ++ break;
2742 ++ }
2743 ++ elem_parse_failed = true;
2744 + break;
2745 + case WLAN_EID_MESH_ID:
2746 + elems->mesh_id = pos;
2747 +diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
2748 +index 5e66042ac346..1c6d15ea76d4 100644
2749 +--- a/net/netfilter/nft_tunnel.c
2750 ++++ b/net/netfilter/nft_tunnel.c
2751 +@@ -467,8 +467,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
2752 + static int nft_tunnel_ports_dump(struct sk_buff *skb,
2753 + struct ip_tunnel_info *info)
2754 + {
2755 +- if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
2756 +- nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
2757 ++ if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
2758 ++ nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
2759 + return -1;
2760 +
2761 + return 0;
2762 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2763 +index 930d17fa906c..4a1b1bb39b4b 100644
2764 +--- a/net/netlink/af_netlink.c
2765 ++++ b/net/netlink/af_netlink.c
2766 +@@ -1029,7 +1029,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2767 + if (nlk->netlink_bind && groups) {
2768 + int group;
2769 +
2770 +- for (group = 0; group < nlk->ngroups; group++) {
2771 ++ /* nl_groups is a u32, so cap the maximum groups we can bind */
2772 ++ for (group = 0; group < BITS_PER_TYPE(u32); group++) {
2773 + if (!test_bit(group, &groups))
2774 + continue;
2775 + err = nlk->netlink_bind(net, group + 1);
2776 +@@ -1048,7 +1049,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
2777 + netlink_insert(sk, nladdr->nl_pid) :
2778 + netlink_autobind(sock);
2779 + if (err) {
2780 +- netlink_undo_bind(nlk->ngroups, groups, sk);
2781 ++ netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
2782 + goto unlock;
2783 + }
2784 + }
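
The cap bounds the loop by the width of the u32 group bitmask the socket address actually carries, independent of nlk->ngroups. A sketch of that bound, with BITS_PER_TYPE written out the way the kernel builds it from CHAR_BIT:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(t) (sizeof(t) * CHAR_BIT)

int main(void)
{
    uint32_t groups = 0x80000001u;     /* first and last possible group */

    for (unsigned g = 0; g < BITS_PER_TYPE(uint32_t); g++)
        if (groups & (1u << g))
            printf("bound group %u\n", g + 1);
    return 0;
}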
2785 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2786 +index c006d3b89ba3..44ca31f8538d 100644
2787 +--- a/net/sched/cls_flower.c
2788 ++++ b/net/sched/cls_flower.c
2789 +@@ -196,6 +196,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2790 + struct fl_flow_key skb_mkey;
2791 +
2792 + list_for_each_entry_rcu(mask, &head->masks, list) {
2793 ++ flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
2794 + fl_clear_masked_range(&skb_key, mask);
2795 +
2796 + skb_key.indev_ifindex = skb->skb_iif;
2797 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
2798 +index 559f09ac0b22..9f4d325f3a79 100644
2799 +--- a/net/sctp/sm_statefuns.c
2800 ++++ b/net/sctp/sm_statefuns.c
2801 +@@ -185,6 +185,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
2802 + return true;
2803 + }
2804 +
2805 ++/* Check for format error in an ABORT chunk */
2806 ++static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
2807 ++{
2808 ++ struct sctp_errhdr *err;
2809 ++
2810 ++ sctp_walk_errors(err, chunk->chunk_hdr);
2811 ++
2812 ++ return (void *)err == (void *)chunk->chunk_end;
2813 ++}
2814 ++
2815 + /**********************************************************
2816 + * These are the state functions for handling chunk events.
2817 + **********************************************************/
2818 +@@ -2270,6 +2280,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
2819 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2820 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2821 +
2822 ++ if (!sctp_err_chunk_valid(chunk))
2823 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2824 ++
2825 + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2826 + }
2827 +
2828 +@@ -2313,6 +2326,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
2829 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2830 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2831 +
2832 ++ if (!sctp_err_chunk_valid(chunk))
2833 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2834 ++
2835 + /* Stop the T2-shutdown timer. */
2836 + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
2837 + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
2838 +@@ -2580,6 +2596,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
2839 + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
2840 + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
2841 +
2842 ++ if (!sctp_err_chunk_valid(chunk))
2843 ++ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
2844 ++
2845 + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
2846 + }
2847 +
2848 +@@ -2597,16 +2616,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
2849 +
2850 + /* See if we have an error cause code in the chunk. */
2851 + len = ntohs(chunk->chunk_hdr->length);
2852 +- if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2853 +- struct sctp_errhdr *err;
2854 +-
2855 +- sctp_walk_errors(err, chunk->chunk_hdr);
2856 +- if ((void *)err != (void *)chunk->chunk_end)
2857 +- return sctp_sf_pdiscard(net, ep, asoc, type, arg,
2858 +- commands);
2859 +-
2860 ++ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
2861 + error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
2862 +- }
2863 +
2864 + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2865 + /* ASSOC_FAILED will DELETE_TCB. */
2866 +diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
2867 +index 52241d679cc9..aa9a17ac1f7b 100644
2868 +--- a/net/smc/smc_clc.c
2869 ++++ b/net/smc/smc_clc.c
2870 +@@ -364,7 +364,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
2871 + dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
2872 + dclc.hdr.version = SMC_CLC_V1;
2873 + dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
2874 +- memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
2875 ++ if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
2876 ++ memcpy(dclc.id_for_peer, local_systemid,
2877 ++ sizeof(local_systemid));
2878 + dclc.peer_diagnosis = htonl(peer_diag_info);
2879 + memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
2880 +
2881 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
2882 +index 0a613e0ef3bf..8f40bbfd60ea 100644
2883 +--- a/net/tls/tls_device.c
2884 ++++ b/net/tls/tls_device.c
2885 +@@ -506,7 +506,7 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
2886 + u32 seq, u64 *p_record_sn)
2887 + {
2888 + u64 record_sn = context->hint_record_sn;
2889 +- struct tls_record_info *info;
2890 ++ struct tls_record_info *info, *last;
2891 +
2892 + info = context->retransmit_hint;
2893 + if (!info ||
2894 +@@ -516,6 +516,25 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
2895 + */
2896 + info = list_first_entry(&context->records_list,
2897 + struct tls_record_info, list);
2898 ++
2899 ++ /* send the start_marker record if seq number is before the
2900 ++ * tls offload start marker sequence number. This record is
2901 ++ * required to handle TCP packets which are before TLS offload
2902 ++ * started.
2903 ++ * And if it's not start marker, look if this seq number
2904 ++ * belongs to the list.
2905 ++ */
2906 ++ if (likely(!tls_record_is_start_marker(info))) {
2907 ++ /* we have the first record, get the last record to see
2908 ++ * if this seq number belongs to the list.
2909 ++ */
2910 ++ last = list_last_entry(&context->records_list,
2911 ++ struct tls_record_info, list);
2912 ++
2913 ++ if (!between(seq, tls_record_start_seq(info),
2914 ++ last->end_seq))
2915 ++ return NULL;
2916 ++ }
2917 + record_sn = context->unacked_record_sn;
2918 + }
2919 +
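
The added membership test leans on wraparound-safe sequence arithmetic: the kernel's between(seq, start, end) reduces to unsigned subtraction, so a window that wraps past 2^32 still compares correctly. A minimal sketch of that idiom (hypothetical helper; 32-bit TCP sequence numbers assumed):

#include <stdint.h>
#include <stdio.h>

/* True if seq lies in [start, end] even when the window wraps 2^32. */
static int between(uint32_t seq, uint32_t start, uint32_t end)
{
    return (seq - start) <= (end - start);   /* unsigned arithmetic */
}

int main(void)
{
    /* window wrapping through zero: [0xfffffff0, 0x10] */
    printf("%d\n", between(0x00000005u, 0xfffffff0u, 0x10));  /* 1 */
    printf("%d\n", between(0x00000020u, 0xfffffff0u, 0x10));  /* 0 */
    return 0;
}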
2920 +diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
2921 +index a9c0f368db5d..24e18405cdb4 100644
2922 +--- a/net/wireless/ethtool.c
2923 ++++ b/net/wireless/ethtool.c
2924 +@@ -7,9 +7,13 @@
2925 + void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2926 + {
2927 + struct wireless_dev *wdev = dev->ieee80211_ptr;
2928 ++ struct device *pdev = wiphy_dev(wdev->wiphy);
2929 +
2930 +- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
2931 +- sizeof(info->driver));
2932 ++ if (pdev->driver)
2933 ++ strlcpy(info->driver, pdev->driver->name,
2934 ++ sizeof(info->driver));
2935 ++ else
2936 ++ strlcpy(info->driver, "N/A", sizeof(info->driver));
2937 +
2938 + strlcpy(info->version, init_utsname()->release, sizeof(info->version));
2939 +
2940 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2941 +index 823dea187691..dfde06b8d25d 100644
2942 +--- a/net/wireless/nl80211.c
2943 ++++ b/net/wireless/nl80211.c
2944 +@@ -323,6 +323,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
2945 + [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
2946 + [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
2947 + [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
2948 ++ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
2949 + [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
2950 + [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
2951 + [NL80211_ATTR_PID] = { .type = NLA_U32 },
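The nl80211 hunk registers NL80211_ATTR_STATUS_CODE as an NLA_U16 in the attribute policy table, so the netlink parser type-checks the attribute's wire format before any handler reads it; previously the attribute had no policy entry. A toy model of such a policy table (all names here are illustrative, not the uapi definitions):

    #include <stdio.h>

    /* Each attribute id maps to an expected wire type, checked by the
     * parser before any handler sees the payload.
     */
    enum { ATTR_STATUS_CODE = 1, NUM_ATTRS };
    enum { NLA_NONE, NLA_U16_T, NLA_U32_T };

    static const int policy[NUM_ATTRS] = {
        [ATTR_STATUS_CODE] = NLA_U16_T, /* 16-bit status, as in the patch */
    };

    static int validate(int attr, int wire_type)
    {
        if (attr >= NUM_ATTRS || policy[attr] == NLA_NONE)
            return -1;  /* unknown or unpolicied attribute */
        return policy[attr] == wire_type ? 0 : -1;
    }

    int main(void)
    {
        printf("%d\n", validate(ATTR_STATUS_CODE, NLA_U16_T)); /*  0: ok        */
        printf("%d\n", validate(ATTR_STATUS_CODE, NLA_U32_T)); /* -1: type mismatch */
        return 0;
    }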
2952 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
2953 +index 692d2fa31c35..ed34902022c1 100644
2954 +--- a/tools/perf/ui/browsers/hists.c
2955 ++++ b/tools/perf/ui/browsers/hists.c
2956 +@@ -2931,6 +2931,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
2957 +
2958 + continue;
2959 + }
2960 ++ actions->ms.map = map;
2961 + top = pstack__peek(browser->pstack);
2962 + if (top == &browser->hists->dso_filter) {
2963 + /*
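The hists browser one-liner refreshes the cached actions->ms.map with the map just resolved, so the zoom/filter actions that follow do not operate on a stale map. A contrived sketch of the stale-cache hazard (all names invented for illustration):

    #include <stdio.h>

    struct map    { const char *dso_name; };
    struct action { struct map *map; };

    static void run_zoom(const struct action *act)
    {
        printf("zooming into %s\n", act->map->dso_name);
    }

    int main(void)
    {
        struct map old_map = { "old.so" }, cur_map = { "current.so" };
        struct action act = { .map = &old_map };

        /* Without the refresh, the action would zoom into old.so. */
        act.map = &cur_map;
        run_zoom(&act); /* zooming into current.so */
        return 0;
    }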
2964 +diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
2965 +index bbb0e042d8e5..59475287e2e1 100644
2966 +--- a/tools/perf/util/stat-shadow.c
2967 ++++ b/tools/perf/util/stat-shadow.c
2968 +@@ -209,12 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
2969 + int cpu, struct runtime_stat *st)
2970 + {
2971 + int ctx = evsel_context(counter);
2972 ++ u64 count_ns = count;
2973 +
2974 + count *= counter->scale;
2975 +
2976 +- if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
2977 +- perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
2978 +- update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
2979 ++ if (perf_evsel__is_clock(counter))
2980 ++ update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
2981 + else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
2982 + update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
2983 + else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
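The stat-shadow hunk snapshots the raw counter value before scaling, because the STAT_NSECS bucket expects unscaled nanoseconds while the other buckets take the scaled count; it also folds the two clock-event matches into perf_evsel__is_clock(). A small sketch of the snapshot-before-scale pattern, with stand-in types rather than perf's:

    #include <stdint.h>
    #include <stdio.h>

    static void update_stats(uint64_t count, double scale, int is_clock,
                             uint64_t *nsecs, double *scaled)
    {
        uint64_t count_ns = count;   /* keep the raw value before scaling */

        *scaled = (double)count * scale;
        if (is_clock)
            *nsecs += count_ns;      /* nanoseconds, never scaled */
    }

    int main(void)
    {
        uint64_t nsecs = 0;
        double scaled = 0.0;

        update_stats(1000000, 1e-3, 1, &nsecs, &scaled);
        printf("nsecs=%llu scaled=%.3f\n",
               (unsigned long long)nsecs, scaled); /* nsecs=1000000 */
        return 0;
    }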
2984 +diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
2985 +index c0885fb65767..7d1a7c0dc56a 100755
2986 +--- a/tools/testing/selftests/net/fib_tests.sh
2987 ++++ b/tools/testing/selftests/net/fib_tests.sh
2988 +@@ -848,6 +848,12 @@ ipv6_rt_replace_mpath()
2989 + check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
2990 + log_test $? 0 "Multipath with single path via multipath attribute"
2991 +
2992 ++ # multipath with dev-only
2993 ++ add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
2994 ++ run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1"
2995 ++ check_route6 "2001:db8:104::/64 dev veth1 metric 1024"
2996 ++ log_test $? 0 "Multipath with dev-only"
2997 ++
2998 + # route replace fails - invalid nexthop 1
2999 + add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
3000 + run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
3001 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
3002 +index beec19fcf8cd..4e499b78569b 100644
3003 +--- a/virt/kvm/kvm_main.c
3004 ++++ b/virt/kvm/kvm_main.c
3005 +@@ -2024,12 +2024,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3006 + if (slots->generation != ghc->generation)
3007 + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
3008 +
3009 +- if (unlikely(!ghc->memslot))
3010 +- return kvm_write_guest(kvm, gpa, data, len);
3011 +-
3012 + if (kvm_is_error_hva(ghc->hva))
3013 + return -EFAULT;
3014 +
3015 ++ if (unlikely(!ghc->memslot))
3016 ++ return kvm_write_guest(kvm, gpa, data, len);
3017 ++
3018 + r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3019 + if (r)
3020 + return -EFAULT;
3021 +@@ -2057,12 +2057,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3022 + if (slots->generation != ghc->generation)
3023 + __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
3024 +
3025 +- if (unlikely(!ghc->memslot))
3026 +- return kvm_read_guest(kvm, ghc->gpa, data, len);
3027 +-
3028 + if (kvm_is_error_hva(ghc->hva))
3029 + return -EFAULT;
3030 +
3031 ++ if (unlikely(!ghc->memslot))
3032 ++ return kvm_read_guest(kvm, ghc->gpa, data, len);
3033 ++
3034 + r = __copy_from_user(data, (void __user *)ghc->hva, len);
3035 + if (r)
3036 + return -EFAULT;
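The kvm_main.c hunks reorder the cache checks so that a bad cached hva fails with -EFAULT before the code can fall back to the uncached slow path; previously a missing memslot sent an invalid gpa through kvm_write_guest()/kvm_read_guest() instead of failing fast. A compact sketch of the check ordering (types and values are stand-ins, not KVM's):

    #include <stdio.h>

    #define EFAULT    14
    #define ERROR_HVA ((unsigned long)-1)

    struct hva_cache { unsigned long hva; int has_memslot; };

    static int write_cached(const struct hva_cache *c)
    {
        if (c->hva == ERROR_HVA)
            return -EFAULT;      /* bad address: fail before anything else */
        if (!c->has_memslot)
            return 1;            /* fall back to the uncached slow path */
        return 0;                /* fast path: copy through the cached hva */
    }

    int main(void)
    {
        struct hva_cache bad      = { ERROR_HVA, 0 };
        struct hva_cache uncached = { 0x1000,    0 };

        printf("%d\n", write_cached(&bad));      /* -14: EFAULT wins    */
        printf("%d\n", write_cached(&uncached)); /*   1: slow path taken */
        return 0;
    }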