From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Sat, 29 Sep 2018 13:36:56
Message-Id: 1538228183.4256d26c4916914f83182e196d2b437222f4289f.mpagano@gentoo
commit:     4256d26c4916914f83182e196d2b437222f4289f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 29 13:36:23 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 29 13:36:23 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4256d26c

Linux patch 4.18.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1010_linux-4.18.11.patch | 2983 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2987 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index a9e2bd7..cccbd63 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -83,6 +83,10 @@ Patch: 1009_linux-4.18.10.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.18.10
23
24 +Patch: 1010_linux-4.18.11.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.18.11
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1010_linux-4.18.11.patch b/1010_linux-4.18.11.patch
33 new file mode 100644
34 index 0000000..fe34a23
35 --- /dev/null
36 +++ b/1010_linux-4.18.11.patch
37 @@ -0,0 +1,2983 @@
38 +diff --git a/Makefile b/Makefile
39 +index ffab15235ff0..de0ecace693a 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 18
46 +-SUBLEVEL = 10
47 ++SUBLEVEL = 11
48 + EXTRAVERSION =
49 + NAME = Merciless Moray
50 +
51 +diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
52 +index acd11b3bf639..2a356b948720 100644
53 +--- a/arch/x86/crypto/aegis128-aesni-glue.c
54 ++++ b/arch/x86/crypto/aegis128-aesni-glue.c
55 +@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
56 + {
57 + if (!boot_cpu_has(X86_FEATURE_XMM2) ||
58 + !boot_cpu_has(X86_FEATURE_AES) ||
59 +- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
60 + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
61 + return -ENODEV;
62 +
63 +diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
64 +index 2071c3d1ae07..dbe8bb980da1 100644
65 +--- a/arch/x86/crypto/aegis128l-aesni-glue.c
66 ++++ b/arch/x86/crypto/aegis128l-aesni-glue.c
67 +@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
68 + {
69 + if (!boot_cpu_has(X86_FEATURE_XMM2) ||
70 + !boot_cpu_has(X86_FEATURE_AES) ||
71 +- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
72 + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
73 + return -ENODEV;
74 +
75 +diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
76 +index b5f2a8fd5a71..8bebda2de92f 100644
77 +--- a/arch/x86/crypto/aegis256-aesni-glue.c
78 ++++ b/arch/x86/crypto/aegis256-aesni-glue.c
79 +@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
80 + {
81 + if (!boot_cpu_has(X86_FEATURE_XMM2) ||
82 + !boot_cpu_has(X86_FEATURE_AES) ||
83 +- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
84 + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
85 + return -ENODEV;
86 +
87 +diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
88 +index 95cf857d2cbb..f40244eaf14d 100644
89 +--- a/arch/x86/crypto/morus1280-sse2-glue.c
90 ++++ b/arch/x86/crypto/morus1280-sse2-glue.c
91 +@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
92 + static int __init crypto_morus1280_sse2_module_init(void)
93 + {
94 + if (!boot_cpu_has(X86_FEATURE_XMM2) ||
95 +- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
96 + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
97 + return -ENODEV;
98 +
99 +diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
100 +index 615fb7bc9a32..9afaf8f8565a 100644
101 +--- a/arch/x86/crypto/morus640-sse2-glue.c
102 ++++ b/arch/x86/crypto/morus640-sse2-glue.c
103 +@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
104 + static int __init crypto_morus640_sse2_module_init(void)
105 + {
106 + if (!boot_cpu_has(X86_FEATURE_XMM2) ||
107 +- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
108 + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
109 + return -ENODEV;
110 +
111 +diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
112 +index 7d00d4ad44d4..95997e6c0696 100644
113 +--- a/arch/x86/xen/pmu.c
114 ++++ b/arch/x86/xen/pmu.c
115 +@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
116 + irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
117 + {
118 + int err, ret = IRQ_NONE;
119 +- struct pt_regs regs;
120 ++ struct pt_regs regs = {0};
121 + const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
122 + uint8_t xenpmu_flags = get_xenpmu_flags();
123 +
124 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
125 +index 984b37647b2f..22a2bc5f25ce 100644
126 +--- a/drivers/ata/libata-core.c
127 ++++ b/drivers/ata/libata-core.c
128 +@@ -5358,10 +5358,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
129 + */
130 + int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
131 + {
132 ++ u64 done_mask, ap_qc_active = ap->qc_active;
133 + int nr_done = 0;
134 +- u64 done_mask;
135 +
136 +- done_mask = ap->qc_active ^ qc_active;
137 ++ /*
138 ++ * If the internal tag is set on ap->qc_active, then we care about
139 ++ * bit0 on the passed in qc_active mask. Move that bit up to match
140 ++ * the internal tag.
141 ++ */
142 ++ if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
143 ++ qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
144 ++ qc_active ^= qc_active & 0x01;
145 ++ }
146 ++
147 ++ done_mask = ap_qc_active ^ qc_active;
148 +
149 + if (unlikely(done_mask & qc_active)) {
150 + ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
151 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
152 +index e950730f1933..5a6e7e1cb351 100644
153 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
154 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
155 +@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
156 + break;
157 + case CHIP_POLARIS10:
158 + if (type == CGS_UCODE_ID_SMU) {
159 +- if ((adev->pdev->device == 0x67df) &&
160 +- ((adev->pdev->revision == 0xe0) ||
161 +- (adev->pdev->revision == 0xe3) ||
162 +- (adev->pdev->revision == 0xe4) ||
163 +- (adev->pdev->revision == 0xe5) ||
164 +- (adev->pdev->revision == 0xe7) ||
165 ++ if (((adev->pdev->device == 0x67df) &&
166 ++ ((adev->pdev->revision == 0xe0) ||
167 ++ (adev->pdev->revision == 0xe3) ||
168 ++ (adev->pdev->revision == 0xe4) ||
169 ++ (adev->pdev->revision == 0xe5) ||
170 ++ (adev->pdev->revision == 0xe7) ||
171 ++ (adev->pdev->revision == 0xef))) ||
172 ++ ((adev->pdev->device == 0x6fdf) &&
173 + (adev->pdev->revision == 0xef))) {
174 + info->is_kicker = true;
175 + strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
176 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
177 +index b0bf2f24da48..dc893076398e 100644
178 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
179 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
180 +@@ -532,6 +532,7 @@ static const struct pci_device_id pciidlist[] = {
181 + {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
182 + {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
183 + {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
184 ++ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
185 + /* Polaris12 */
186 + {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
187 + {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
188 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
189 +index dec0d60921bf..00486c744f24 100644
190 +--- a/drivers/gpu/drm/i915/intel_display.c
191 ++++ b/drivers/gpu/drm/i915/intel_display.c
192 +@@ -5062,10 +5062,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
193 + mutex_lock(&dev_priv->pcu_lock);
194 + WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
195 + mutex_unlock(&dev_priv->pcu_lock);
196 +- /* wait for pcode to finish disabling IPS, which may take up to 42ms */
197 ++ /*
198 ++ * Wait for PCODE to finish disabling IPS. The BSpec specified
199 ++ * 42ms timeout value leads to occasional timeouts so use 100ms
200 ++ * instead.
201 ++ */
202 + if (intel_wait_for_register(dev_priv,
203 + IPS_CTL, IPS_ENABLE, 0,
204 +- 42))
205 ++ 100))
206 + DRM_ERROR("Timed out waiting for IPS disable\n");
207 + } else {
208 + I915_WRITE(IPS_CTL, 0);
209 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
210 +index 9bae4db84cfb..7a12d75e5157 100644
211 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
212 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
213 +@@ -1098,17 +1098,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
214 + int ret;
215 +
216 + if (dpcd >= 0x12) {
217 +- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
218 ++ /* Even if we're enabling MST, start with disabling the
219 ++ * branching unit to clear any sink-side MST topology state
220 ++ * that wasn't set by us
221 ++ */
222 ++ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
223 + if (ret < 0)
224 + return ret;
225 +
226 +- dpcd &= ~DP_MST_EN;
227 +- if (state)
228 +- dpcd |= DP_MST_EN;
229 +-
230 +- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
231 +- if (ret < 0)
232 +- return ret;
233 ++ if (state) {
234 ++ /* Now, start initializing */
235 ++ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
236 ++ DP_MST_EN);
237 ++ if (ret < 0)
238 ++ return ret;
239 ++ }
240 + }
241 +
242 + return nvif_mthd(disp, 0, &args, sizeof(args));
243 +@@ -1117,31 +1121,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
244 + int
245 + nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
246 + {
247 +- int ret, state = 0;
248 ++ struct drm_dp_aux *aux;
249 ++ int ret;
250 ++ bool old_state, new_state;
251 ++ u8 mstm_ctrl;
252 +
253 + if (!mstm)
254 + return 0;
255 +
256 +- if (dpcd[0] >= 0x12) {
257 +- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
258 ++ mutex_lock(&mstm->mgr.lock);
259 ++
260 ++ old_state = mstm->mgr.mst_state;
261 ++ new_state = old_state;
262 ++ aux = mstm->mgr.aux;
263 ++
264 ++ if (old_state) {
265 ++ /* Just check that the MST hub is still as we expect it */
266 ++ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
267 ++ if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
268 ++ DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
269 ++ new_state = false;
270 ++ }
271 ++ } else if (dpcd[0] >= 0x12) {
272 ++ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
273 + if (ret < 0)
274 +- return ret;
275 ++ goto probe_error;
276 +
277 + if (!(dpcd[1] & DP_MST_CAP))
278 + dpcd[0] = 0x11;
279 + else
280 +- state = allow;
281 ++ new_state = allow;
282 ++ }
283 ++
284 ++ if (new_state == old_state) {
285 ++ mutex_unlock(&mstm->mgr.lock);
286 ++ return new_state;
287 + }
288 +
289 +- ret = nv50_mstm_enable(mstm, dpcd[0], state);
290 ++ ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
291 + if (ret)
292 +- return ret;
293 ++ goto probe_error;
294 ++
295 ++ mutex_unlock(&mstm->mgr.lock);
296 +
297 +- ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
298 ++ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
299 + if (ret)
300 + return nv50_mstm_enable(mstm, dpcd[0], 0);
301 +
302 +- return mstm->mgr.mst_state;
303 ++ return new_state;
304 ++
305 ++probe_error:
306 ++ mutex_unlock(&mstm->mgr.lock);
307 ++ return ret;
308 + }
309 +
310 + static void
311 +@@ -2049,7 +2080,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
312 + static const struct drm_mode_config_funcs
313 + nv50_disp_func = {
314 + .fb_create = nouveau_user_framebuffer_create,
315 +- .output_poll_changed = drm_fb_helper_output_poll_changed,
316 ++ .output_poll_changed = nouveau_fbcon_output_poll_changed,
317 + .atomic_check = nv50_disp_atomic_check,
318 + .atomic_commit = nv50_disp_atomic_commit,
319 + .atomic_state_alloc = nv50_disp_atomic_state_alloc,
320 +diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
321 +index af68eae4c626..de4ab310ef8e 100644
322 +--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
323 ++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
324 +@@ -570,12 +570,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
325 + nv_connector->edid = NULL;
326 + }
327 +
328 +- /* Outputs are only polled while runtime active, so acquiring a
329 +- * runtime PM ref here is unnecessary (and would deadlock upon
330 +- * runtime suspend because it waits for polling to finish).
331 ++ /* Outputs are only polled while runtime active, so resuming the
332 ++ * device here is unnecessary (and would deadlock upon runtime suspend
333 ++ * because it waits for polling to finish). We do however, want to
334 ++ * prevent the autosuspend timer from elapsing during this operation
335 ++ * if possible.
336 + */
337 +- if (!drm_kms_helper_is_poll_worker()) {
338 +- ret = pm_runtime_get_sync(connector->dev->dev);
339 ++ if (drm_kms_helper_is_poll_worker()) {
340 ++ pm_runtime_get_noresume(dev->dev);
341 ++ } else {
342 ++ ret = pm_runtime_get_sync(dev->dev);
343 + if (ret < 0 && ret != -EACCES)
344 + return conn_status;
345 + }
346 +@@ -653,10 +657,8 @@ detect_analog:
347 +
348 + out:
349 +
350 +- if (!drm_kms_helper_is_poll_worker()) {
351 +- pm_runtime_mark_last_busy(connector->dev->dev);
352 +- pm_runtime_put_autosuspend(connector->dev->dev);
353 +- }
354 ++ pm_runtime_mark_last_busy(dev->dev);
355 ++ pm_runtime_put_autosuspend(dev->dev);
356 +
357 + return conn_status;
358 + }
359 +@@ -1120,6 +1122,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
360 + const struct nvif_notify_conn_rep_v0 *rep = notify->data;
361 + const char *name = connector->name;
362 + struct nouveau_encoder *nv_encoder;
363 ++ int ret;
364 ++
365 ++ ret = pm_runtime_get(drm->dev->dev);
366 ++ if (ret == 0) {
367 ++ /* We can't block here if there's a pending PM request
368 ++ * running, as we'll deadlock nouveau_display_fini() when it
369 ++ * calls nvif_put() on our nvif_notify struct. So, simply
370 ++ * defer the hotplug event until the device finishes resuming
371 ++ */
372 ++ NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
373 ++ name);
374 ++ schedule_work(&drm->hpd_work);
375 ++
376 ++ pm_runtime_put_noidle(drm->dev->dev);
377 ++ return NVIF_NOTIFY_KEEP;
378 ++ } else if (ret != 1 && ret != -EACCES) {
379 ++ NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
380 ++ name, ret);
381 ++ return NVIF_NOTIFY_DROP;
382 ++ }
383 +
384 + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
385 + NV_DEBUG(drm, "service %s\n", name);
386 +@@ -1137,6 +1159,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
387 + drm_helper_hpd_irq_event(connector->dev);
388 + }
389 +
390 ++ pm_runtime_mark_last_busy(drm->dev->dev);
391 ++ pm_runtime_put_autosuspend(drm->dev->dev);
392 + return NVIF_NOTIFY_KEEP;
393 + }
394 +
395 +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
396 +index ec7861457b84..c5b3cc17965c 100644
397 +--- a/drivers/gpu/drm/nouveau/nouveau_display.c
398 ++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
399 +@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
400 +
401 + static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
402 + .fb_create = nouveau_user_framebuffer_create,
403 +- .output_poll_changed = drm_fb_helper_output_poll_changed,
404 ++ .output_poll_changed = nouveau_fbcon_output_poll_changed,
405 + };
406 +
407 +
408 +@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
409 + pm_runtime_get_sync(drm->dev->dev);
410 +
411 + drm_helper_hpd_irq_event(drm->dev);
412 +- /* enable polling for external displays */
413 +- drm_kms_helper_poll_enable(drm->dev);
414 +
415 + pm_runtime_mark_last_busy(drm->dev->dev);
416 + pm_runtime_put_sync(drm->dev->dev);
417 +@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
418 + {
419 + struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
420 + struct acpi_bus_event *info = data;
421 ++ int ret;
422 +
423 + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
424 + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
425 +- /*
426 +- * This may be the only indication we receive of a
427 +- * connector hotplug on a runtime suspended GPU,
428 +- * schedule hpd_work to check.
429 +- */
430 +- schedule_work(&drm->hpd_work);
431 ++ ret = pm_runtime_get(drm->dev->dev);
432 ++ if (ret == 1 || ret == -EACCES) {
433 ++ /* If the GPU is already awake, or in a state
434 ++ * where we can't wake it up, it can handle
435 ++ * it's own hotplug events.
436 ++ */
437 ++ pm_runtime_put_autosuspend(drm->dev->dev);
438 ++ } else if (ret == 0) {
439 ++ /* This may be the only indication we receive
440 ++ * of a connector hotplug on a runtime
441 ++ * suspended GPU, schedule hpd_work to check.
442 ++ */
443 ++ NV_DEBUG(drm, "ACPI requested connector reprobe\n");
444 ++ schedule_work(&drm->hpd_work);
445 ++ pm_runtime_put_noidle(drm->dev->dev);
446 ++ } else {
447 ++ NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
448 ++ ret);
449 ++ }
450 +
451 + /* acpi-video should not generate keypresses for this */
452 + return NOTIFY_BAD;
453 +@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
454 + if (ret)
455 + return ret;
456 +
457 ++ /* enable connector detection and polling for connectors without HPD
458 ++ * support
459 ++ */
460 ++ drm_kms_helper_poll_enable(dev);
461 ++
462 + /* enable hotplug interrupts */
463 + drm_connector_list_iter_begin(dev, &conn_iter);
464 + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
465 +@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
466 + }
467 +
468 + void
469 +-nouveau_display_fini(struct drm_device *dev, bool suspend)
470 ++nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
471 + {
472 + struct nouveau_display *disp = nouveau_display(dev);
473 + struct nouveau_drm *drm = nouveau_drm(dev);
474 +@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
475 + }
476 + drm_connector_list_iter_end(&conn_iter);
477 +
478 ++ if (!runtime)
479 ++ cancel_work_sync(&drm->hpd_work);
480 ++
481 + drm_kms_helper_poll_disable(dev);
482 + disp->fini(dev);
483 + }
484 +@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
485 + }
486 + }
487 +
488 +- nouveau_display_fini(dev, true);
489 ++ nouveau_display_fini(dev, true, runtime);
490 + return 0;
491 + }
492 +
493 +- nouveau_display_fini(dev, true);
494 ++ nouveau_display_fini(dev, true, runtime);
495 +
496 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
497 + struct nouveau_framebuffer *nouveau_fb;
498 +diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
499 +index 54aa7c3fa42d..ff92b54ce448 100644
500 +--- a/drivers/gpu/drm/nouveau/nouveau_display.h
501 ++++ b/drivers/gpu/drm/nouveau/nouveau_display.h
502 +@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
503 + int nouveau_display_create(struct drm_device *dev);
504 + void nouveau_display_destroy(struct drm_device *dev);
505 + int nouveau_display_init(struct drm_device *dev);
506 +-void nouveau_display_fini(struct drm_device *dev, bool suspend);
507 ++void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
508 + int nouveau_display_suspend(struct drm_device *dev, bool runtime);
509 + void nouveau_display_resume(struct drm_device *dev, bool runtime);
510 + int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
511 +diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
512 +index c7ec86d6c3c9..c2ebe5da34d0 100644
513 +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
514 ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
515 +@@ -629,7 +629,7 @@ nouveau_drm_unload(struct drm_device *dev)
516 + nouveau_debugfs_fini(drm);
517 +
518 + if (dev->mode_config.num_crtc)
519 +- nouveau_display_fini(dev, false);
520 ++ nouveau_display_fini(dev, false, false);
521 + nouveau_display_destroy(dev);
522 +
523 + nouveau_bios_takedown(dev);
524 +@@ -835,7 +835,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
525 + return -EBUSY;
526 + }
527 +
528 +- drm_kms_helper_poll_disable(drm_dev);
529 + nouveau_switcheroo_optimus_dsm();
530 + ret = nouveau_do_suspend(drm_dev, true);
531 + pci_save_state(pdev);
532 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
533 +index 85c1f10bc2b6..8cf966690963 100644
534 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
535 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
536 +@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
537 + console_unlock();
538 +
539 + if (state == FBINFO_STATE_RUNNING) {
540 ++ nouveau_fbcon_hotplug_resume(drm->fbcon);
541 + pm_runtime_mark_last_busy(drm->dev->dev);
542 + pm_runtime_put_sync(drm->dev->dev);
543 + }
544 +@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
545 + schedule_work(&drm->fbcon_work);
546 + }
547 +
548 ++void
549 ++nouveau_fbcon_output_poll_changed(struct drm_device *dev)
550 ++{
551 ++ struct nouveau_drm *drm = nouveau_drm(dev);
552 ++ struct nouveau_fbdev *fbcon = drm->fbcon;
553 ++ int ret;
554 ++
555 ++ if (!fbcon)
556 ++ return;
557 ++
558 ++ mutex_lock(&fbcon->hotplug_lock);
559 ++
560 ++ ret = pm_runtime_get(dev->dev);
561 ++ if (ret == 1 || ret == -EACCES) {
562 ++ drm_fb_helper_hotplug_event(&fbcon->helper);
563 ++
564 ++ pm_runtime_mark_last_busy(dev->dev);
565 ++ pm_runtime_put_autosuspend(dev->dev);
566 ++ } else if (ret == 0) {
567 ++ /* If the GPU was already in the process of suspending before
568 ++ * this event happened, then we can't block here as we'll
569 ++ * deadlock the runtime pmops since they wait for us to
570 ++ * finish. So, just defer this event for when we runtime
571 ++ * resume again. It will be handled by fbcon_work.
572 ++ */
573 ++ NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
574 ++ fbcon->hotplug_waiting = true;
575 ++ pm_runtime_put_noidle(drm->dev->dev);
576 ++ } else {
577 ++ DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
578 ++ ret);
579 ++ }
580 ++
581 ++ mutex_unlock(&fbcon->hotplug_lock);
582 ++}
583 ++
584 ++void
585 ++nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
586 ++{
587 ++ struct nouveau_drm *drm;
588 ++
589 ++ if (!fbcon)
590 ++ return;
591 ++ drm = nouveau_drm(fbcon->helper.dev);
592 ++
593 ++ mutex_lock(&fbcon->hotplug_lock);
594 ++ if (fbcon->hotplug_waiting) {
595 ++ fbcon->hotplug_waiting = false;
596 ++
597 ++ NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
598 ++ drm_fb_helper_hotplug_event(&fbcon->helper);
599 ++ }
600 ++ mutex_unlock(&fbcon->hotplug_lock);
601 ++}
602 ++
603 + int
604 + nouveau_fbcon_init(struct drm_device *dev)
605 + {
606 +@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
607 +
608 + drm->fbcon = fbcon;
609 + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
610 ++ mutex_init(&fbcon->hotplug_lock);
611 +
612 + drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
613 +
614 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
615 +index a6f192ea3fa6..db9d52047ef8 100644
616 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
617 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
618 +@@ -41,6 +41,9 @@ struct nouveau_fbdev {
619 + struct nvif_object gdi;
620 + struct nvif_object blit;
621 + struct nvif_object twod;
622 ++
623 ++ struct mutex hotplug_lock;
624 ++ bool hotplug_waiting;
625 + };
626 +
627 + void nouveau_fbcon_restore(void);
628 +@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
629 + void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
630 + void nouveau_fbcon_accel_restore(struct drm_device *dev);
631 +
632 ++void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
633 ++void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
634 + extern int nouveau_nofbaccel;
635 +
636 + #endif /* __NV50_FBCON_H__ */
637 +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
638 +index 8746eeeec44d..491f1892b50e 100644
639 +--- a/drivers/gpu/drm/udl/udl_fb.c
640 ++++ b/drivers/gpu/drm/udl/udl_fb.c
641 +@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
642 + {
643 + drm_fb_helper_unregister_fbi(&ufbdev->helper);
644 + drm_fb_helper_fini(&ufbdev->helper);
645 +- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
646 +- drm_framebuffer_cleanup(&ufbdev->ufb.base);
647 +- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
648 ++ if (ufbdev->ufb.obj) {
649 ++ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
650 ++ drm_framebuffer_cleanup(&ufbdev->ufb.base);
651 ++ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
652 ++ }
653 + }
654 +
655 + int udl_fbdev_init(struct drm_device *dev)
656 +diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
657 +index a951ec75d01f..cf5aea1d6488 100644
658 +--- a/drivers/gpu/drm/vc4/vc4_plane.c
659 ++++ b/drivers/gpu/drm/vc4/vc4_plane.c
660 +@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
661 + vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
662 + vc4_state->crtc_h);
663 +
664 ++ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
665 ++ vc4_state->y_scaling[0] == VC4_SCALING_NONE);
666 ++
667 + if (num_planes > 1) {
668 + vc4_state->is_yuv = true;
669 +
670 +@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
671 + vc4_get_scaling_mode(vc4_state->src_h[1],
672 + vc4_state->crtc_h);
673 +
674 +- /* YUV conversion requires that scaling be enabled,
675 +- * even on a plane that's otherwise 1:1. Choose TPZ
676 +- * for simplicity.
677 ++ /* YUV conversion requires that horizontal scaling be enabled,
678 ++ * even on a plane that's otherwise 1:1. Looks like only PPF
679 ++ * works in that case, so let's pick that one.
680 + */
681 +- if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
682 +- vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
683 +- if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
684 +- vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
685 ++ if (vc4_state->is_unity)
686 ++ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
687 + } else {
688 + vc4_state->x_scaling[1] = VC4_SCALING_NONE;
689 + vc4_state->y_scaling[1] = VC4_SCALING_NONE;
690 + }
691 +
692 +- vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
693 +- vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
694 +- vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
695 +- vc4_state->y_scaling[1] == VC4_SCALING_NONE);
696 +-
697 + /* No configuring scaling on the cursor plane, since it gets
698 + non-vblank-synced updates, and scaling requires requires
699 + LBM changes which have to be vblank-synced.
700 +@@ -621,7 +617,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
701 + vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
702 + }
703 +
704 +- if (!vc4_state->is_unity) {
705 ++ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
706 ++ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
707 ++ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
708 ++ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
709 + /* LBM Base Address. */
710 + if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
711 + vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
712 +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
713 +index aef53305f1c3..d97581ae3bf9 100644
714 +--- a/drivers/infiniband/hw/cxgb4/qp.c
715 ++++ b/drivers/infiniband/hw/cxgb4/qp.c
716 +@@ -1388,6 +1388,12 @@ static void flush_qp(struct c4iw_qp *qhp)
717 + schp = to_c4iw_cq(qhp->ibqp.send_cq);
718 +
719 + if (qhp->ibqp.uobject) {
720 ++
721 ++ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
722 ++ if (qhp->wq.flushed)
723 ++ return;
724 ++
725 ++ qhp->wq.flushed = 1;
726 + t4_set_wq_in_error(&qhp->wq);
727 + t4_set_cq_in_error(&rchp->cq);
728 + spin_lock_irqsave(&rchp->comp_handler_lock, flag);
729 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
730 +index 5f8b583c6e41..f74166aa9a0d 100644
731 +--- a/drivers/misc/vmw_balloon.c
732 ++++ b/drivers/misc/vmw_balloon.c
733 +@@ -45,6 +45,7 @@
734 + #include <linux/seq_file.h>
735 + #include <linux/vmw_vmci_defs.h>
736 + #include <linux/vmw_vmci_api.h>
737 ++#include <linux/io.h>
738 + #include <asm/hypervisor.h>
739 +
740 + MODULE_AUTHOR("VMware, Inc.");
741 +diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
742 +index e84563d2067f..3463cd94a7f6 100644
743 +--- a/drivers/mtd/devices/m25p80.c
744 ++++ b/drivers/mtd/devices/m25p80.c
745 +@@ -41,13 +41,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
746 + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
747 + SPI_MEM_OP_NO_ADDR,
748 + SPI_MEM_OP_NO_DUMMY,
749 +- SPI_MEM_OP_DATA_IN(len, val, 1));
750 ++ SPI_MEM_OP_DATA_IN(len, NULL, 1));
751 ++ void *scratchbuf;
752 + int ret;
753 +
754 ++ scratchbuf = kmalloc(len, GFP_KERNEL);
755 ++ if (!scratchbuf)
756 ++ return -ENOMEM;
757 ++
758 ++ op.data.buf.in = scratchbuf;
759 + ret = spi_mem_exec_op(flash->spimem, &op);
760 + if (ret < 0)
761 + dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
762 + code);
763 ++ else
764 ++ memcpy(val, scratchbuf, len);
765 ++
766 ++ kfree(scratchbuf);
767 +
768 + return ret;
769 + }
770 +@@ -58,9 +68,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
771 + struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
772 + SPI_MEM_OP_NO_ADDR,
773 + SPI_MEM_OP_NO_DUMMY,
774 +- SPI_MEM_OP_DATA_OUT(len, buf, 1));
775 ++ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
776 ++ void *scratchbuf;
777 ++ int ret;
778 +
779 +- return spi_mem_exec_op(flash->spimem, &op);
780 ++ scratchbuf = kmemdup(buf, len, GFP_KERNEL);
781 ++ if (!scratchbuf)
782 ++ return -ENOMEM;
783 ++
784 ++ op.data.buf.out = scratchbuf;
785 ++ ret = spi_mem_exec_op(flash->spimem, &op);
786 ++ kfree(scratchbuf);
787 ++
788 ++ return ret;
789 + }
790 +
791 + static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
792 +diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
793 +index 2a302a1d1430..c502075e5721 100644
794 +--- a/drivers/mtd/nand/raw/denali.c
795 ++++ b/drivers/mtd/nand/raw/denali.c
796 +@@ -604,6 +604,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
797 + }
798 +
799 + iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
800 ++ /*
801 ++ * The ->setup_dma() hook kicks DMA by using the data/command
802 ++ * interface, which belongs to a different AXI port from the
803 ++ * register interface. Read back the register to avoid a race.
804 ++ */
805 ++ ioread32(denali->reg + DMA_ENABLE);
806 +
807 + denali_reset_irq(denali);
808 + denali->setup_dma(denali, dma_addr, page, write);
809 +diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
810 +index 9375cef22420..3d27616d9c85 100644
811 +--- a/drivers/net/appletalk/ipddp.c
812 ++++ b/drivers/net/appletalk/ipddp.c
813 +@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
814 + case SIOCFINDIPDDPRT:
815 + spin_lock_bh(&ipddp_route_lock);
816 + rp = __ipddp_find_route(&rcp);
817 +- if (rp)
818 +- memcpy(&rcp2, rp, sizeof(rcp2));
819 ++ if (rp) {
820 ++ memset(&rcp2, 0, sizeof(rcp2));
821 ++ rcp2.ip = rp->ip;
822 ++ rcp2.at = rp->at;
823 ++ rcp2.flags = rp->flags;
824 ++ }
825 + spin_unlock_bh(&ipddp_route_lock);
826 +
827 + if (rp) {
828 +diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
829 +index 7c791c1da4b9..bef01331266f 100644
830 +--- a/drivers/net/dsa/mv88e6xxx/global1.h
831 ++++ b/drivers/net/dsa/mv88e6xxx/global1.h
832 +@@ -128,7 +128,7 @@
833 + #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
834 + #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
835 + #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
836 +-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5)
837 ++#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
838 + #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
839 +
840 + /* Offset 0x0C: ATU Data Register */
841 +diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
842 +index 307410898fc9..5200e4bdce93 100644
843 +--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
844 ++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
845 +@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
846 + chip->ports[entry.portvec].atu_member_violation++;
847 + }
848 +
849 +- if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
850 ++ if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
851 + dev_err_ratelimited(chip->dev,
852 + "ATU miss violation for %pM portvec %x\n",
853 + entry.mac, entry.portvec);
854 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
855 +index 4fdf3d33aa59..80b05597c5fe 100644
856 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
857 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
858 +@@ -7888,7 +7888,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
859 + if (ether_addr_equal(addr->sa_data, dev->dev_addr))
860 + return 0;
861 +
862 +- rc = bnxt_approve_mac(bp, addr->sa_data);
863 ++ rc = bnxt_approve_mac(bp, addr->sa_data, true);
864 + if (rc)
865 + return rc;
866 +
867 +@@ -8683,14 +8683,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
868 + } else {
869 + #ifdef CONFIG_BNXT_SRIOV
870 + struct bnxt_vf_info *vf = &bp->vf;
871 ++ bool strict_approval = true;
872 +
873 + if (is_valid_ether_addr(vf->mac_addr)) {
874 + /* overwrite netdev dev_addr with admin VF MAC */
875 + memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
876 ++ /* Older PF driver or firmware may not approve this
877 ++ * correctly.
878 ++ */
879 ++ strict_approval = false;
880 + } else {
881 + eth_hw_addr_random(bp->dev);
882 + }
883 +- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
884 ++ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
885 + #endif
886 + }
887 + return rc;
888 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
889 +index 2c77004a022b..24d16d3d33a1 100644
890 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
891 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
892 +@@ -1095,7 +1095,7 @@ update_vf_mac_exit:
893 + mutex_unlock(&bp->hwrm_cmd_lock);
894 + }
895 +
896 +-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
897 ++int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
898 + {
899 + struct hwrm_func_vf_cfg_input req = {0};
900 + int rc = 0;
901 +@@ -1113,12 +1113,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
902 + memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
903 + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
904 + mac_done:
905 +- if (rc) {
906 ++ if (rc && strict) {
907 + rc = -EADDRNOTAVAIL;
908 + netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
909 + mac);
910 ++ return rc;
911 + }
912 +- return rc;
913 ++ return 0;
914 + }
915 + #else
916 +
917 +@@ -1135,7 +1136,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
918 + {
919 + }
920 +
921 +-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
922 ++int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
923 + {
924 + return 0;
925 + }
926 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
927 +index e9b20cd19881..2eed9eda1195 100644
928 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
929 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
930 +@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
931 + void bnxt_sriov_disable(struct bnxt *);
932 + void bnxt_hwrm_exec_fwd_req(struct bnxt *);
933 + void bnxt_update_vf_mac(struct bnxt *);
934 +-int bnxt_approve_mac(struct bnxt *, u8 *);
935 ++int bnxt_approve_mac(struct bnxt *, u8 *, bool);
936 + #endif
937 +diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
938 +index c8c7ad2eff77..9b5a68b65432 100644
939 +--- a/drivers/net/ethernet/hp/hp100.c
940 ++++ b/drivers/net/ethernet/hp/hp100.c
941 +@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
942 + /* Wait for link to drop */
943 + time = jiffies + (HZ / 10);
944 + do {
945 +- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
946 ++ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
947 + break;
948 + if (!in_interrupt())
949 + schedule_timeout_interruptible(1);
950 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
951 +index f7f08e3fa761..661fa5a38df2 100644
952 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
953 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
954 +@@ -61,6 +61,8 @@ static struct {
955 + */
956 + static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
957 + const struct phylink_link_state *state);
958 ++static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
959 ++ phy_interface_t interface, struct phy_device *phy);
960 +
961 + /* Queue modes */
962 + #define MVPP2_QDIST_SINGLE_MODE 0
963 +@@ -3142,6 +3144,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
964 + mvpp22_mode_reconfigure(port);
965 +
966 + if (port->phylink) {
967 ++ netif_carrier_off(port->dev);
968 + phylink_start(port->phylink);
969 + } else {
970 + /* Phylink isn't used as of now for ACPI, so the MAC has to be
971 +@@ -3150,9 +3153,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
972 + */
973 + struct phylink_link_state state = {
974 + .interface = port->phy_interface,
975 +- .link = 1,
976 + };
977 + mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
978 ++ mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
979 ++ NULL);
980 + }
981 +
982 + netif_tx_start_all_queues(port->dev);
983 +@@ -4389,10 +4393,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
984 + return;
985 + }
986 +
987 +- netif_tx_stop_all_queues(port->dev);
988 +- if (!port->has_phy)
989 +- netif_carrier_off(port->dev);
990 +-
991 + /* Make sure the port is disabled when reconfiguring the mode */
992 + mvpp2_port_disable(port);
993 +
994 +@@ -4417,16 +4417,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
995 + if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
996 + mvpp2_port_loopback_set(port, state);
997 +
998 +- /* If the port already was up, make sure it's still in the same state */
999 +- if (state->link || !port->has_phy) {
1000 +- mvpp2_port_enable(port);
1001 +-
1002 +- mvpp2_egress_enable(port);
1003 +- mvpp2_ingress_enable(port);
1004 +- if (!port->has_phy)
1005 +- netif_carrier_on(dev);
1006 +- netif_tx_wake_all_queues(dev);
1007 +- }
1008 ++ mvpp2_port_enable(port);
1009 + }
1010 +
1011 + static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
1012 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1013 +index 6d74cde68163..c0fc30a1f600 100644
1014 +--- a/drivers/net/hyperv/netvsc_drv.c
1015 ++++ b/drivers/net/hyperv/netvsc_drv.c
1016 +@@ -2172,17 +2172,15 @@ static int netvsc_remove(struct hv_device *dev)
1017 +
1018 + cancel_delayed_work_sync(&ndev_ctx->dwork);
1019 +
1020 +- rcu_read_lock();
1021 +- nvdev = rcu_dereference(ndev_ctx->nvdev);
1022 +-
1023 +- if (nvdev)
1024 ++ rtnl_lock();
1025 ++ nvdev = rtnl_dereference(ndev_ctx->nvdev);
1026 ++ if (nvdev)
1027 + cancel_work_sync(&nvdev->subchan_work);
1028 +
1029 + /*
1030 + * Call to the vsc driver to let it know that the device is being
1031 + * removed. Also blocks mtu and channel changes.
1032 + */
1033 +- rtnl_lock();
1034 + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1035 + if (vf_netdev)
1036 + netvsc_unregister_vf(vf_netdev);
1037 +@@ -2194,7 +2192,6 @@ static int netvsc_remove(struct hv_device *dev)
1038 + list_del(&ndev_ctx->list);
1039 +
1040 + rtnl_unlock();
1041 +- rcu_read_unlock();
1042 +
1043 + hv_set_drvdata(dev, NULL);
1044 +
1045 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1046 +index ce61231e96ea..62dc564b251d 100644
1047 +--- a/drivers/net/ppp/pppoe.c
1048 ++++ b/drivers/net/ppp/pppoe.c
1049 +@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
1050 + if (!skb)
1051 + goto out;
1052 +
1053 ++ if (skb_mac_header_len(skb) < ETH_HLEN)
1054 ++ goto drop;
1055 ++
1056 + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
1057 + goto drop;
1058 +
1059 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1060 +index cb0cc30c3d6a..1e95d37c6e27 100644
1061 +--- a/drivers/net/usb/qmi_wwan.c
1062 ++++ b/drivers/net/usb/qmi_wwan.c
1063 +@@ -1206,13 +1206,13 @@ static const struct usb_device_id products[] = {
1064 + {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1065 + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1066 + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1067 +- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1068 +- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
1069 +- {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1070 +- {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1071 +- {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1072 +- {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1073 +- {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1074 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1075 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
1076 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1077 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
1078 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1079 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
1080 ++ {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1081 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1082 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1083 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1084 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1085 +index c2b6aa1d485f..f49c2a60a6eb 100644
1086 +--- a/drivers/net/xen-netfront.c
1087 ++++ b/drivers/net/xen-netfront.c
1088 +@@ -907,7 +907,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
1089 + BUG_ON(pull_to <= skb_headlen(skb));
1090 + __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1091 + }
1092 +- BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
1093 ++ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1094 ++ queue->rx.rsp_cons = ++cons;
1095 ++ kfree_skb(nskb);
1096 ++ return ~0U;
1097 ++ }
1098 +
1099 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1100 + skb_frag_page(nfrag),
1101 +@@ -1044,6 +1048,8 @@ err:
1102 + skb->len += rx->status;
1103 +
1104 + i = xennet_fill_frags(queue, skb, &tmpq);
1105 ++ if (unlikely(i == ~0U))
1106 ++ goto err;
1107 +
1108 + if (rx->flags & XEN_NETRXF_csum_blank)
1109 + skb->ip_summed = CHECKSUM_PARTIAL;
1110 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1111 +index f439de848658..d1e2d175c10b 100644
1112 +--- a/drivers/pci/quirks.c
1113 ++++ b/drivers/pci/quirks.c
1114 +@@ -4235,11 +4235,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
1115 + *
1116 + * 0x9d10-0x9d1b PCI Express Root port #{1-12}
1117 + *
1118 +- * The 300 series chipset suffers from the same bug so include those root
1119 +- * ports here as well.
1120 +- *
1121 +- * 0xa32c-0xa343 PCI Express Root port #{0-24}
1122 +- *
1123 + * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
1124 + * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
1125 + * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
1126 +@@ -4257,7 +4252,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
1127 + case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
1128 + case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
1129 + case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
1130 +- case 0xa32c ... 0xa343: /* 300 series */
1131 + return true;
1132 + }
1133 +
1134 +diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
1135 +index d975462a4c57..f10af5c383c5 100644
1136 +--- a/drivers/platform/x86/alienware-wmi.c
1137 ++++ b/drivers/platform/x86/alienware-wmi.c
1138 +@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
1139 + if (obj && obj->type == ACPI_TYPE_INTEGER)
1140 + *out_data = (u32) obj->integer.value;
1141 + }
1142 ++ kfree(output.pointer);
1143 + return status;
1144 +
1145 + }
1146 +diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
1147 +index fbefedb1c172..548abba2c1e9 100644
1148 +--- a/drivers/platform/x86/dell-smbios-wmi.c
1149 ++++ b/drivers/platform/x86/dell-smbios-wmi.c
1150 +@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
1151 + dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
1152 + priv->buf->std.output[0], priv->buf->std.output[1],
1153 + priv->buf->std.output[2], priv->buf->std.output[3]);
1154 ++ kfree(output.pointer);
1155 +
1156 + return 0;
1157 + }
1158 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
1159 +index 8122807db380..b714a543a91d 100644
1160 +--- a/drivers/rpmsg/rpmsg_core.c
1161 ++++ b/drivers/rpmsg/rpmsg_core.c
1162 +@@ -15,7 +15,6 @@
1163 + #include <linux/module.h>
1164 + #include <linux/rpmsg.h>
1165 + #include <linux/of_device.h>
1166 +-#include <linux/pm_domain.h>
1167 + #include <linux/slab.h>
1168 +
1169 + #include "rpmsg_internal.h"
1170 +@@ -450,10 +449,6 @@ static int rpmsg_dev_probe(struct device *dev)
1171 + struct rpmsg_endpoint *ept = NULL;
1172 + int err;
1173 +
1174 +- err = dev_pm_domain_attach(dev, true);
1175 +- if (err)
1176 +- goto out;
1177 +-
1178 + if (rpdrv->callback) {
1179 + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
1180 + chinfo.src = rpdev->src;
1181 +@@ -495,8 +490,6 @@ static int rpmsg_dev_remove(struct device *dev)
1182 +
1183 + rpdrv->remove(rpdev);
1184 +
1185 +- dev_pm_domain_detach(dev, true);
1186 +-
1187 + if (rpdev->ept)
1188 + rpmsg_destroy_ept(rpdev->ept);
1189 +
1190 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
1191 +index ec395a6baf9c..9da0bc5a036c 100644
1192 +--- a/drivers/spi/spi.c
1193 ++++ b/drivers/spi/spi.c
1194 +@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
1195 + */
1196 + if (ctlr->num_chipselect == 0)
1197 + return -EINVAL;
1198 +- /* allocate dynamic bus number using Linux idr */
1199 +- if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
1200 ++ if (ctlr->bus_num >= 0) {
1201 ++ /* devices with a fixed bus num must check-in with the num */
1202 ++ mutex_lock(&board_lock);
1203 ++ id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
1204 ++ ctlr->bus_num + 1, GFP_KERNEL);
1205 ++ mutex_unlock(&board_lock);
1206 ++ if (WARN(id < 0, "couldn't get idr"))
1207 ++ return id == -ENOSPC ? -EBUSY : id;
1208 ++ ctlr->bus_num = id;
1209 ++ } else if (ctlr->dev.of_node) {
1210 ++ /* allocate dynamic bus number using Linux idr */
1211 + id = of_alias_get_id(ctlr->dev.of_node, "spi");
1212 + if (id >= 0) {
1213 + ctlr->bus_num = id;
1214 +diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
1215 +index 9518ffd8b8ba..4e680d753941 100644
1216 +--- a/drivers/target/iscsi/iscsi_target_auth.c
1217 ++++ b/drivers/target/iscsi/iscsi_target_auth.c
1218 +@@ -26,27 +26,6 @@
1219 + #include "iscsi_target_nego.h"
1220 + #include "iscsi_target_auth.h"
1221 +
1222 +-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
1223 +-{
1224 +- int j = DIV_ROUND_UP(len, 2), rc;
1225 +-
1226 +- rc = hex2bin(dst, src, j);
1227 +- if (rc < 0)
1228 +- pr_debug("CHAP string contains non hex digit symbols\n");
1229 +-
1230 +- dst[j] = '\0';
1231 +- return j;
1232 +-}
1233 +-
1234 +-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
1235 +-{
1236 +- int i;
1237 +-
1238 +- for (i = 0; i < src_len; i++) {
1239 +- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
1240 +- }
1241 +-}
1242 +-
1243 + static int chap_gen_challenge(
1244 + struct iscsi_conn *conn,
1245 + int caller,
1246 +@@ -62,7 +41,7 @@ static int chap_gen_challenge(
1247 + ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
1248 + if (unlikely(ret))
1249 + return ret;
1250 +- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
1251 ++ bin2hex(challenge_asciihex, chap->challenge,
1252 + CHAP_CHALLENGE_LENGTH);
1253 + /*
1254 + * Set CHAP_C, and copy the generated challenge into c_str.
1255 +@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
1256 + pr_err("Could not find CHAP_R.\n");
1257 + goto out;
1258 + }
1259 ++ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
1260 ++ pr_err("Malformed CHAP_R\n");
1261 ++ goto out;
1262 ++ }
1263 ++ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
1264 ++ pr_err("Malformed CHAP_R\n");
1265 ++ goto out;
1266 ++ }
1267 +
1268 + pr_debug("[server] Got CHAP_R=%s\n", chap_r);
1269 +- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
1270 +
1271 + tfm = crypto_alloc_shash("md5", 0, 0);
1272 + if (IS_ERR(tfm)) {
1273 +@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
1274 + goto out;
1275 + }
1276 +
1277 +- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
1278 ++ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
1279 + pr_debug("[server] MD5 Server Digest: %s\n", response);
1280 +
1281 + if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
1282 +@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
1283 + pr_err("Could not find CHAP_C.\n");
1284 + goto out;
1285 + }
1286 +- pr_debug("[server] Got CHAP_C=%s\n", challenge);
1287 +- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
1288 +- strlen(challenge));
1289 ++ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
1290 + if (!challenge_len) {
1291 + pr_err("Unable to convert incoming challenge\n");
1292 + goto out;
1293 +@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
1294 + pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
1295 + goto out;
1296 + }
1297 ++ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
1298 ++ pr_err("Malformed CHAP_C\n");
1299 ++ goto out;
1300 ++ }
1301 ++ pr_debug("[server] Got CHAP_C=%s\n", challenge);
1302 + /*
1303 + * During mutual authentication, the CHAP_C generated by the
1304 + * initiator must not match the original CHAP_C generated by
1305 +@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
1306 + /*
1307 + * Convert response from binary hex to ascii hext.
1308 + */
1309 +- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
1310 ++ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
1311 + *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
1312 + response);
1313 + *nr_out_len += 1;
1314 +diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
1315 +index a78ad10a119b..73cdc0d633dd 100644
1316 +--- a/drivers/tty/vt/vt_ioctl.c
1317 ++++ b/drivers/tty/vt/vt_ioctl.c
1318 +@@ -32,6 +32,8 @@
1319 + #include <asm/io.h>
1320 + #include <linux/uaccess.h>
1321 +
1322 ++#include <linux/nospec.h>
1323 ++
1324 + #include <linux/kbd_kern.h>
1325 + #include <linux/vt_kern.h>
1326 + #include <linux/kbd_diacr.h>
1327 +@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
1328 + if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
1329 + ret = -ENXIO;
1330 + else {
1331 ++ vsa.console = array_index_nospec(vsa.console,
1332 ++ MAX_NR_CONSOLES + 1);
1333 + vsa.console--;
1334 + console_lock();
1335 + ret = vc_allocate(vsa.console);
1336 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
1337 +index e2902d394f1b..f93f9881ec18 100644
1338 +--- a/fs/ext4/dir.c
1339 ++++ b/fs/ext4/dir.c
1340 +@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
1341 + else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
1342 + error_msg = "rec_len is too small for name_len";
1343 + else if (unlikely(((char *) de - buf) + rlen > size))
1344 +- error_msg = "directory entry across range";
1345 ++ error_msg = "directory entry overrun";
1346 + else if (unlikely(le32_to_cpu(de->inode) >
1347 + le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
1348 + error_msg = "inode out of bounds";
1349 +@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
1350 +
1351 + if (filp)
1352 + ext4_error_file(filp, function, line, bh->b_blocknr,
1353 +- "bad entry in directory: %s - offset=%u(%u), "
1354 +- "inode=%u, rec_len=%d, name_len=%d",
1355 +- error_msg, (unsigned) (offset % size),
1356 +- offset, le32_to_cpu(de->inode),
1357 +- rlen, de->name_len);
1358 ++ "bad entry in directory: %s - offset=%u, "
1359 ++ "inode=%u, rec_len=%d, name_len=%d, size=%d",
1360 ++ error_msg, offset, le32_to_cpu(de->inode),
1361 ++ rlen, de->name_len, size);
1362 + else
1363 + ext4_error_inode(dir, function, line, bh->b_blocknr,
1364 +- "bad entry in directory: %s - offset=%u(%u), "
1365 +- "inode=%u, rec_len=%d, name_len=%d",
1366 +- error_msg, (unsigned) (offset % size),
1367 +- offset, le32_to_cpu(de->inode),
1368 +- rlen, de->name_len);
1369 ++ "bad entry in directory: %s - offset=%u, "
1370 ++ "inode=%u, rec_len=%d, name_len=%d, size=%d",
1371 ++ error_msg, offset, le32_to_cpu(de->inode),
1372 ++ rlen, de->name_len, size);
1373 +
1374 + return 1;
1375 + }
1376 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1377 +index 7c7123f265c2..aa1ce53d0c87 100644
1378 +--- a/fs/ext4/ext4.h
1379 ++++ b/fs/ext4/ext4.h
1380 +@@ -675,6 +675,9 @@ enum {
1381 + /* Max physical block we can address w/o extents */
1382 + #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
1383 +
1384 ++/* Max logical block we can support */
1385 ++#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF
1386 ++
1387 + /*
1388 + * Structure of an inode on the disk
1389 + */
1390 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1391 +index 3543fe80a3c4..7b4736022761 100644
1392 +--- a/fs/ext4/inline.c
1393 ++++ b/fs/ext4/inline.c
1394 +@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
1395 + {
1396 + int err, inline_size;
1397 + struct ext4_iloc iloc;
1398 ++ size_t inline_len;
1399 + void *inline_pos;
1400 + unsigned int offset;
1401 + struct ext4_dir_entry_2 *de;
1402 +@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
1403 + goto out;
1404 + }
1405 +
1406 ++ inline_len = ext4_get_inline_size(dir);
1407 + offset = EXT4_INLINE_DOTDOT_SIZE;
1408 +- while (offset < dir->i_size) {
1409 ++ while (offset < inline_len) {
1410 + de = ext4_get_inline_entry(dir, &iloc, offset,
1411 + &inline_pos, &inline_size);
1412 + if (ext4_check_dir_entry(dir, NULL, de,
1413 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1414 +index 4efe77286ecd..2276137d0083 100644
1415 +--- a/fs/ext4/inode.c
1416 ++++ b/fs/ext4/inode.c
1417 +@@ -3412,12 +3412,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
1418 + {
1419 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1420 + unsigned int blkbits = inode->i_blkbits;
1421 +- unsigned long first_block = offset >> blkbits;
1422 +- unsigned long last_block = (offset + length - 1) >> blkbits;
1423 ++ unsigned long first_block, last_block;
1424 + struct ext4_map_blocks map;
1425 + bool delalloc = false;
1426 + int ret;
1427 +
1428 ++ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
1429 ++ return -EINVAL;
1430 ++ first_block = offset >> blkbits;
1431 ++ last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
1432 ++ EXT4_MAX_LOGICAL_BLOCK);
1433 +
1434 + if (flags & IOMAP_REPORT) {
1435 + if (ext4_has_inline_data(inode)) {
1436 +@@ -3947,6 +3951,7 @@ static const struct address_space_operations ext4_dax_aops = {
1437 + .writepages = ext4_dax_writepages,
1438 + .direct_IO = noop_direct_IO,
1439 + .set_page_dirty = noop_set_page_dirty,
1440 ++ .bmap = ext4_bmap,
1441 + .invalidatepage = noop_invalidatepage,
1442 + };
1443 +
1444 +@@ -4856,6 +4861,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1445 + * not initialized on a new filesystem. */
1446 + }
1447 + ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1448 ++ ext4_set_inode_flags(inode);
1449 + inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
1450 + ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
1451 + if (ext4_has_feature_64bit(sb))
1452 +@@ -5005,7 +5011,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1453 + goto bad_inode;
1454 + }
1455 + brelse(iloc.bh);
1456 +- ext4_set_inode_flags(inode);
1457 +
1458 + unlock_new_inode(inode);
1459 + return inode;
1460 +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
1461 +index 638ad4743477..38e6a846aac1 100644
1462 +--- a/fs/ext4/mmp.c
1463 ++++ b/fs/ext4/mmp.c
1464 +@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
1465 + */
1466 + sb_start_write(sb);
1467 + ext4_mmp_csum_set(sb, mmp);
1468 +- mark_buffer_dirty(bh);
1469 + lock_buffer(bh);
1470 + bh->b_end_io = end_buffer_write_sync;
1471 + get_bh(bh);
1472 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1473 +index 116ff68c5bd4..377d516c475f 100644
1474 +--- a/fs/ext4/namei.c
1475 ++++ b/fs/ext4/namei.c
1476 +@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1477 + int credits;
1478 + u8 old_file_type;
1479 +
1480 ++ if (new.inode && new.inode->i_nlink == 0) {
1481 ++ EXT4_ERROR_INODE(new.inode,
1482 ++ "target of rename is already freed");
1483 ++ return -EFSCORRUPTED;
1484 ++ }
1485 ++
1486 + if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
1487 + (!projid_eq(EXT4_I(new_dir)->i_projid,
1488 + EXT4_I(old_dentry->d_inode)->i_projid)))
1489 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1490 +index e5fb38451a73..ebbc663d0798 100644
1491 +--- a/fs/ext4/resize.c
1492 ++++ b/fs/ext4/resize.c
1493 +@@ -19,6 +19,7 @@
1494 +
1495 + int ext4_resize_begin(struct super_block *sb)
1496 + {
1497 ++ struct ext4_sb_info *sbi = EXT4_SB(sb);
1498 + int ret = 0;
1499 +
1500 + if (!capable(CAP_SYS_RESOURCE))
1501 +@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
1502 + * because the user tools have no way of handling this. Probably a
1503 + * bad time to do it anyways.
1504 + */
1505 +- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
1506 ++ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
1507 + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
1508 + ext4_warning(sb, "won't resize using backup superblock at %llu",
1509 + (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
1510 +@@ -1986,6 +1987,26 @@ retry:
1511 + }
1512 + }
1513 +
1514 ++ /*
1515 ++ * Make sure the last group has enough space so that it's
1516 ++ * guaranteed to have enough space for all metadata blocks
1517 ++ * that it might need to hold. (We might not need to store
1518 ++ * the inode table blocks in the last block group, but there
1519 ++ * will be cases where this might be needed.)
1520 ++ */
1521 ++ if ((ext4_group_first_block_no(sb, n_group) +
1522 ++ ext4_group_overhead_blocks(sb, n_group) + 2 +
1523 ++ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
1524 ++ n_blocks_count = ext4_group_first_block_no(sb, n_group);
1525 ++ n_group--;
1526 ++ n_blocks_count_retry = 0;
1527 ++ if (resize_inode) {
1528 ++ iput(resize_inode);
1529 ++ resize_inode = NULL;
1530 ++ }
1531 ++ goto retry;
1532 ++ }
1533 ++
1534 + /* extend the last group */
1535 + if (n_group == o_group)
1536 + add = n_blocks_count - o_blocks_count;
1537 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1538 +index 130c12974e28..a7a0fffc3ae8 100644
1539 +--- a/fs/ext4/super.c
1540 ++++ b/fs/ext4/super.c
1541 +@@ -2126,6 +2126,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
1542 + SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
1543 + if (test_opt(sb, DATA_ERR_ABORT))
1544 + SEQ_OPTS_PUTS("data_err=abort");
1545 ++ if (DUMMY_ENCRYPTION_ENABLED(sbi))
1546 ++ SEQ_OPTS_PUTS("test_dummy_encryption");
1547 +
1548 + ext4_show_quota_options(seq, sb);
1549 + return 0;
1550 +@@ -4357,11 +4359,13 @@ no_journal:
1551 + block = ext4_count_free_clusters(sb);
1552 + ext4_free_blocks_count_set(sbi->s_es,
1553 + EXT4_C2B(sbi, block));
1554 ++ ext4_superblock_csum_set(sb);
1555 + err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
1556 + GFP_KERNEL);
1557 + if (!err) {
1558 + unsigned long freei = ext4_count_free_inodes(sb);
1559 + sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
1560 ++ ext4_superblock_csum_set(sb);
1561 + err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
1562 + GFP_KERNEL);
1563 + }
1564 +diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
1565 +index d9ebe11c8990..1d098c3c00e0 100644
1566 +--- a/fs/ocfs2/buffer_head_io.c
1567 ++++ b/fs/ocfs2/buffer_head_io.c
1568 +@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
1569 + * for this bh as it's not marked locally
1570 + * uptodate. */
1571 + status = -EIO;
1572 ++ clear_buffer_needs_validate(bh);
1573 + put_bh(bh);
1574 + bhs[i] = NULL;
1575 + continue;
1576 +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
1577 +index 09e37e63bddd..6f720fdf5020 100644
1578 +--- a/fs/ubifs/xattr.c
1579 ++++ b/fs/ubifs/xattr.c
1580 +@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
1581 + ui->data_len = size;
1582 +
1583 + mutex_lock(&host_ui->ui_mutex);
1584 +-
1585 +- if (!host->i_nlink) {
1586 +- err = -ENOENT;
1587 +- goto out_noent;
1588 +- }
1589 +-
1590 + host->i_ctime = current_time(host);
1591 + host_ui->xattr_cnt += 1;
1592 + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
1593 +@@ -190,7 +184,6 @@ out_cancel:
1594 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
1595 + host_ui->xattr_names -= fname_len(nm);
1596 + host_ui->flags &= ~UBIFS_CRYPT_FL;
1597 +-out_noent:
1598 + mutex_unlock(&host_ui->ui_mutex);
1599 + out_free:
1600 + make_bad_inode(inode);
1601 +@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
1602 + mutex_unlock(&ui->ui_mutex);
1603 +
1604 + mutex_lock(&host_ui->ui_mutex);
1605 +-
1606 +- if (!host->i_nlink) {
1607 +- err = -ENOENT;
1608 +- goto out_noent;
1609 +- }
1610 +-
1611 + host->i_ctime = current_time(host);
1612 + host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
1613 + host_ui->xattr_size += CALC_XATTR_BYTES(size);
1614 +@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
1615 + out_cancel:
1616 + host_ui->xattr_size -= CALC_XATTR_BYTES(size);
1617 + host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
1618 +-out_noent:
1619 + mutex_unlock(&host_ui->ui_mutex);
1620 + make_bad_inode(inode);
1621 + out_free:
1622 +@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
1623 + return err;
1624 +
1625 + mutex_lock(&host_ui->ui_mutex);
1626 +-
1627 +- if (!host->i_nlink) {
1628 +- err = -ENOENT;
1629 +- goto out_noent;
1630 +- }
1631 +-
1632 + host->i_ctime = current_time(host);
1633 + host_ui->xattr_cnt -= 1;
1634 + host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
1635 +@@ -521,7 +501,6 @@ out_cancel:
1636 + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
1637 + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
1638 + host_ui->xattr_names += fname_len(nm);
1639 +-out_noent:
1640 + mutex_unlock(&host_ui->ui_mutex);
1641 + ubifs_release_budget(c, &req);
1642 + make_bad_inode(inode);
1643 +@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
1644 +
1645 + ubifs_assert(inode_is_locked(host));
1646 +
1647 +- if (!host->i_nlink)
1648 +- return -ENOENT;
1649 +-
1650 + if (fname_len(&nm) > UBIFS_MAX_NLEN)
1651 + return -ENAMETOOLONG;
1652 +
1653 +diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
1654 +index 316694dafa5b..008f466d1da7 100644
1655 +--- a/include/net/nfc/hci.h
1656 ++++ b/include/net/nfc/hci.h
1657 +@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
1658 + * According to specification 102 622 chapter 4.4 Pipes,
1659 + * the pipe identifier is 7 bits long.
1660 + */
1661 +-#define NFC_HCI_MAX_PIPES 127
1662 ++#define NFC_HCI_MAX_PIPES 128
1663 + struct nfc_hci_init_data {
1664 + u8 gate_count;
1665 + struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
1666 +diff --git a/include/net/tls.h b/include/net/tls.h
1667 +index 70c273777fe9..32b71e5b1290 100644
1668 +--- a/include/net/tls.h
1669 ++++ b/include/net/tls.h
1670 +@@ -165,15 +165,14 @@ struct cipher_context {
1671 + char *rec_seq;
1672 + };
1673 +
1674 ++union tls_crypto_context {
1675 ++ struct tls_crypto_info info;
1676 ++ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
1677 ++};
1678 ++
1679 + struct tls_context {
1680 +- union {
1681 +- struct tls_crypto_info crypto_send;
1682 +- struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
1683 +- };
1684 +- union {
1685 +- struct tls_crypto_info crypto_recv;
1686 +- struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
1687 +- };
1688 ++ union tls_crypto_context crypto_send;
1689 ++ union tls_crypto_context crypto_recv;
1690 +
1691 + struct list_head list;
1692 + struct net_device *netdev;
1693 +@@ -337,8 +336,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
1694 + * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
1695 + */
1696 + buf[0] = record_type;
1697 +- buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
1698 +- buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
1699 ++ buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
1700 ++ buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
1701 + /* we can use IV for nonce explicit according to spec */
1702 + buf[3] = pkt_len >> 8;
1703 + buf[4] = pkt_len & 0xFF;
1704 +diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
1705 +index 910cc4334b21..7b8c9e19bad1 100644
1706 +--- a/include/uapi/linux/keyctl.h
1707 ++++ b/include/uapi/linux/keyctl.h
1708 +@@ -65,7 +65,7 @@
1709 +
1710 + /* keyctl structures */
1711 + struct keyctl_dh_params {
1712 +- __s32 dh_private;
1713 ++ __s32 private;
1714 + __s32 prime;
1715 + __s32 base;
1716 + };
1717 +diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
1718 +index f58cafa42f18..f39352cef382 100644
1719 +--- a/include/uapi/sound/skl-tplg-interface.h
1720 ++++ b/include/uapi/sound/skl-tplg-interface.h
1721 +@@ -10,6 +10,8 @@
1722 + #ifndef __HDA_TPLG_INTERFACE_H__
1723 + #define __HDA_TPLG_INTERFACE_H__
1724 +
1725 ++#include <linux/types.h>
1726 ++
1727 + /*
1728 + * Default types range from 0~12. type can range from 0 to 0xff
1729 + * SST types start at higher to avoid any overlapping in future
1730 +@@ -143,10 +145,10 @@ enum skl_module_param_type {
1731 + };
1732 +
1733 + struct skl_dfw_algo_data {
1734 +- u32 set_params:2;
1735 +- u32 rsvd:30;
1736 +- u32 param_id;
1737 +- u32 max;
1738 ++ __u32 set_params:2;
1739 ++ __u32 rsvd:30;
1740 ++ __u32 param_id;
1741 ++ __u32 max;
1742 + char params[0];
1743 + } __packed;
1744 +
1745 +@@ -163,68 +165,68 @@ enum skl_tuple_type {
1746 + /* v4 configuration data */
1747 +
1748 + struct skl_dfw_v4_module_pin {
1749 +- u16 module_id;
1750 +- u16 instance_id;
1751 ++ __u16 module_id;
1752 ++ __u16 instance_id;
1753 + } __packed;
1754 +
1755 + struct skl_dfw_v4_module_fmt {
1756 +- u32 channels;
1757 +- u32 freq;
1758 +- u32 bit_depth;
1759 +- u32 valid_bit_depth;
1760 +- u32 ch_cfg;
1761 +- u32 interleaving_style;
1762 +- u32 sample_type;
1763 +- u32 ch_map;
1764 ++ __u32 channels;
1765 ++ __u32 freq;
1766 ++ __u32 bit_depth;
1767 ++ __u32 valid_bit_depth;
1768 ++ __u32 ch_cfg;
1769 ++ __u32 interleaving_style;
1770 ++ __u32 sample_type;
1771 ++ __u32 ch_map;
1772 + } __packed;
1773 +
1774 + struct skl_dfw_v4_module_caps {
1775 +- u32 set_params:2;
1776 +- u32 rsvd:30;
1777 +- u32 param_id;
1778 +- u32 caps_size;
1779 +- u32 caps[HDA_SST_CFG_MAX];
1780 ++ __u32 set_params:2;
1781 ++ __u32 rsvd:30;
1782 ++ __u32 param_id;
1783 ++ __u32 caps_size;
1784 ++ __u32 caps[HDA_SST_CFG_MAX];
1785 + } __packed;
1786 +
1787 + struct skl_dfw_v4_pipe {
1788 +- u8 pipe_id;
1789 +- u8 pipe_priority;
1790 +- u16 conn_type:4;
1791 +- u16 rsvd:4;
1792 +- u16 memory_pages:8;
1793 ++ __u8 pipe_id;
1794 ++ __u8 pipe_priority;
1795 ++ __u16 conn_type:4;
1796 ++ __u16 rsvd:4;
1797 ++ __u16 memory_pages:8;
1798 + } __packed;
1799 +
1800 + struct skl_dfw_v4_module {
1801 + char uuid[SKL_UUID_STR_SZ];
1802 +
1803 +- u16 module_id;
1804 +- u16 instance_id;
1805 +- u32 max_mcps;
1806 +- u32 mem_pages;
1807 +- u32 obs;
1808 +- u32 ibs;
1809 +- u32 vbus_id;
1810 +-
1811 +- u32 max_in_queue:8;
1812 +- u32 max_out_queue:8;
1813 +- u32 time_slot:8;
1814 +- u32 core_id:4;
1815 +- u32 rsvd1:4;
1816 +-
1817 +- u32 module_type:8;
1818 +- u32 conn_type:4;
1819 +- u32 dev_type:4;
1820 +- u32 hw_conn_type:4;
1821 +- u32 rsvd2:12;
1822 +-
1823 +- u32 params_fixup:8;
1824 +- u32 converter:8;
1825 +- u32 input_pin_type:1;
1826 +- u32 output_pin_type:1;
1827 +- u32 is_dynamic_in_pin:1;
1828 +- u32 is_dynamic_out_pin:1;
1829 +- u32 is_loadable:1;
1830 +- u32 rsvd3:11;
1831 ++ __u16 module_id;
1832 ++ __u16 instance_id;
1833 ++ __u32 max_mcps;
1834 ++ __u32 mem_pages;
1835 ++ __u32 obs;
1836 ++ __u32 ibs;
1837 ++ __u32 vbus_id;
1838 ++
1839 ++ __u32 max_in_queue:8;
1840 ++ __u32 max_out_queue:8;
1841 ++ __u32 time_slot:8;
1842 ++ __u32 core_id:4;
1843 ++ __u32 rsvd1:4;
1844 ++
1845 ++ __u32 module_type:8;
1846 ++ __u32 conn_type:4;
1847 ++ __u32 dev_type:4;
1848 ++ __u32 hw_conn_type:4;
1849 ++ __u32 rsvd2:12;
1850 ++
1851 ++ __u32 params_fixup:8;
1852 ++ __u32 converter:8;
1853 ++ __u32 input_pin_type:1;
1854 ++ __u32 output_pin_type:1;
1855 ++ __u32 is_dynamic_in_pin:1;
1856 ++ __u32 is_dynamic_out_pin:1;
1857 ++ __u32 is_loadable:1;
1858 ++ __u32 rsvd3:11;
1859 +
1860 + struct skl_dfw_v4_pipe pipe;
1861 + struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
1862 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1863 +index 63aaac52a265..adbe21c8876e 100644
1864 +--- a/kernel/bpf/verifier.c
1865 ++++ b/kernel/bpf/verifier.c
1866 +@@ -3132,7 +3132,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1867 + * an arbitrary scalar. Disallow all math except
1868 + * pointer subtraction
1869 + */
1870 +- if (opcode == BPF_SUB){
1871 ++ if (opcode == BPF_SUB && env->allow_ptr_leaks) {
1872 + mark_reg_unknown(env, regs, insn->dst_reg);
1873 + return 0;
1874 + }
1875 +diff --git a/kernel/pid.c b/kernel/pid.c
1876 +index 157fe4b19971..2ff2d8bfa4e0 100644
1877 +--- a/kernel/pid.c
1878 ++++ b/kernel/pid.c
1879 +@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
1880 + idr_preload_end();
1881 +
1882 + if (nr < 0) {
1883 +- retval = nr;
1884 ++ retval = (nr == -ENOSPC) ? -EAGAIN : nr;
1885 + goto out_free;
1886 + }
1887 +
1888 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1889 +index 478d9d3e6be9..26526fc41f0d 100644
1890 +--- a/kernel/sched/fair.c
1891 ++++ b/kernel/sched/fair.c
1892 +@@ -10019,7 +10019,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
1893 + * - A task which has been woken up by try_to_wake_up() and
1894 + * waiting for actually being woken up by sched_ttwu_pending().
1895 + */
1896 +- if (!se->sum_exec_runtime || p->state == TASK_WAKING)
1897 ++ if (!se->sum_exec_runtime ||
1898 ++ (p->state == TASK_WAKING && p->sched_remote_wakeup))
1899 + return true;
1900 +
1901 + return false;
1902 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1903 +index 0b0b688ea166..e58fd35ff64a 100644
1904 +--- a/kernel/trace/ring_buffer.c
1905 ++++ b/kernel/trace/ring_buffer.c
1906 +@@ -1545,6 +1545,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1907 + tmp_iter_page = first_page;
1908 +
1909 + do {
1910 ++ cond_resched();
1911 ++
1912 + to_remove_page = tmp_iter_page;
1913 + rb_inc_page(cpu_buffer, &tmp_iter_page);
1914 +
1915 +diff --git a/mm/Kconfig b/mm/Kconfig
1916 +index 94af022b7f3d..22e949e263f0 100644
1917 +--- a/mm/Kconfig
1918 ++++ b/mm/Kconfig
1919 +@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
1920 + depends on NO_BOOTMEM
1921 + depends on SPARSEMEM
1922 + depends on !NEED_PER_CPU_KM
1923 ++ depends on 64BIT
1924 + help
1925 + Ordinarily all struct pages are initialised during early boot in a
1926 + single thread. On very large machines this can take a considerable
1927 +diff --git a/mm/shmem.c b/mm/shmem.c
1928 +index 41b9bbf24e16..8264bbdbb6a5 100644
1929 +--- a/mm/shmem.c
1930 ++++ b/mm/shmem.c
1931 +@@ -2226,6 +2226,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
1932 + mpol_shared_policy_init(&info->policy, NULL);
1933 + break;
1934 + }
1935 ++
1936 ++ lockdep_annotate_inode_mutex_key(inode);
1937 + } else
1938 + shmem_free_inode(sb);
1939 + return inode;
1940 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1941 +index 8e3fda9e725c..cb01d509d511 100644
1942 +--- a/net/core/neighbour.c
1943 ++++ b/net/core/neighbour.c
1944 +@@ -1179,6 +1179,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1945 + lladdr = neigh->ha;
1946 + }
1947 +
1948 ++ /* Update confirmed timestamp for neighbour entry after we
1949 ++ * received ARP packet even if it doesn't change IP to MAC binding.
1950 ++ */
1951 ++ if (new & NUD_CONNECTED)
1952 ++ neigh->confirmed = jiffies;
1953 ++
1954 + /* If entry was valid and address is not changed,
1955 + do not change entry state, if new one is STALE.
1956 + */
1957 +@@ -1200,15 +1206,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1958 + }
1959 + }
1960 +
1961 +- /* Update timestamps only once we know we will make a change to the
1962 ++ /* Update timestamp only once we know we will make a change to the
1963 + * neighbour entry. Otherwise we risk to move the locktime window with
1964 + * noop updates and ignore relevant ARP updates.
1965 + */
1966 +- if (new != old || lladdr != neigh->ha) {
1967 +- if (new & NUD_CONNECTED)
1968 +- neigh->confirmed = jiffies;
1969 ++ if (new != old || lladdr != neigh->ha)
1970 + neigh->updated = jiffies;
1971 +- }
1972 +
1973 + if (new != old) {
1974 + neigh_del_timer(neigh);
1975 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1976 +index e3f743c141b3..bafaa033826f 100644
1977 +--- a/net/core/rtnetlink.c
1978 ++++ b/net/core/rtnetlink.c
1979 +@@ -2760,7 +2760,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
1980 + }
1981 +
1982 + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
1983 +- __dev_notify_flags(dev, old_flags, 0U);
1984 ++ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
1985 + } else {
1986 + dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
1987 + __dev_notify_flags(dev, old_flags, ~0U);
1988 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
1989 +index b403499fdabe..0c43b050dac7 100644
1990 +--- a/net/ipv4/af_inet.c
1991 ++++ b/net/ipv4/af_inet.c
1992 +@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1993 + if (encap)
1994 + skb_reset_inner_headers(skb);
1995 + skb->network_header = (u8 *)iph - skb->head;
1996 ++ skb_reset_mac_len(skb);
1997 + } while ((skb = skb->next));
1998 +
1999 + out:
2000 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2001 +index 24e116ddae79..fed65bc9df86 100644
2002 +--- a/net/ipv4/udp.c
2003 ++++ b/net/ipv4/udp.c
2004 +@@ -2128,6 +2128,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2005 + inet_compute_pseudo);
2006 + }
2007 +
2008 ++/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
2009 ++ * return code conversion for ip layer consumption
2010 ++ */
2011 ++static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2012 ++ struct udphdr *uh)
2013 ++{
2014 ++ int ret;
2015 ++
2016 ++ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2017 ++ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2018 ++ inet_compute_pseudo);
2019 ++
2020 ++ ret = udp_queue_rcv_skb(sk, skb);
2021 ++
2022 ++ /* a return value > 0 means to resubmit the input, but
2023 ++ * it wants the return to be -protocol, or 0
2024 ++ */
2025 ++ if (ret > 0)
2026 ++ return -ret;
2027 ++ return 0;
2028 ++}
2029 ++
2030 + /*
2031 + * All we need to do is get the socket, and then do a checksum.
2032 + */
2033 +@@ -2174,14 +2196,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2034 + if (unlikely(sk->sk_rx_dst != dst))
2035 + udp_sk_rx_dst_set(sk, dst);
2036 +
2037 +- ret = udp_queue_rcv_skb(sk, skb);
2038 ++ ret = udp_unicast_rcv_skb(sk, skb, uh);
2039 + sock_put(sk);
2040 +- /* a return value > 0 means to resubmit the input, but
2041 +- * it wants the return to be -protocol, or 0
2042 +- */
2043 +- if (ret > 0)
2044 +- return -ret;
2045 +- return 0;
2046 ++ return ret;
2047 + }
2048 +
2049 + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
2050 +@@ -2189,22 +2206,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2051 + saddr, daddr, udptable, proto);
2052 +
2053 + sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2054 +- if (sk) {
2055 +- int ret;
2056 +-
2057 +- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2058 +- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2059 +- inet_compute_pseudo);
2060 +-
2061 +- ret = udp_queue_rcv_skb(sk, skb);
2062 +-
2063 +- /* a return value > 0 means to resubmit the input, but
2064 +- * it wants the return to be -protocol, or 0
2065 +- */
2066 +- if (ret > 0)
2067 +- return -ret;
2068 +- return 0;
2069 +- }
2070 ++ if (sk)
2071 ++ return udp_unicast_rcv_skb(sk, skb, uh);
2072 +
2073 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2074 + goto drop;
2075 +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
2076 +index 5b3f2f89ef41..c6b75e96868c 100644
2077 +--- a/net/ipv6/ip6_offload.c
2078 ++++ b/net/ipv6/ip6_offload.c
2079 +@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
2080 + payload_len = skb->len - nhoff - sizeof(*ipv6h);
2081 + ipv6h->payload_len = htons(payload_len);
2082 + skb->network_header = (u8 *)ipv6h - skb->head;
2083 ++ skb_reset_mac_len(skb);
2084 +
2085 + if (udpfrag) {
2086 + int err = ip6_find_1stfragopt(skb, &prevhdr);
2087 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2088 +index 3168847c30d1..4f607aace43c 100644
2089 +--- a/net/ipv6/ip6_output.c
2090 ++++ b/net/ipv6/ip6_output.c
2091 +@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
2092 + kfree_skb(skb);
2093 + return -ENOBUFS;
2094 + }
2095 ++ if (skb->sk)
2096 ++ skb_set_owner_w(skb2, skb->sk);
2097 + consume_skb(skb);
2098 + skb = skb2;
2099 +- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
2100 +- * it is safe to call in our context (socket lock not held)
2101 +- */
2102 +- skb_set_owner_w(skb, (struct sock *)sk);
2103 + }
2104 + if (opt->opt_flen)
2105 + ipv6_push_frag_opts(skb, opt, &proto);
2106 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2107 +index 18e00ce1719a..480a79f47c52 100644
2108 +--- a/net/ipv6/route.c
2109 ++++ b/net/ipv6/route.c
2110 +@@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
2111 +
2112 + static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
2113 + {
2114 +- rt->dst.flags |= fib6_info_dst_flags(ort);
2115 +-
2116 + if (ort->fib6_flags & RTF_REJECT) {
2117 + ip6_rt_init_dst_reject(rt, ort);
2118 + return;
2119 +@@ -4670,20 +4668,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2120 + int iif, int type, u32 portid, u32 seq,
2121 + unsigned int flags)
2122 + {
2123 +- struct rtmsg *rtm;
2124 ++ struct rt6_info *rt6 = (struct rt6_info *)dst;
2125 ++ struct rt6key *rt6_dst, *rt6_src;
2126 ++ u32 *pmetrics, table, rt6_flags;
2127 + struct nlmsghdr *nlh;
2128 ++ struct rtmsg *rtm;
2129 + long expires = 0;
2130 +- u32 *pmetrics;
2131 +- u32 table;
2132 +
2133 + nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2134 + if (!nlh)
2135 + return -EMSGSIZE;
2136 +
2137 ++ if (rt6) {
2138 ++ rt6_dst = &rt6->rt6i_dst;
2139 ++ rt6_src = &rt6->rt6i_src;
2140 ++ rt6_flags = rt6->rt6i_flags;
2141 ++ } else {
2142 ++ rt6_dst = &rt->fib6_dst;
2143 ++ rt6_src = &rt->fib6_src;
2144 ++ rt6_flags = rt->fib6_flags;
2145 ++ }
2146 ++
2147 + rtm = nlmsg_data(nlh);
2148 + rtm->rtm_family = AF_INET6;
2149 +- rtm->rtm_dst_len = rt->fib6_dst.plen;
2150 +- rtm->rtm_src_len = rt->fib6_src.plen;
2151 ++ rtm->rtm_dst_len = rt6_dst->plen;
2152 ++ rtm->rtm_src_len = rt6_src->plen;
2153 + rtm->rtm_tos = 0;
2154 + if (rt->fib6_table)
2155 + table = rt->fib6_table->tb6_id;
2156 +@@ -4698,7 +4707,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2157 + rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2158 + rtm->rtm_protocol = rt->fib6_protocol;
2159 +
2160 +- if (rt->fib6_flags & RTF_CACHE)
2161 ++ if (rt6_flags & RTF_CACHE)
2162 + rtm->rtm_flags |= RTM_F_CLONED;
2163 +
2164 + if (dest) {
2165 +@@ -4706,7 +4715,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2166 + goto nla_put_failure;
2167 + rtm->rtm_dst_len = 128;
2168 + } else if (rtm->rtm_dst_len)
2169 +- if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
2170 ++ if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
2171 + goto nla_put_failure;
2172 + #ifdef CONFIG_IPV6_SUBTREES
2173 + if (src) {
2174 +@@ -4714,12 +4723,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2175 + goto nla_put_failure;
2176 + rtm->rtm_src_len = 128;
2177 + } else if (rtm->rtm_src_len &&
2178 +- nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
2179 ++ nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
2180 + goto nla_put_failure;
2181 + #endif
2182 + if (iif) {
2183 + #ifdef CONFIG_IPV6_MROUTE
2184 +- if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
2185 ++ if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
2186 + int err = ip6mr_get_route(net, skb, rtm, portid);
2187 +
2188 + if (err == 0)
2189 +@@ -4754,7 +4763,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2190 + /* For multipath routes, walk the siblings list and add
2191 + * each as a nexthop within RTA_MULTIPATH.
2192 + */
2193 +- if (rt->fib6_nsiblings) {
2194 ++ if (rt6) {
2195 ++ if (rt6_flags & RTF_GATEWAY &&
2196 ++ nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
2197 ++ goto nla_put_failure;
2198 ++
2199 ++ if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
2200 ++ goto nla_put_failure;
2201 ++ } else if (rt->fib6_nsiblings) {
2202 + struct fib6_info *sibling, *next_sibling;
2203 + struct nlattr *mp;
2204 +
2205 +@@ -4777,7 +4793,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2206 + goto nla_put_failure;
2207 + }
2208 +
2209 +- if (rt->fib6_flags & RTF_EXPIRES) {
2210 ++ if (rt6_flags & RTF_EXPIRES) {
2211 + expires = dst ? dst->expires : rt->expires;
2212 + expires -= jiffies;
2213 + }
2214 +@@ -4785,7 +4801,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
2215 + if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
2216 + goto nla_put_failure;
2217 +
2218 +- if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
2219 ++ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
2220 + goto nla_put_failure;
2221 +
2222 +
2223 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2224 +index e6645cae403e..39d0cab919bb 100644
2225 +--- a/net/ipv6/udp.c
2226 ++++ b/net/ipv6/udp.c
2227 +@@ -748,6 +748,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
2228 + }
2229 + }
2230 +
2231 ++/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
2232 ++ * return code conversion for ip layer consumption
2233 ++ */
2234 ++static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2235 ++ struct udphdr *uh)
2236 ++{
2237 ++ int ret;
2238 ++
2239 ++ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2240 ++ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2241 ++ ip6_compute_pseudo);
2242 ++
2243 ++ ret = udpv6_queue_rcv_skb(sk, skb);
2244 ++
2245 ++ /* a return value > 0 means to resubmit the input, but
2246 ++ * it wants the return to be -protocol, or 0
2247 ++ */
2248 ++ if (ret > 0)
2249 ++ return -ret;
2250 ++ return 0;
2251 ++}
2252 ++
2253 + int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2254 + int proto)
2255 + {
2256 +@@ -799,13 +821,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2257 + if (unlikely(sk->sk_rx_dst != dst))
2258 + udp6_sk_rx_dst_set(sk, dst);
2259 +
2260 +- ret = udpv6_queue_rcv_skb(sk, skb);
2261 +- sock_put(sk);
2262 ++ if (!uh->check && !udp_sk(sk)->no_check6_rx) {
2263 ++ sock_put(sk);
2264 ++ goto report_csum_error;
2265 ++ }
2266 +
2267 +- /* a return value > 0 means to resubmit the input */
2268 +- if (ret > 0)
2269 +- return ret;
2270 +- return 0;
2271 ++ ret = udp6_unicast_rcv_skb(sk, skb, uh);
2272 ++ sock_put(sk);
2273 ++ return ret;
2274 + }
2275 +
2276 + /*
2277 +@@ -818,30 +841,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2278 + /* Unicast */
2279 + sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2280 + if (sk) {
2281 +- int ret;
2282 +-
2283 +- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
2284 +- udp6_csum_zero_error(skb);
2285 +- goto csum_error;
2286 +- }
2287 +-
2288 +- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2289 +- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2290 +- ip6_compute_pseudo);
2291 +-
2292 +- ret = udpv6_queue_rcv_skb(sk, skb);
2293 +-
2294 +- /* a return value > 0 means to resubmit the input */
2295 +- if (ret > 0)
2296 +- return ret;
2297 +-
2298 +- return 0;
2299 ++ if (!uh->check && !udp_sk(sk)->no_check6_rx)
2300 ++ goto report_csum_error;
2301 ++ return udp6_unicast_rcv_skb(sk, skb, uh);
2302 + }
2303 +
2304 +- if (!uh->check) {
2305 +- udp6_csum_zero_error(skb);
2306 +- goto csum_error;
2307 +- }
2308 ++ if (!uh->check)
2309 ++ goto report_csum_error;
2310 +
2311 + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
2312 + goto discard;
2313 +@@ -862,6 +868,9 @@ short_packet:
2314 + ulen, skb->len,
2315 + daddr, ntohs(uh->dest));
2316 + goto discard;
2317 ++
2318 ++report_csum_error:
2319 ++ udp6_csum_zero_error(skb);
2320 + csum_error:
2321 + __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
2322 + discard:
2323 +diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
2324 +index ac8030c4bcf8..19cb2e473ea6 100644
2325 +--- a/net/nfc/hci/core.c
2326 ++++ b/net/nfc/hci/core.c
2327 +@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
2328 + }
2329 + create_info = (struct hci_create_pipe_resp *)skb->data;
2330 +
2331 ++ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
2332 ++ status = NFC_HCI_ANY_E_NOK;
2333 ++ goto exit;
2334 ++ }
2335 ++
2336 + /* Save the new created pipe and bind with local gate,
2337 + * the description for skb->data[3] is destination gate id
2338 + * but since we received this cmd from host controller, we
2339 +@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
2340 + }
2341 + delete_info = (struct hci_delete_pipe_noti *)skb->data;
2342 +
2343 ++ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
2344 ++ status = NFC_HCI_ANY_E_NOK;
2345 ++ goto exit;
2346 ++ }
2347 ++
2348 + hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
2349 + hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
2350 + break;
2351 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
2352 +index 5db358497c9e..e0e334a3a6e1 100644
2353 +--- a/net/sched/act_sample.c
2354 ++++ b/net/sched/act_sample.c
2355 +@@ -64,7 +64,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
2356 +
2357 + if (!exists) {
2358 + ret = tcf_idr_create(tn, parm->index, est, a,
2359 +- &act_sample_ops, bind, false);
2360 ++ &act_sample_ops, bind, true);
2361 + if (ret)
2362 + return ret;
2363 + ret = ACT_P_CREATED;
2364 +diff --git a/net/socket.c b/net/socket.c
2365 +index 4ac3b834cce9..d4187ac17d55 100644
2366 +--- a/net/socket.c
2367 ++++ b/net/socket.c
2368 +@@ -962,7 +962,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
2369 + EXPORT_SYMBOL(dlci_ioctl_set);
2370 +
2371 + static long sock_do_ioctl(struct net *net, struct socket *sock,
2372 +- unsigned int cmd, unsigned long arg)
2373 ++ unsigned int cmd, unsigned long arg,
2374 ++ unsigned int ifreq_size)
2375 + {
2376 + int err;
2377 + void __user *argp = (void __user *)arg;
2378 +@@ -988,11 +989,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
2379 + } else {
2380 + struct ifreq ifr;
2381 + bool need_copyout;
2382 +- if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
2383 ++ if (copy_from_user(&ifr, argp, ifreq_size))
2384 + return -EFAULT;
2385 + err = dev_ioctl(net, cmd, &ifr, &need_copyout);
2386 + if (!err && need_copyout)
2387 +- if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
2388 ++ if (copy_to_user(argp, &ifr, ifreq_size))
2389 + return -EFAULT;
2390 + }
2391 + return err;
2392 +@@ -1091,7 +1092,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
2393 + err = open_related_ns(&net->ns, get_net_ns);
2394 + break;
2395 + default:
2396 +- err = sock_do_ioctl(net, sock, cmd, arg);
2397 ++ err = sock_do_ioctl(net, sock, cmd, arg,
2398 ++ sizeof(struct ifreq));
2399 + break;
2400 + }
2401 + return err;
2402 +@@ -2762,7 +2764,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2403 + int err;
2404 +
2405 + set_fs(KERNEL_DS);
2406 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2407 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
2408 ++ sizeof(struct compat_ifreq));
2409 + set_fs(old_fs);
2410 + if (!err)
2411 + err = compat_put_timeval(&ktv, up);
2412 +@@ -2778,7 +2781,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2413 + int err;
2414 +
2415 + set_fs(KERNEL_DS);
2416 +- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2417 ++ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
2418 ++ sizeof(struct compat_ifreq));
2419 + set_fs(old_fs);
2420 + if (!err)
2421 + err = compat_put_timespec(&kts, up);
2422 +@@ -3084,7 +3088,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
2423 + }
2424 +
2425 + set_fs(KERNEL_DS);
2426 +- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
2427 ++ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
2428 ++ sizeof(struct compat_ifreq));
2429 + set_fs(old_fs);
2430 +
2431 + out:
2432 +@@ -3197,7 +3202,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
2433 + case SIOCBONDSETHWADDR:
2434 + case SIOCBONDCHANGEACTIVE:
2435 + case SIOCGIFNAME:
2436 +- return sock_do_ioctl(net, sock, cmd, arg);
2437 ++ return sock_do_ioctl(net, sock, cmd, arg,
2438 ++ sizeof(struct compat_ifreq));
2439 + }
2440 +
2441 + return -ENOIOCTLCMD;
2442 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
2443 +index a7a8f8e20ff3..9bd0286d5407 100644
2444 +--- a/net/tls/tls_device.c
2445 ++++ b/net/tls/tls_device.c
2446 +@@ -552,7 +552,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
2447 + goto free_marker_record;
2448 + }
2449 +
2450 +- crypto_info = &ctx->crypto_send;
2451 ++ crypto_info = &ctx->crypto_send.info;
2452 + switch (crypto_info->cipher_type) {
2453 + case TLS_CIPHER_AES_GCM_128:
2454 + nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2455 +@@ -650,7 +650,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
2456 +
2457 + ctx->priv_ctx_tx = offload_ctx;
2458 + rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
2459 +- &ctx->crypto_send,
2460 ++ &ctx->crypto_send.info,
2461 + tcp_sk(sk)->write_seq);
2462 + if (rc)
2463 + goto release_netdev;
2464 +diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
2465 +index 748914abdb60..72143679d3d6 100644
2466 +--- a/net/tls/tls_device_fallback.c
2467 ++++ b/net/tls/tls_device_fallback.c
2468 +@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
2469 + goto free_req;
2470 +
2471 + iv = buf;
2472 +- memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
2473 ++ memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
2474 + TLS_CIPHER_AES_GCM_128_SALT_SIZE);
2475 + aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
2476 + TLS_CIPHER_AES_GCM_128_IV_SIZE;
2477 +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
2478 +index 45188d920013..2ccf194c3ebb 100644
2479 +--- a/net/tls/tls_main.c
2480 ++++ b/net/tls/tls_main.c
2481 +@@ -245,6 +245,16 @@ static void tls_write_space(struct sock *sk)
2482 + ctx->sk_write_space(sk);
2483 + }
2484 +
2485 ++static void tls_ctx_free(struct tls_context *ctx)
2486 ++{
2487 ++ if (!ctx)
2488 ++ return;
2489 ++
2490 ++ memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
2491 ++ memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
2492 ++ kfree(ctx);
2493 ++}
2494 ++
2495 + static void tls_sk_proto_close(struct sock *sk, long timeout)
2496 + {
2497 + struct tls_context *ctx = tls_get_ctx(sk);
2498 +@@ -295,7 +305,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
2499 + #else
2500 + {
2501 + #endif
2502 +- kfree(ctx);
2503 ++ tls_ctx_free(ctx);
2504 + ctx = NULL;
2505 + }
2506 +
2507 +@@ -306,7 +316,7 @@ skip_tx_cleanup:
2508 + * for sk->sk_prot->unhash [tls_hw_unhash]
2509 + */
2510 + if (free_ctx)
2511 +- kfree(ctx);
2512 ++ tls_ctx_free(ctx);
2513 + }
2514 +
2515 + static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
2516 +@@ -331,7 +341,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
2517 + }
2518 +
2519 + /* get user crypto info */
2520 +- crypto_info = &ctx->crypto_send;
2521 ++ crypto_info = &ctx->crypto_send.info;
2522 +
2523 + if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
2524 + rc = -EBUSY;
2525 +@@ -418,9 +428,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
2526 + }
2527 +
2528 + if (tx)
2529 +- crypto_info = &ctx->crypto_send;
2530 ++ crypto_info = &ctx->crypto_send.info;
2531 + else
2532 +- crypto_info = &ctx->crypto_recv;
2533 ++ crypto_info = &ctx->crypto_recv.info;
2534 +
2535 + /* Currently we don't support set crypto info more than one time */
2536 + if (TLS_CRYPTO_INFO_READY(crypto_info)) {
2537 +@@ -492,7 +502,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
2538 + goto out;
2539 +
2540 + err_crypto_info:
2541 +- memset(crypto_info, 0, sizeof(*crypto_info));
2542 ++ memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
2543 + out:
2544 + return rc;
2545 + }
2546 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2547 +index b3344bbe336b..9fab8e5a4a5b 100644
2548 +--- a/net/tls/tls_sw.c
2549 ++++ b/net/tls/tls_sw.c
2550 +@@ -872,7 +872,15 @@ fallback_to_reg_recv:
2551 + if (control != TLS_RECORD_TYPE_DATA)
2552 + goto recv_end;
2553 + }
2554 ++ } else {
2555 ++ /* MSG_PEEK right now cannot look beyond current skb
2556 ++ * from strparser, meaning we cannot advance skb here
2557 ++ * and thus unpause strparser since we'd loose original
2558 ++ * and thus unpause strparser since we'd lose the original
2559 ++ */
2560 ++ break;
2561 + }
2562 ++
2563 + /* If we have a new message from strparser, continue now. */
2564 + if (copied >= target && !ctx->recv_pkt)
2565 + break;
2566 +@@ -989,8 +997,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
2567 + goto read_failure;
2568 + }
2569 +
2570 +- if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
2571 +- header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
2572 ++ if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
2573 ++ header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
2574 + ret = -EINVAL;
2575 + goto read_failure;
2576 + }
2577 +@@ -1064,7 +1072,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
2578 +
2579 + int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2580 + {
2581 +- char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
2582 + struct tls_crypto_info *crypto_info;
2583 + struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2584 + struct tls_sw_context_tx *sw_ctx_tx = NULL;
2585 +@@ -1100,11 +1107,11 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2586 + }
2587 +
2588 + if (tx) {
2589 +- crypto_info = &ctx->crypto_send;
2590 ++ crypto_info = &ctx->crypto_send.info;
2591 + cctx = &ctx->tx;
2592 + aead = &sw_ctx_tx->aead_send;
2593 + } else {
2594 +- crypto_info = &ctx->crypto_recv;
2595 ++ crypto_info = &ctx->crypto_recv.info;
2596 + cctx = &ctx->rx;
2597 + aead = &sw_ctx_rx->aead_recv;
2598 + }
2599 +@@ -1184,9 +1191,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2600 +
2601 + ctx->push_pending_record = tls_sw_push_pending_record;
2602 +
2603 +- memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
2604 +-
2605 +- rc = crypto_aead_setkey(*aead, keyval,
2606 ++ rc = crypto_aead_setkey(*aead, gcm_128_info->key,
2607 + TLS_CIPHER_AES_GCM_128_KEY_SIZE);
2608 + if (rc)
2609 + goto free_aead;
2610 +diff --git a/security/keys/dh.c b/security/keys/dh.c
2611 +index 1a68d27e72b4..b203f7758f97 100644
2612 +--- a/security/keys/dh.c
2613 ++++ b/security/keys/dh.c
2614 +@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
2615 + }
2616 + dh_inputs.g_size = dlen;
2617 +
2618 +- dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
2619 ++ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
2620 + if (dlen < 0) {
2621 + ret = dlen;
2622 + goto out2;
2623 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
2624 +index 730ea91d9be8..93676354f87f 100644
2625 +--- a/sound/firewire/bebob/bebob.c
2626 ++++ b/sound/firewire/bebob/bebob.c
2627 +@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
2628 + error:
2629 + mutex_unlock(&devices_mutex);
2630 + snd_bebob_stream_destroy_duplex(bebob);
2631 ++ kfree(bebob->maudio_special_quirk);
2632 ++ bebob->maudio_special_quirk = NULL;
2633 + snd_card_free(bebob->card);
2634 + dev_info(&bebob->unit->device,
2635 + "Sound card registration failed: %d\n", err);
2636 +diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
2637 +index bd55620c6a47..c266997ad299 100644
2638 +--- a/sound/firewire/bebob/bebob_maudio.c
2639 ++++ b/sound/firewire/bebob/bebob_maudio.c
2640 +@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
2641 + struct fw_device *device = fw_parent_device(unit);
2642 + int err, rcode;
2643 + u64 date;
2644 +- __le32 cues[3] = {
2645 +- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
2646 +- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
2647 +- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
2648 +- };
2649 ++ __le32 *cues;
2650 +
2651 + /* check date of software used to build */
2652 + err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
2653 + &date, sizeof(u64));
2654 + if (err < 0)
2655 +- goto end;
2656 ++ return err;
2657 + /*
2658 + * firmware version 5058 or later has date later than "20070401", but
2659 + * 'date' is not null-terminated.
2660 +@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
2661 + if (date < 0x3230303730343031LL) {
2662 + dev_err(&unit->device,
2663 + "Use firmware version 5058 or later\n");
2664 +- err = -ENOSYS;
2665 +- goto end;
2666 ++ return -ENXIO;
2667 + }
2668 +
2669 ++ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
2670 ++ if (!cues)
2671 ++ return -ENOMEM;
2672 ++
2673 ++ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
2674 ++ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
2675 ++ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
2676 ++
2677 + rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
2678 + device->node_id, device->generation,
2679 + device->max_speed, BEBOB_ADDR_REG_REQ,
2680 +- cues, sizeof(cues));
2681 ++ cues, 3 * sizeof(*cues));
2682 ++ kfree(cues);
2683 + if (rcode != RCODE_COMPLETE) {
2684 + dev_err(&unit->device,
2685 + "Failed to send a cue to load firmware\n");
2686 + err = -EIO;
2687 + }
2688 +-end:
2689 ++
2690 + return err;
2691 + }
2692 +
2693 +@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
2694 + bebob->midi_output_ports = 2;
2695 + }
2696 + end:
2697 +- if (err < 0) {
2698 +- kfree(params);
2699 +- bebob->maudio_special_quirk = NULL;
2700 +- }
2701 + mutex_unlock(&bebob->mutex);
2702 + return err;
2703 + }
2704 +diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
2705 +index 1f5e1d23f31a..ef689997d6a5 100644
2706 +--- a/sound/firewire/digi00x/digi00x.c
2707 ++++ b/sound/firewire/digi00x/digi00x.c
2708 +@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
2709 + fw_unit_put(dg00x->unit);
2710 +
2711 + mutex_destroy(&dg00x->mutex);
2712 ++ kfree(dg00x);
2713 + }
2714 +
2715 + static void dg00x_card_free(struct snd_card *card)
2716 +diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
2717 +index ad7a0a32557d..64c3cb0fb926 100644
2718 +--- a/sound/firewire/fireface/ff-protocol-ff400.c
2719 ++++ b/sound/firewire/fireface/ff-protocol-ff400.c
2720 +@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
2721 + {
2722 + __le32 *reg;
2723 + int i;
2724 ++ int err;
2725 +
2726 + reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
2727 + if (reg == NULL)
2728 +@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
2729 + reg[i] = cpu_to_le32(0x00000001);
2730 + }
2731 +
2732 +- return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
2733 +- FF400_FETCH_PCM_FRAMES, reg,
2734 +- sizeof(__le32) * 18, 0);
2735 ++ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
2736 ++ FF400_FETCH_PCM_FRAMES, reg,
2737 ++ sizeof(__le32) * 18, 0);
2738 ++ kfree(reg);
2739 ++ return err;
2740 + }
2741 +
2742 + static void ff400_dump_sync_status(struct snd_ff *ff,
2743 +diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
2744 +index 71a0613d3da0..f2d073365cf6 100644
2745 +--- a/sound/firewire/fireworks/fireworks.c
2746 ++++ b/sound/firewire/fireworks/fireworks.c
2747 +@@ -301,6 +301,8 @@ error:
2748 + snd_efw_transaction_remove_instance(efw);
2749 + snd_efw_stream_destroy_duplex(efw);
2750 + snd_card_free(efw->card);
2751 ++ kfree(efw->resp_buf);
2752 ++ efw->resp_buf = NULL;
2753 + dev_info(&efw->unit->device,
2754 + "Sound card registration failed: %d\n", err);
2755 + }
2756 +diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
2757 +index 1e5b2c802635..2ea8be6c8584 100644
2758 +--- a/sound/firewire/oxfw/oxfw.c
2759 ++++ b/sound/firewire/oxfw/oxfw.c
2760 +@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
2761 +
2762 + kfree(oxfw->spec);
2763 + mutex_destroy(&oxfw->mutex);
2764 ++ kfree(oxfw);
2765 + }
2766 +
2767 + /*
2768 +@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
2769 + static void do_registration(struct work_struct *work)
2770 + {
2771 + struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
2772 ++ int i;
2773 + int err;
2774 +
2775 + if (oxfw->registered)
2776 +@@ -269,7 +271,15 @@ error:
2777 + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
2778 + if (oxfw->has_output)
2779 + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
2780 ++ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
2781 ++ kfree(oxfw->tx_stream_formats[i]);
2782 ++ oxfw->tx_stream_formats[i] = NULL;
2783 ++ kfree(oxfw->rx_stream_formats[i]);
2784 ++ oxfw->rx_stream_formats[i] = NULL;
2785 ++ }
2786 + snd_card_free(oxfw->card);
2787 ++ kfree(oxfw->spec);
2788 ++ oxfw->spec = NULL;
2789 + dev_info(&oxfw->unit->device,
2790 + "Sound card registration failed: %d\n", err);
2791 + }
2792 +diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
2793 +index 44ad41fb7374..d3fdc463a884 100644
2794 +--- a/sound/firewire/tascam/tascam.c
2795 ++++ b/sound/firewire/tascam/tascam.c
2796 +@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
2797 + fw_unit_put(tscm->unit);
2798 +
2799 + mutex_destroy(&tscm->mutex);
2800 ++ kfree(tscm);
2801 + }
2802 +
2803 + static void tscm_card_free(struct snd_card *card)
2804 +diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
2805 +index de2ecbe95d6c..2c54d26f30a6 100644
2806 +--- a/sound/pci/emu10k1/emufx.c
2807 ++++ b/sound/pci/emu10k1/emufx.c
2808 +@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2809 + emu->support_tlv = 1;
2810 + return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
2811 + case SNDRV_EMU10K1_IOCTL_INFO:
2812 +- info = kmalloc(sizeof(*info), GFP_KERNEL);
2813 ++ info = kzalloc(sizeof(*info), GFP_KERNEL);
2814 + if (!info)
2815 + return -ENOMEM;
2816 + snd_emu10k1_fx8010_info(emu, info);
2817 +diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
2818 +index 275677de669f..407554175282 100644
2819 +--- a/sound/soc/codecs/cs4265.c
2820 ++++ b/sound/soc/codecs/cs4265.c
2821 +@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
2822 + SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
2823 + 3, 1, 0),
2824 + SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
2825 +- SOC_SINGLE("MMTLR Data Switch", 0,
2826 +- 1, 1, 0),
2827 ++ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
2828 ++ 0, 1, 0),
2829 + SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
2830 + SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
2831 + };
2832 +diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
2833 +index 14999b999fd3..0d6145549a98 100644
2834 +--- a/sound/soc/codecs/tas6424.c
2835 ++++ b/sound/soc/codecs/tas6424.c
2836 +@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
2837 + TAS6424_FAULT_PVDD_UV |
2838 + TAS6424_FAULT_VBAT_UV;
2839 +
2840 +- if (reg)
2841 ++ if (!reg) {
2842 ++ tas6424->last_fault1 = reg;
2843 + goto check_global_fault2_reg;
2844 ++ }
2845 +
2846 + /*
2847 + * Only flag errors once for a given occurrence. This is needed as
2848 +@@ -461,8 +463,10 @@ check_global_fault2_reg:
2849 + TAS6424_FAULT_OTSD_CH3 |
2850 + TAS6424_FAULT_OTSD_CH4;
2851 +
2852 +- if (!reg)
2853 ++ if (!reg) {
2854 ++ tas6424->last_fault2 = reg;
2855 + goto check_warn_reg;
2856 ++ }
2857 +
2858 + if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
2859 + dev_crit(dev, "experienced a global overtemp shutdown\n");
2860 +@@ -497,8 +501,10 @@ check_warn_reg:
2861 + TAS6424_WARN_VDD_OTW_CH3 |
2862 + TAS6424_WARN_VDD_OTW_CH4;
2863 +
2864 +- if (!reg)
2865 ++ if (!reg) {
2866 ++ tas6424->last_warn = reg;
2867 + goto out;
2868 ++ }
2869 +
2870 + if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
2871 + dev_warn(dev, "experienced a VDD under voltage condition\n");
2872 +diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
2873 +index 953d94d50586..ade34c26ad2f 100644
2874 +--- a/sound/soc/codecs/wm9712.c
2875 ++++ b/sound/soc/codecs/wm9712.c
2876 +@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
2877 +
2878 + static struct platform_driver wm9712_component_driver = {
2879 + .driver = {
2880 +- .name = "wm9712-component",
2881 ++ .name = "wm9712-codec",
2882 + },
2883 +
2884 + .probe = wm9712_probe,
2885 +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
2886 +index f237002180c0..ff13189a7ee4 100644
2887 +--- a/sound/soc/sh/rcar/core.c
2888 ++++ b/sound/soc/sh/rcar/core.c
2889 +@@ -953,12 +953,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
2890 + rsnd_dai_stream_quit(io);
2891 + }
2892 +
2893 ++static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
2894 ++ struct snd_soc_dai *dai)
2895 ++{
2896 ++ struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
2897 ++ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
2898 ++ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
2899 ++
2900 ++ return rsnd_dai_call(prepare, io, priv);
2901 ++}
2902 ++
2903 + static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
2904 + .startup = rsnd_soc_dai_startup,
2905 + .shutdown = rsnd_soc_dai_shutdown,
2906 + .trigger = rsnd_soc_dai_trigger,
2907 + .set_fmt = rsnd_soc_dai_set_fmt,
2908 + .set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
2909 ++ .prepare = rsnd_soc_dai_prepare,
2910 + };
2911 +
2912 + void rsnd_parse_connect_common(struct rsnd_dai *rdai,
2913 +diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
2914 +index 6d7280d2d9be..e93032498a5b 100644
2915 +--- a/sound/soc/sh/rcar/rsnd.h
2916 ++++ b/sound/soc/sh/rcar/rsnd.h
2917 +@@ -283,6 +283,9 @@ struct rsnd_mod_ops {
2918 + int (*nolock_stop)(struct rsnd_mod *mod,
2919 + struct rsnd_dai_stream *io,
2920 + struct rsnd_priv *priv);
2921 ++ int (*prepare)(struct rsnd_mod *mod,
2922 ++ struct rsnd_dai_stream *io,
2923 ++ struct rsnd_priv *priv);
2924 + };
2925 +
2926 + struct rsnd_dai_stream;
2927 +@@ -312,6 +315,7 @@ struct rsnd_mod {
2928 + * H 0: fallback
2929 + * H 0: hw_params
2930 + * H 0: pointer
2931 ++ * H 0: prepare
2932 + */
2933 + #define __rsnd_mod_shift_nolock_start 0
2934 + #define __rsnd_mod_shift_nolock_stop 0
2935 +@@ -326,6 +330,7 @@ struct rsnd_mod {
2936 + #define __rsnd_mod_shift_fallback 28 /* always called */
2937 + #define __rsnd_mod_shift_hw_params 28 /* always called */
2938 + #define __rsnd_mod_shift_pointer 28 /* always called */
2939 ++#define __rsnd_mod_shift_prepare 28 /* always called */
2940 +
2941 + #define __rsnd_mod_add_probe 0
2942 + #define __rsnd_mod_add_remove 0
2943 +@@ -340,6 +345,7 @@ struct rsnd_mod {
2944 + #define __rsnd_mod_add_fallback 0
2945 + #define __rsnd_mod_add_hw_params 0
2946 + #define __rsnd_mod_add_pointer 0
2947 ++#define __rsnd_mod_add_prepare 0
2948 +
2949 + #define __rsnd_mod_call_probe 0
2950 + #define __rsnd_mod_call_remove 0
2951 +@@ -354,6 +360,7 @@ struct rsnd_mod {
2952 + #define __rsnd_mod_call_pointer 0
2953 + #define __rsnd_mod_call_nolock_start 0
2954 + #define __rsnd_mod_call_nolock_stop 1
2955 ++#define __rsnd_mod_call_prepare 0
2956 +
2957 + #define rsnd_mod_to_priv(mod) ((mod)->priv)
2958 + #define rsnd_mod_name(mod) ((mod)->ops->name)
2959 +diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
2960 +index 6e1166ec24a0..cf4b40d376e5 100644
2961 +--- a/sound/soc/sh/rcar/ssi.c
2962 ++++ b/sound/soc/sh/rcar/ssi.c
2963 +@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
2964 + if (rsnd_ssi_is_multi_slave(mod, io))
2965 + return 0;
2966 +
2967 +- if (ssi->usrcnt > 1) {
2968 ++ if (ssi->rate) {
2969 + if (ssi->rate != rate) {
2970 + dev_err(dev, "SSI parent/child should use same rate\n");
2971 + return -EINVAL;
2972 +@@ -431,7 +431,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
2973 + struct rsnd_priv *priv)
2974 + {
2975 + struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
2976 +- int ret;
2977 +
2978 + if (!rsnd_ssi_is_run_mods(mod, io))
2979 + return 0;
2980 +@@ -440,10 +439,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
2981 +
2982 + rsnd_mod_power_on(mod);
2983 +
2984 +- ret = rsnd_ssi_master_clk_start(mod, io);
2985 +- if (ret < 0)
2986 +- return ret;
2987 +-
2988 + rsnd_ssi_config_init(mod, io);
2989 +
2990 + rsnd_ssi_register_setup(mod);
2991 +@@ -846,6 +841,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
2992 + return 0;
2993 + }
2994 +
2995 ++static int rsnd_ssi_prepare(struct rsnd_mod *mod,
2996 ++ struct rsnd_dai_stream *io,
2997 ++ struct rsnd_priv *priv)
2998 ++{
2999 ++ return rsnd_ssi_master_clk_start(mod, io);
3000 ++}
3001 ++
3002 + static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
3003 + .name = SSI_NAME,
3004 + .probe = rsnd_ssi_common_probe,
3005 +@@ -858,6 +860,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
3006 + .pointer = rsnd_ssi_pio_pointer,
3007 + .pcm_new = rsnd_ssi_pcm_new,
3008 + .hw_params = rsnd_ssi_hw_params,
3009 ++ .prepare = rsnd_ssi_prepare,
3010 + };
3011 +
3012 + static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
3013 +@@ -934,6 +937,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
3014 + .pcm_new = rsnd_ssi_pcm_new,
3015 + .fallback = rsnd_ssi_fallback,
3016 + .hw_params = rsnd_ssi_hw_params,
3017 ++ .prepare = rsnd_ssi_prepare,
3018 + };
3019 +
3020 + int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)