Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 14:01:08
Message-Id: 1542204041.2b43e27fc0295f52ffed3cf2450eefbc993c2da6.mpagano@gentoo
commit:     2b43e27fc0295f52ffed3cf2450eefbc993c2da6
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 20 12:40:43 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 14 14:00:41 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2b43e27f

Linux patch 4.14.78

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1077_linux-4.14.78.patch | 2020 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2024 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 832ddd2..509ffd2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -351,6 +351,10 @@ Patch: 1076_linux-4.14.77.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.77
23
24 +Patch: 1077_linux-4.14.78.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.78
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1077_linux-4.14.78.patch b/1077_linux-4.14.78.patch
33 new file mode 100644
34 index 0000000..e0b842e
35 --- /dev/null
36 +++ b/1077_linux-4.14.78.patch
37 @@ -0,0 +1,2020 @@
38 +diff --git a/Makefile b/Makefile
39 +index 16d1a18496fb..89574ee68d6b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 77
47 ++SUBLEVEL = 78
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arc/Makefile b/arch/arc/Makefile
52 +index 6c1b20dd76ad..7c6c97782022 100644
53 +--- a/arch/arc/Makefile
54 ++++ b/arch/arc/Makefile
55 +@@ -6,34 +6,12 @@
56 + # published by the Free Software Foundation.
57 + #
58 +
59 +-ifeq ($(CROSS_COMPILE),)
60 +-ifndef CONFIG_CPU_BIG_ENDIAN
61 +-CROSS_COMPILE := arc-linux-
62 +-else
63 +-CROSS_COMPILE := arceb-linux-
64 +-endif
65 +-endif
66 +-
67 + KBUILD_DEFCONFIG := nsim_700_defconfig
68 +
69 + cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
70 + cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
71 + cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
72 +
73 +-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
74 +-
75 +-ifdef CONFIG_ISA_ARCOMPACT
76 +-ifeq ($(is_700), 0)
77 +- $(error Toolchain not configured for ARCompact builds)
78 +-endif
79 +-endif
80 +-
81 +-ifdef CONFIG_ISA_ARCV2
82 +-ifeq ($(is_700), 1)
83 +- $(error Toolchain not configured for ARCv2 builds)
84 +-endif
85 +-endif
86 +-
87 + ifdef CONFIG_ARC_CURR_IN_REG
88 + # For a global register defintion, make sure it gets passed to every file
89 + # We had a customer reported bug where some code built in kernel was NOT using
90 +@@ -87,7 +65,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
91 + # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
92 + ldflags-$(upto_gcc44) += -marclinux
93 +
94 +-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
95 ++LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
96 +
97 + # Modules with short calls might break for calls into builtin-kernel
98 + KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
99 +diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
100 +index 2c895e8d07f7..812535f40124 100644
101 +--- a/arch/powerpc/include/asm/code-patching.h
102 ++++ b/arch/powerpc/include/asm/code-patching.h
103 +@@ -31,6 +31,7 @@ unsigned int create_cond_branch(const unsigned int *addr,
104 + unsigned long target, int flags);
105 + int patch_branch(unsigned int *addr, unsigned long target, int flags);
106 + int patch_instruction(unsigned int *addr, unsigned int instr);
107 ++int raw_patch_instruction(unsigned int *addr, unsigned int instr);
108 +
109 + int instr_is_relative_branch(unsigned int instr);
110 + int instr_is_relative_link_branch(unsigned int instr);
111 +diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
112 +index 1da12f521cb7..b735b727ed2b 100644
113 +--- a/arch/powerpc/kernel/tm.S
114 ++++ b/arch/powerpc/kernel/tm.S
115 +@@ -167,13 +167,27 @@ _GLOBAL(tm_reclaim)
116 + std r1, PACATMSCRATCH(r13)
117 + ld r1, PACAR1(r13)
118 +
119 +- /* Store the PPR in r11 and reset to decent value */
120 + std r11, GPR11(r1) /* Temporary stash */
121 +
122 ++ /*
123 ++ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
124 ++ * clobbered by an exception once we turn on MSR_RI below.
125 ++ */
126 ++ ld r11, PACATMSCRATCH(r13)
127 ++ std r11, GPR1(r1)
128 ++
129 ++ /*
130 ++ * Store r13 away so we can free up the scratch SPR for the SLB fault
131 ++ * handler (needed once we start accessing the thread_struct).
132 ++ */
133 ++ GET_SCRATCH0(r11)
134 ++ std r11, GPR13(r1)
135 ++
136 + /* Reset MSR RI so we can take SLB faults again */
137 + li r11, MSR_RI
138 + mtmsrd r11, 1
139 +
140 ++ /* Store the PPR in r11 and reset to decent value */
141 + mfspr r11, SPRN_PPR
142 + HMT_MEDIUM
143 +
144 +@@ -198,11 +212,11 @@ _GLOBAL(tm_reclaim)
145 + SAVE_GPR(8, r7) /* user r8 */
146 + SAVE_GPR(9, r7) /* user r9 */
147 + SAVE_GPR(10, r7) /* user r10 */
148 +- ld r3, PACATMSCRATCH(r13) /* user r1 */
149 ++ ld r3, GPR1(r1) /* user r1 */
150 + ld r4, GPR7(r1) /* user r7 */
151 + ld r5, GPR11(r1) /* user r11 */
152 + ld r6, GPR12(r1) /* user r12 */
153 +- GET_SCRATCH0(8) /* user r13 */
154 ++ ld r8, GPR13(r1) /* user r13 */
155 + std r3, GPR1(r7)
156 + std r4, GPR7(r7)
157 + std r5, GPR11(r7)
158 +diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
159 +index 882c750dc519..130405158afa 100644
160 +--- a/arch/powerpc/lib/code-patching.c
161 ++++ b/arch/powerpc/lib/code-patching.c
162 +@@ -39,7 +39,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
163 + return 0;
164 + }
165 +
166 +-static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
167 ++int raw_patch_instruction(unsigned int *addr, unsigned int instr)
168 + {
169 + return __patch_instruction(addr, instr, addr);
170 + }
171 +@@ -156,7 +156,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr)
172 + * when text_poke_area is not ready, but we still need
173 + * to allow patching. We just do the plain old patching
174 + */
175 +- if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
176 ++ if (!this_cpu_read(text_poke_area))
177 + return raw_patch_instruction(addr, instr);
178 +
179 + local_irq_save(flags);
180 +diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
181 +index 762a899e85a4..e1bcdc32a851 100644
182 +--- a/arch/powerpc/lib/feature-fixups.c
183 ++++ b/arch/powerpc/lib/feature-fixups.c
184 +@@ -63,7 +63,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
185 + }
186 + }
187 +
188 +- patch_instruction(dest, instr);
189 ++ raw_patch_instruction(dest, instr);
190 +
191 + return 0;
192 + }
193 +@@ -92,7 +92,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
194 + }
195 +
196 + for (; dest < end; dest++)
197 +- patch_instruction(dest, PPC_INST_NOP);
198 ++ raw_patch_instruction(dest, PPC_INST_NOP);
199 +
200 + return 0;
201 + }
202 +@@ -292,7 +292,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
203 +
204 + for (; start < end; start++) {
205 + dest = (void *)start + *start;
206 +- patch_instruction(dest, PPC_INST_LWSYNC);
207 ++ raw_patch_instruction(dest, PPC_INST_LWSYNC);
208 + }
209 + }
210 +
211 +@@ -310,7 +310,7 @@ static void do_final_fixups(void)
212 + length = (__end_interrupts - _stext) / sizeof(int);
213 +
214 + while (length--) {
215 +- patch_instruction(dest, *src);
216 ++ raw_patch_instruction(dest, *src);
217 + src++;
218 + dest++;
219 + }
220 +diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
221 +index cdf23b628688..cdfe1c82f3f0 100644
222 +--- a/drivers/clocksource/timer-fttmr010.c
223 ++++ b/drivers/clocksource/timer-fttmr010.c
224 +@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
225 + cr &= ~fttmr010->t1_enable_val;
226 + writel(cr, fttmr010->base + TIMER_CR);
227 +
228 +- /* Setup the match register forward/backward in time */
229 +- cr = readl(fttmr010->base + TIMER1_COUNT);
230 +- if (fttmr010->count_down)
231 +- cr -= cycles;
232 +- else
233 +- cr += cycles;
234 +- writel(cr, fttmr010->base + TIMER1_MATCH1);
235 ++ if (fttmr010->count_down) {
236 ++ /*
237 ++ * ASPEED Timer Controller will load TIMER1_LOAD register
238 ++ * into TIMER1_COUNT register when the timer is re-enabled.
239 ++ */
240 ++ writel(cycles, fttmr010->base + TIMER1_LOAD);
241 ++ } else {
242 ++ /* Setup the match register forward in time */
243 ++ cr = readl(fttmr010->base + TIMER1_COUNT);
244 ++ writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
245 ++ }
246 +
247 + /* Start */
248 + cr = readl(fttmr010->base + TIMER_CR);
249 +diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
250 +index 880a861ab3c8..713214d085e0 100644
251 +--- a/drivers/clocksource/timer-ti-32k.c
252 ++++ b/drivers/clocksource/timer-ti-32k.c
253 +@@ -98,6 +98,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
254 + return -ENXIO;
255 + }
256 +
257 ++ if (!of_machine_is_compatible("ti,am43"))
258 ++ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
259 ++
260 + ti_32k_timer.counter = ti_32k_timer.base;
261 +
262 + /*
263 +diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
264 +index 1a57cc28955e..ff3348ee9595 100644
265 +--- a/drivers/gpu/drm/arm/malidp_drv.c
266 ++++ b/drivers/gpu/drm/arm/malidp_drv.c
267 +@@ -617,6 +617,7 @@ static int malidp_bind(struct device *dev)
268 + drm->irq_enabled = true;
269 +
270 + ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
271 ++ drm_crtc_vblank_reset(&malidp->crtc);
272 + if (ret < 0) {
273 + DRM_ERROR("failed to initialise vblank\n");
274 + goto vblank_fail;
275 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
276 +index 562220ec9d41..c75f4ccbcdef 100644
277 +--- a/drivers/gpu/drm/i915/i915_drv.c
278 ++++ b/drivers/gpu/drm/i915/i915_drv.c
279 +@@ -878,7 +878,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
280 +
281 + spin_lock_init(&dev_priv->mm.object_stat_lock);
282 + mutex_init(&dev_priv->sb_lock);
283 +- mutex_init(&dev_priv->modeset_restore_lock);
284 + mutex_init(&dev_priv->av_mutex);
285 + mutex_init(&dev_priv->wm.wm_mutex);
286 + mutex_init(&dev_priv->pps_mutex);
287 +@@ -1505,11 +1504,6 @@ static int i915_drm_suspend(struct drm_device *dev)
288 + pci_power_t opregion_target_state;
289 + int error;
290 +
291 +- /* ignore lid events during suspend */
292 +- mutex_lock(&dev_priv->modeset_restore_lock);
293 +- dev_priv->modeset_restore = MODESET_SUSPENDED;
294 +- mutex_unlock(&dev_priv->modeset_restore_lock);
295 +-
296 + disable_rpm_wakeref_asserts(dev_priv);
297 +
298 + /* We do a lot of poking in a lot of registers, make sure they work
299 +@@ -1718,10 +1712,6 @@ static int i915_drm_resume(struct drm_device *dev)
300 +
301 + intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
302 +
303 +- mutex_lock(&dev_priv->modeset_restore_lock);
304 +- dev_priv->modeset_restore = MODESET_DONE;
305 +- mutex_unlock(&dev_priv->modeset_restore_lock);
306 +-
307 + intel_opregion_notify_adapter(dev_priv, PCI_D0);
308 +
309 + intel_autoenable_gt_powersave(dev_priv);
310 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
311 +index 51411894d2cd..41f51509c9e4 100644
312 +--- a/drivers/gpu/drm/i915/i915_drv.h
313 ++++ b/drivers/gpu/drm/i915/i915_drv.h
314 +@@ -1183,6 +1183,7 @@ enum intel_sbi_destination {
315 + #define QUIRK_BACKLIGHT_PRESENT (1<<3)
316 + #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
317 + #define QUIRK_INCREASE_T12_DELAY (1<<6)
318 ++#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
319 +
320 + struct intel_fbdev;
321 + struct intel_fbc_work;
322 +@@ -1614,12 +1615,6 @@ struct i915_gpu_error {
323 + unsigned long test_irq_rings;
324 + };
325 +
326 +-enum modeset_restore {
327 +- MODESET_ON_LID_OPEN,
328 +- MODESET_DONE,
329 +- MODESET_SUSPENDED,
330 +-};
331 +-
332 + #define DP_AUX_A 0x40
333 + #define DP_AUX_B 0x10
334 + #define DP_AUX_C 0x20
335 +@@ -2296,8 +2291,6 @@ struct drm_i915_private {
336 +
337 + unsigned long quirks;
338 +
339 +- enum modeset_restore modeset_restore;
340 +- struct mutex modeset_restore_lock;
341 + struct drm_atomic_state *modeset_restore_state;
342 + struct drm_modeset_acquire_ctx reset_ctx;
343 +
344 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
345 +index 3a4a581345c4..77085b9bcb30 100644
346 +--- a/drivers/gpu/drm/i915/intel_ddi.c
347 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
348 +@@ -1526,15 +1526,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
349 + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
350 + }
351 +
352 +-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
353 +- enum transcoder cpu_transcoder)
354 ++void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
355 + {
356 ++ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
357 ++ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
358 ++ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
359 + i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
360 + uint32_t val = I915_READ(reg);
361 +
362 + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
363 + val |= TRANS_DDI_PORT_NONE;
364 + I915_WRITE(reg, val);
365 ++
366 ++ if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
367 ++ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
368 ++ DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
369 ++ /* Quirk time at 100ms for reliable operation */
370 ++ msleep(100);
371 ++ }
372 + }
373 +
374 + bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
375 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
376 +index cf648c526e12..2006ab44fbf9 100644
377 +--- a/drivers/gpu/drm/i915/intel_display.c
378 ++++ b/drivers/gpu/drm/i915/intel_display.c
379 +@@ -5653,7 +5653,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
380 + intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
381 +
382 + if (!transcoder_is_dsi(cpu_transcoder))
383 +- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
384 ++ intel_ddi_disable_transcoder_func(old_crtc_state);
385 +
386 + if (INTEL_GEN(dev_priv) >= 9)
387 + skylake_scaler_disable(intel_crtc);
388 +@@ -14286,6 +14286,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
389 + DRM_INFO("Applying T12 delay quirk\n");
390 + }
391 +
392 ++/*
393 ++ * GeminiLake NUC HDMI outputs require additional off time
394 ++ * this allows the onboard retimer to correctly sync to signal
395 ++ */
396 ++static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
397 ++{
398 ++ struct drm_i915_private *dev_priv = to_i915(dev);
399 ++
400 ++ dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
401 ++ DRM_INFO("Applying Increase DDI Disabled quirk\n");
402 ++}
403 ++
404 + struct intel_quirk {
405 + int device;
406 + int subsystem_vendor;
407 +@@ -14372,6 +14384,13 @@ static struct intel_quirk intel_quirks[] = {
408 +
409 + /* Toshiba Satellite P50-C-18C */
410 + { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
411 ++
412 ++ /* GeminiLake NUC */
413 ++ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
414 ++ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
415 ++ /* ASRock ITX*/
416 ++ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
417 ++ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
418 + };
419 +
420 + static void intel_init_quirks(struct drm_device *dev)
421 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
422 +index 589905aab185..3adb9c3b412e 100644
423 +--- a/drivers/gpu/drm/i915/intel_drv.h
424 ++++ b/drivers/gpu/drm/i915/intel_drv.h
425 +@@ -1254,8 +1254,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
426 + enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
427 + bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
428 + void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
429 +-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
430 +- enum transcoder cpu_transcoder);
431 ++void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
432 + void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
433 + void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
434 + struct intel_encoder *
435 +diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
436 +index dae4e22a2c3f..fe67e458b003 100644
437 +--- a/drivers/gpu/drm/i915/intel_lvds.c
438 ++++ b/drivers/gpu/drm/i915/intel_lvds.c
439 +@@ -44,8 +44,6 @@
440 + /* Private structure for the integrated LVDS support */
441 + struct intel_lvds_connector {
442 + struct intel_connector base;
443 +-
444 +- struct notifier_block lid_notifier;
445 + };
446 +
447 + struct intel_lvds_pps {
448 +@@ -440,26 +438,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
449 + return true;
450 + }
451 +
452 +-/**
453 +- * Detect the LVDS connection.
454 +- *
455 +- * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means
456 +- * connected and closed means disconnected. We also send hotplug events as
457 +- * needed, using lid status notification from the input layer.
458 +- */
459 + static enum drm_connector_status
460 + intel_lvds_detect(struct drm_connector *connector, bool force)
461 + {
462 +- struct drm_i915_private *dev_priv = to_i915(connector->dev);
463 +- enum drm_connector_status status;
464 +-
465 +- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
466 +- connector->base.id, connector->name);
467 +-
468 +- status = intel_panel_detect(dev_priv);
469 +- if (status != connector_status_unknown)
470 +- return status;
471 +-
472 + return connector_status_connected;
473 + }
474 +
475 +@@ -484,117 +465,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
476 + return 1;
477 + }
478 +
479 +-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
480 +-{
481 +- DRM_INFO("Skipping forced modeset for %s\n", id->ident);
482 +- return 1;
483 +-}
484 +-
485 +-/* The GPU hangs up on these systems if modeset is performed on LID open */
486 +-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
487 +- {
488 +- .callback = intel_no_modeset_on_lid_dmi_callback,
489 +- .ident = "Toshiba Tecra A11",
490 +- .matches = {
491 +- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
492 +- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
493 +- },
494 +- },
495 +-
496 +- { } /* terminating entry */
497 +-};
498 +-
499 +-/*
500 +- * Lid events. Note the use of 'modeset':
501 +- * - we set it to MODESET_ON_LID_OPEN on lid close,
502 +- * and set it to MODESET_DONE on open
503 +- * - we use it as a "only once" bit (ie we ignore
504 +- * duplicate events where it was already properly set)
505 +- * - the suspend/resume paths will set it to
506 +- * MODESET_SUSPENDED and ignore the lid open event,
507 +- * because they restore the mode ("lid open").
508 +- */
509 +-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
510 +- void *unused)
511 +-{
512 +- struct intel_lvds_connector *lvds_connector =
513 +- container_of(nb, struct intel_lvds_connector, lid_notifier);
514 +- struct drm_connector *connector = &lvds_connector->base.base;
515 +- struct drm_device *dev = connector->dev;
516 +- struct drm_i915_private *dev_priv = to_i915(dev);
517 +-
518 +- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
519 +- return NOTIFY_OK;
520 +-
521 +- mutex_lock(&dev_priv->modeset_restore_lock);
522 +- if (dev_priv->modeset_restore == MODESET_SUSPENDED)
523 +- goto exit;
524 +- /*
525 +- * check and update the status of LVDS connector after receiving
526 +- * the LID nofication event.
527 +- */
528 +- connector->status = connector->funcs->detect(connector, false);
529 +-
530 +- /* Don't force modeset on machines where it causes a GPU lockup */
531 +- if (dmi_check_system(intel_no_modeset_on_lid))
532 +- goto exit;
533 +- if (!acpi_lid_open()) {
534 +- /* do modeset on next lid open event */
535 +- dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
536 +- goto exit;
537 +- }
538 +-
539 +- if (dev_priv->modeset_restore == MODESET_DONE)
540 +- goto exit;
541 +-
542 +- /*
543 +- * Some old platform's BIOS love to wreak havoc while the lid is closed.
544 +- * We try to detect this here and undo any damage. The split for PCH
545 +- * platforms is rather conservative and a bit arbitrary expect that on
546 +- * those platforms VGA disabling requires actual legacy VGA I/O access,
547 +- * and as part of the cleanup in the hw state restore we also redisable
548 +- * the vga plane.
549 +- */
550 +- if (!HAS_PCH_SPLIT(dev_priv))
551 +- intel_display_resume(dev);
552 +-
553 +- dev_priv->modeset_restore = MODESET_DONE;
554 +-
555 +-exit:
556 +- mutex_unlock(&dev_priv->modeset_restore_lock);
557 +- return NOTIFY_OK;
558 +-}
559 +-
560 +-static int
561 +-intel_lvds_connector_register(struct drm_connector *connector)
562 +-{
563 +- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
564 +- int ret;
565 +-
566 +- ret = intel_connector_register(connector);
567 +- if (ret)
568 +- return ret;
569 +-
570 +- lvds->lid_notifier.notifier_call = intel_lid_notify;
571 +- if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
572 +- DRM_DEBUG_KMS("lid notifier registration failed\n");
573 +- lvds->lid_notifier.notifier_call = NULL;
574 +- }
575 +-
576 +- return 0;
577 +-}
578 +-
579 +-static void
580 +-intel_lvds_connector_unregister(struct drm_connector *connector)
581 +-{
582 +- struct intel_lvds_connector *lvds = to_lvds_connector(connector);
583 +-
584 +- if (lvds->lid_notifier.notifier_call)
585 +- acpi_lid_notifier_unregister(&lvds->lid_notifier);
586 +-
587 +- intel_connector_unregister(connector);
588 +-}
589 +-
590 + /**
591 + * intel_lvds_destroy - unregister and free LVDS structures
592 + * @connector: connector to free
593 +@@ -627,8 +497,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
594 + .fill_modes = drm_helper_probe_single_connector_modes,
595 + .atomic_get_property = intel_digital_connector_atomic_get_property,
596 + .atomic_set_property = intel_digital_connector_atomic_set_property,
597 +- .late_register = intel_lvds_connector_register,
598 +- .early_unregister = intel_lvds_connector_unregister,
599 ++ .late_register = intel_connector_register,
600 ++ .early_unregister = intel_connector_unregister,
601 + .destroy = intel_lvds_destroy,
602 + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
603 + .atomic_duplicate_state = intel_digital_connector_duplicate_state,
604 +@@ -1091,8 +961,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
605 + * 2) check for VBT data
606 + * 3) check to see if LVDS is already on
607 + * if none of the above, no panel
608 +- * 4) make sure lid is open
609 +- * if closed, act like it's not there for now
610 + */
611 +
612 + /*
613 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
614 +index 672b0be41d44..a306493e2e97 100644
615 +--- a/drivers/hid/hid-core.c
616 ++++ b/drivers/hid/hid-core.c
617 +@@ -1964,6 +1964,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
618 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
619 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
620 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
621 ++ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
622 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
623 ++ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
624 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
625 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
626 + #endif
627 +diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
628 +index c2a2ce8ee541..ef699477d94a 100644
629 +--- a/drivers/hwtracing/intel_th/pci.c
630 ++++ b/drivers/hwtracing/intel_th/pci.c
631 +@@ -168,6 +168,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
632 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
633 + .driver_data = (kernel_ulong_t)&intel_th_2x,
634 + },
635 ++ {
636 ++ /* Ice Lake PCH */
637 ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
638 ++ .driver_data = (kernel_ulong_t)&intel_th_2x,
639 ++ },
640 + { 0 },
641 + };
642 +
643 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
644 +index 15d764afec3b..7f044df1ea07 100644
645 +--- a/drivers/i2c/busses/i2c-rcar.c
646 ++++ b/drivers/i2c/busses/i2c-rcar.c
647 +@@ -32,6 +32,7 @@
648 + #include <linux/of_device.h>
649 + #include <linux/platform_device.h>
650 + #include <linux/pm_runtime.h>
651 ++#include <linux/reset.h>
652 + #include <linux/slab.h>
653 +
654 + /* register offsets */
655 +@@ -111,8 +112,9 @@
656 + #define ID_ARBLOST (1 << 3)
657 + #define ID_NACK (1 << 4)
658 + /* persistent flags */
659 ++#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
660 + #define ID_P_PM_BLOCKED (1 << 31)
661 +-#define ID_P_MASK ID_P_PM_BLOCKED
662 ++#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
663 +
664 + enum rcar_i2c_type {
665 + I2C_RCAR_GEN1,
666 +@@ -140,6 +142,8 @@ struct rcar_i2c_priv {
667 + struct dma_chan *dma_rx;
668 + struct scatterlist sg;
669 + enum dma_data_direction dma_direction;
670 ++
671 ++ struct reset_control *rstc;
672 + };
673 +
674 + #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
675 +@@ -321,6 +325,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
676 + dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
677 + sg_dma_len(&priv->sg), priv->dma_direction);
678 +
679 ++ /* Gen3 can only do one RXDMA per transfer and we just completed it */
680 ++ if (priv->devtype == I2C_RCAR_GEN3 &&
681 ++ priv->dma_direction == DMA_FROM_DEVICE)
682 ++ priv->flags |= ID_P_NO_RXDMA;
683 ++
684 + priv->dma_direction = DMA_NONE;
685 + }
686 +
687 +@@ -358,8 +367,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
688 + unsigned char *buf;
689 + int len;
690 +
691 +- /* Do not use DMA if it's not available or for messages < 8 bytes */
692 +- if (IS_ERR(chan) || msg->len < 8)
693 ++ /* Do various checks to see if DMA is feasible at all */
694 ++ if (IS_ERR(chan) || msg->len < 8 ||
695 ++ (read && priv->flags & ID_P_NO_RXDMA))
696 + return;
697 +
698 + if (read) {
699 +@@ -688,6 +698,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
700 + }
701 + }
702 +
703 ++/* I2C is a special case, we need to poll the status of a reset */
704 ++static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
705 ++{
706 ++ int i, ret;
707 ++
708 ++ ret = reset_control_reset(priv->rstc);
709 ++ if (ret)
710 ++ return ret;
711 ++
712 ++ for (i = 0; i < LOOP_TIMEOUT; i++) {
713 ++ ret = reset_control_status(priv->rstc);
714 ++ if (ret == 0)
715 ++ return 0;
716 ++ udelay(1);
717 ++ }
718 ++
719 ++ return -ETIMEDOUT;
720 ++}
721 ++
722 + static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
723 + struct i2c_msg *msgs,
724 + int num)
725 +@@ -699,6 +728,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
726 +
727 + pm_runtime_get_sync(dev);
728 +
729 ++ /* Gen3 needs a reset before allowing RXDMA once */
730 ++ if (priv->devtype == I2C_RCAR_GEN3) {
731 ++ priv->flags |= ID_P_NO_RXDMA;
732 ++ if (!IS_ERR(priv->rstc)) {
733 ++ ret = rcar_i2c_do_reset(priv);
734 ++ if (ret == 0)
735 ++ priv->flags &= ~ID_P_NO_RXDMA;
736 ++ }
737 ++ }
738 ++
739 + rcar_i2c_init(priv);
740 +
741 + ret = rcar_i2c_bus_barrier(priv);
742 +@@ -868,6 +907,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
743 + if (ret < 0)
744 + goto out_pm_put;
745 +
746 ++ if (priv->devtype == I2C_RCAR_GEN3) {
747 ++ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
748 ++ if (!IS_ERR(priv->rstc)) {
749 ++ ret = reset_control_status(priv->rstc);
750 ++ if (ret < 0)
751 ++ priv->rstc = ERR_PTR(-ENOTSUPP);
752 ++ }
753 ++ }
754 ++
755 + /* Stay always active when multi-master to keep arbitration working */
756 + if (of_property_read_bool(dev->of_node, "multi-master"))
757 + priv->flags |= ID_P_PM_BLOCKED;
758 +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
759 +index 33cf1734c4e5..f9faacce9250 100644
760 +--- a/drivers/infiniband/hw/hfi1/chip.c
761 ++++ b/drivers/infiniband/hw/hfi1/chip.c
762 +@@ -6722,6 +6722,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
763 + struct hfi1_devdata *dd = ppd->dd;
764 + struct send_context *sc;
765 + int i;
766 ++ int sc_flags;
767 +
768 + if (flags & FREEZE_SELF)
769 + write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
770 +@@ -6732,11 +6733,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
771 + /* notify all SDMA engines that they are going into a freeze */
772 + sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
773 +
774 ++ sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
775 ++ SCF_LINK_DOWN : 0);
776 + /* do halt pre-handling on all enabled send contexts */
777 + for (i = 0; i < dd->num_send_contexts; i++) {
778 + sc = dd->send_contexts[i].sc;
779 + if (sc && (sc->flags & SCF_ENABLED))
780 +- sc_stop(sc, SCF_FROZEN | SCF_HALTED);
781 ++ sc_stop(sc, sc_flags);
782 + }
783 +
784 + /* Send context are frozen. Notify user space */
785 +@@ -10646,6 +10649,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
786 + add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
787 +
788 + handle_linkup_change(dd, 1);
789 ++ pio_kernel_linkup(dd);
790 ++
791 + ppd->host_link_state = HLS_UP_INIT;
792 + break;
793 + case HLS_UP_ARMED:
794 +diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
795 +index 19a8e6052820..07bf282fd8aa 100644
796 +--- a/drivers/infiniband/hw/hfi1/pio.c
797 ++++ b/drivers/infiniband/hw/hfi1/pio.c
798 +@@ -942,20 +942,18 @@ void sc_free(struct send_context *sc)
799 + void sc_disable(struct send_context *sc)
800 + {
801 + u64 reg;
802 +- unsigned long flags;
803 + struct pio_buf *pbuf;
804 +
805 + if (!sc)
806 + return;
807 +
808 + /* do all steps, even if already disabled */
809 +- spin_lock_irqsave(&sc->alloc_lock, flags);
810 ++ spin_lock_irq(&sc->alloc_lock);
811 + reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
812 + reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
813 + sc->flags &= ~SCF_ENABLED;
814 + sc_wait_for_packet_egress(sc, 1);
815 + write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
816 +- spin_unlock_irqrestore(&sc->alloc_lock, flags);
817 +
818 + /*
819 + * Flush any waiters. Once the context is disabled,
820 +@@ -965,7 +963,7 @@ void sc_disable(struct send_context *sc)
821 + * proceed with the flush.
822 + */
823 + udelay(1);
824 +- spin_lock_irqsave(&sc->release_lock, flags);
825 ++ spin_lock(&sc->release_lock);
826 + if (sc->sr) { /* this context has a shadow ring */
827 + while (sc->sr_tail != sc->sr_head) {
828 + pbuf = &sc->sr[sc->sr_tail].pbuf;
829 +@@ -976,7 +974,8 @@ void sc_disable(struct send_context *sc)
830 + sc->sr_tail = 0;
831 + }
832 + }
833 +- spin_unlock_irqrestore(&sc->release_lock, flags);
834 ++ spin_unlock(&sc->release_lock);
835 ++ spin_unlock_irq(&sc->alloc_lock);
836 + }
837 +
838 + /* return SendEgressCtxtStatus.PacketOccupancy */
839 +@@ -1199,11 +1198,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
840 + sc = dd->send_contexts[i].sc;
841 + if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
842 + continue;
843 ++ if (sc->flags & SCF_LINK_DOWN)
844 ++ continue;
845 +
846 + sc_enable(sc); /* will clear the sc frozen flag */
847 + }
848 + }
849 +
850 ++/**
851 ++ * pio_kernel_linkup() - Re-enable send contexts after linkup event
852 ++ * @dd: valid devive data
853 ++ *
854 ++ * When the link goes down, the freeze path is taken. However, a link down
855 ++ * event is different from a freeze because if the send context is re-enabled
856 ++ * whowever is sending data will start sending data again, which will hang
857 ++ * any QP that is sending data.
858 ++ *
859 ++ * The freeze path now looks at the type of event that occurs and takes this
860 ++ * path for link down event.
861 ++ */
862 ++void pio_kernel_linkup(struct hfi1_devdata *dd)
863 ++{
864 ++ struct send_context *sc;
865 ++ int i;
866 ++
867 ++ for (i = 0; i < dd->num_send_contexts; i++) {
868 ++ sc = dd->send_contexts[i].sc;
869 ++ if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
870 ++ continue;
871 ++
872 ++ sc_enable(sc); /* will clear the sc link down flag */
873 ++ }
874 ++}
875 ++
876 + /*
877 + * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
878 + * Returns:
879 +@@ -1403,11 +1430,10 @@ void sc_stop(struct send_context *sc, int flag)
880 + {
881 + unsigned long flags;
882 +
883 +- /* mark the context */
884 +- sc->flags |= flag;
885 +-
886 + /* stop buffer allocations */
887 + spin_lock_irqsave(&sc->alloc_lock, flags);
888 ++ /* mark the context */
889 ++ sc->flags |= flag;
890 + sc->flags &= ~SCF_ENABLED;
891 + spin_unlock_irqrestore(&sc->alloc_lock, flags);
892 + wake_up(&sc->halt_wait);
893 +diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
894 +index 99ca5edb0b43..c7c4e6e5d317 100644
895 +--- a/drivers/infiniband/hw/hfi1/pio.h
896 ++++ b/drivers/infiniband/hw/hfi1/pio.h
897 +@@ -145,6 +145,7 @@ struct send_context {
898 + #define SCF_IN_FREE 0x02
899 + #define SCF_HALTED 0x04
900 + #define SCF_FROZEN 0x08
901 ++#define SCF_LINK_DOWN 0x10
902 +
903 + struct send_context_info {
904 + struct send_context *sc; /* allocated working context */
905 +@@ -312,6 +313,7 @@ void set_pio_integrity(struct send_context *sc);
906 + void pio_reset_all(struct hfi1_devdata *dd);
907 + void pio_freeze(struct hfi1_devdata *dd);
908 + void pio_kernel_unfreeze(struct hfi1_devdata *dd);
909 ++void pio_kernel_linkup(struct hfi1_devdata *dd);
910 +
911 + /* global PIO send control operations */
912 + #define PSC_GLOBAL_ENABLE 0
913 +diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
914 +index f1235831283d..fdeda0b0fbd6 100644
915 +--- a/drivers/input/keyboard/atakbd.c
916 ++++ b/drivers/input/keyboard/atakbd.c
917 +@@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
918 + */
919 +
920 +
921 +-static unsigned char atakbd_keycode[0x72] = { /* American layout */
922 +- [0] = KEY_GRAVE,
923 ++static unsigned char atakbd_keycode[0x73] = { /* American layout */
924 + [1] = KEY_ESC,
925 + [2] = KEY_1,
926 + [3] = KEY_2,
927 +@@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
928 + [38] = KEY_L,
929 + [39] = KEY_SEMICOLON,
930 + [40] = KEY_APOSTROPHE,
931 +- [41] = KEY_BACKSLASH, /* FIXME, '#' */
932 ++ [41] = KEY_GRAVE,
933 + [42] = KEY_LEFTSHIFT,
934 +- [43] = KEY_GRAVE, /* FIXME: '~' */
935 ++ [43] = KEY_BACKSLASH,
936 + [44] = KEY_Z,
937 + [45] = KEY_X,
938 + [46] = KEY_C,
939 +@@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
940 + [66] = KEY_F8,
941 + [67] = KEY_F9,
942 + [68] = KEY_F10,
943 +- [69] = KEY_ESC,
944 +- [70] = KEY_DELETE,
945 +- [71] = KEY_KP7,
946 +- [72] = KEY_KP8,
947 +- [73] = KEY_KP9,
948 ++ [71] = KEY_HOME,
949 ++ [72] = KEY_UP,
950 + [74] = KEY_KPMINUS,
951 +- [75] = KEY_KP4,
952 +- [76] = KEY_KP5,
953 +- [77] = KEY_KP6,
954 ++ [75] = KEY_LEFT,
955 ++ [77] = KEY_RIGHT,
956 + [78] = KEY_KPPLUS,
957 +- [79] = KEY_KP1,
958 +- [80] = KEY_KP2,
959 +- [81] = KEY_KP3,
960 +- [82] = KEY_KP0,
961 +- [83] = KEY_KPDOT,
962 +- [90] = KEY_KPLEFTPAREN,
963 +- [91] = KEY_KPRIGHTPAREN,
964 +- [92] = KEY_KPASTERISK, /* FIXME */
965 +- [93] = KEY_KPASTERISK,
966 +- [94] = KEY_KPPLUS,
967 +- [95] = KEY_HELP,
968 ++ [80] = KEY_DOWN,
969 ++ [82] = KEY_INSERT,
970 ++ [83] = KEY_DELETE,
971 + [96] = KEY_102ND,
972 +- [97] = KEY_KPASTERISK, /* FIXME */
973 +- [98] = KEY_KPSLASH,
974 ++ [97] = KEY_UNDO,
975 ++ [98] = KEY_HELP,
976 + [99] = KEY_KPLEFTPAREN,
977 + [100] = KEY_KPRIGHTPAREN,
978 + [101] = KEY_KPSLASH,
979 + [102] = KEY_KPASTERISK,
980 +- [103] = KEY_UP,
981 +- [104] = KEY_KPASTERISK, /* FIXME */
982 +- [105] = KEY_LEFT,
983 +- [106] = KEY_RIGHT,
984 +- [107] = KEY_KPASTERISK, /* FIXME */
985 +- [108] = KEY_DOWN,
986 +- [109] = KEY_KPASTERISK, /* FIXME */
987 +- [110] = KEY_KPASTERISK, /* FIXME */
988 +- [111] = KEY_KPASTERISK, /* FIXME */
989 +- [112] = KEY_KPASTERISK, /* FIXME */
990 +- [113] = KEY_KPASTERISK /* FIXME */
991 ++ [103] = KEY_KP7,
992 ++ [104] = KEY_KP8,
993 ++ [105] = KEY_KP9,
994 ++ [106] = KEY_KP4,
995 ++ [107] = KEY_KP5,
996 ++ [108] = KEY_KP6,
997 ++ [109] = KEY_KP1,
998 ++ [110] = KEY_KP2,
999 ++ [111] = KEY_KP3,
1000 ++ [112] = KEY_KP0,
1001 ++ [113] = KEY_KPDOT,
1002 ++ [114] = KEY_KPENTER,
1003 + };
1004 +
1005 + static struct input_dev *atakbd_dev;
1006 +@@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
1007 + static void atakbd_interrupt(unsigned char scancode, char down)
1008 + {
1009 +
1010 +- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
1011 ++ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
1012 +
1013 + // report raw events here?
1014 +
1015 + scancode = atakbd_keycode[scancode];
1016 +
1017 +- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
1018 +- input_report_key(atakbd_dev, scancode, 1);
1019 +- input_report_key(atakbd_dev, scancode, 0);
1020 +- input_sync(atakbd_dev);
1021 +- } else {
1022 +- input_report_key(atakbd_dev, scancode, down);
1023 +- input_sync(atakbd_dev);
1024 +- }
1025 +- } else /* scancodes >= 0xf2 are mouse data, most likely */
1026 ++ input_report_key(atakbd_dev, scancode, down);
1027 ++ input_sync(atakbd_dev);
1028 ++ } else /* scancodes >= 0xf3 are mouse data, most likely */
1029 + printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
1030 +
1031 + return;
1032 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1033 +index 9137030423cd..efa6cd2500b9 100644
1034 +--- a/drivers/iommu/amd_iommu.c
1035 ++++ b/drivers/iommu/amd_iommu.c
1036 +@@ -253,7 +253,13 @@ static u16 get_alias(struct device *dev)
1037 +
1038 + /* The callers make sure that get_device_id() does not fail here */
1039 + devid = get_device_id(dev);
1040 ++
1041 ++ /* For ACPI HID devices, we simply return the devid as such */
1042 ++ if (!dev_is_pci(dev))
1043 ++ return devid;
1044 ++
1045 + ivrs_alias = amd_iommu_alias_table[devid];
1046 ++
1047 + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
1048 +
1049 + if (ivrs_alias == pci_alias)
1050 +diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
1051 +index 666d319d3d1a..1f6c1eefe389 100644
1052 +--- a/drivers/media/usb/dvb-usb-v2/af9035.c
1053 ++++ b/drivers/media/usb/dvb-usb-v2/af9035.c
1054 +@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
1055 + if (msg[0].addr == state->af9033_i2c_addr[1])
1056 + reg |= 0x100000;
1057 +
1058 +- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
1059 +- msg[0].len - 3);
1060 ++ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
1061 ++ &msg[0].buf[3],
1062 ++ msg[0].len - 3)
1063 ++ : -EOPNOTSUPP;
1064 + } else {
1065 + /* I2C write */
1066 + u8 buf[MAX_XFER_SIZE];
1067 +diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
1068 +index 7feff2450ed6..d1da8f05ef85 100644
1069 +--- a/drivers/net/ethernet/ibm/emac/core.c
1070 ++++ b/drivers/net/ethernet/ibm/emac/core.c
1071 +@@ -2671,12 +2671,17 @@ static int emac_init_phy(struct emac_instance *dev)
1072 + if (of_phy_is_fixed_link(np)) {
1073 + int res = emac_dt_mdio_probe(dev);
1074 +
1075 +- if (!res) {
1076 +- res = of_phy_register_fixed_link(np);
1077 +- if (res)
1078 +- mdiobus_unregister(dev->mii_bus);
1079 ++ if (res)
1080 ++ return res;
1081 ++
1082 ++ res = of_phy_register_fixed_link(np);
1083 ++ dev->phy_dev = of_phy_find_device(np);
1084 ++ if (res || !dev->phy_dev) {
1085 ++ mdiobus_unregister(dev->mii_bus);
1086 ++ return res ? res : -EINVAL;
1087 + }
1088 +- return res;
1089 ++ emac_adjust_link(dev->ndev);
1090 ++ put_device(&dev->phy_dev->mdio.dev);
1091 + }
1092 + return 0;
1093 + }
1094 +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
1095 +index 6f57c052053e..050dc213e8db 100644
1096 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
1097 ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
1098 +@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
1099 + struct mlx4_dev *dev = &priv->dev;
1100 + struct mlx4_eq *eq = &priv->eq_table.eq[vec];
1101 +
1102 +- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
1103 ++ if (!cpumask_available(eq->affinity_mask) ||
1104 ++ cpumask_empty(eq->affinity_mask))
1105 + return;
1106 +
1107 + hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
1108 +diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
1109 +index 96a27b00c90e..897bd33c2c50 100644
1110 +--- a/drivers/net/ethernet/renesas/ravb.h
1111 ++++ b/drivers/net/ethernet/renesas/ravb.h
1112 +@@ -431,6 +431,7 @@ enum EIS_BIT {
1113 + EIS_CULF1 = 0x00000080,
1114 + EIS_TFFF = 0x00000100,
1115 + EIS_QFS = 0x00010000,
1116 ++ EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
1117 + };
1118 +
1119 + /* RIC0 */
1120 +@@ -475,6 +476,7 @@ enum RIS0_BIT {
1121 + RIS0_FRF15 = 0x00008000,
1122 + RIS0_FRF16 = 0x00010000,
1123 + RIS0_FRF17 = 0x00020000,
1124 ++ RIS0_RESERVED = GENMASK(31, 18),
1125 + };
1126 +
1127 + /* RIC1 */
1128 +@@ -531,6 +533,7 @@ enum RIS2_BIT {
1129 + RIS2_QFF16 = 0x00010000,
1130 + RIS2_QFF17 = 0x00020000,
1131 + RIS2_RFFF = 0x80000000,
1132 ++ RIS2_RESERVED = GENMASK(30, 18),
1133 + };
1134 +
1135 + /* TIC */
1136 +@@ -547,6 +550,7 @@ enum TIS_BIT {
1137 + TIS_FTF1 = 0x00000002, /* Undocumented? */
1138 + TIS_TFUF = 0x00000100,
1139 + TIS_TFWF = 0x00000200,
1140 ++ TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
1141 + };
1142 +
1143 + /* ISS */
1144 +@@ -620,6 +624,7 @@ enum GIC_BIT {
1145 + enum GIS_BIT {
1146 + GIS_PTCF = 0x00000001, /* Undocumented? */
1147 + GIS_PTMF = 0x00000004,
1148 ++ GIS_RESERVED = GENMASK(15, 10),
1149 + };
1150 +
1151 + /* GIE (R-Car Gen3 only) */
1152 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1153 +index e87a779bfcfe..ff3a293ffe36 100644
1154 +--- a/drivers/net/ethernet/renesas/ravb_main.c
1155 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
1156 +@@ -721,10 +721,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
1157 + u32 eis, ris2;
1158 +
1159 + eis = ravb_read(ndev, EIS);
1160 +- ravb_write(ndev, ~EIS_QFS, EIS);
1161 ++ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
1162 + if (eis & EIS_QFS) {
1163 + ris2 = ravb_read(ndev, RIS2);
1164 +- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
1165 ++ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
1166 ++ RIS2);
1167 +
1168 + /* Receive Descriptor Empty int */
1169 + if (ris2 & RIS2_QFF0)
1170 +@@ -777,7 +778,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
1171 + u32 tis = ravb_read(ndev, TIS);
1172 +
1173 + if (tis & TIS_TFUF) {
1174 +- ravb_write(ndev, ~TIS_TFUF, TIS);
1175 ++ ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
1176 + ravb_get_tx_tstamp(ndev);
1177 + return true;
1178 + }
1179 +@@ -912,7 +913,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
1180 + /* Processing RX Descriptor Ring */
1181 + if (ris0 & mask) {
1182 + /* Clear RX interrupt */
1183 +- ravb_write(ndev, ~mask, RIS0);
1184 ++ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
1185 + if (ravb_rx(ndev, &quota, q))
1186 + goto out;
1187 + }
1188 +@@ -920,7 +921,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
1189 + if (tis & mask) {
1190 + spin_lock_irqsave(&priv->lock, flags);
1191 + /* Clear TX interrupt */
1192 +- ravb_write(ndev, ~mask, TIS);
1193 ++ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
1194 + ravb_tx_free(ndev, q, true);
1195 + netif_wake_subqueue(ndev, q);
1196 + mmiowb();
1197 +diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
1198 +index eede70ec37f8..9e3222fd69f9 100644
1199 +--- a/drivers/net/ethernet/renesas/ravb_ptp.c
1200 ++++ b/drivers/net/ethernet/renesas/ravb_ptp.c
1201 +@@ -319,7 +319,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
1202 + }
1203 + }
1204 +
1205 +- ravb_write(ndev, ~gis, GIS);
1206 ++ ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
1207 + }
1208 +
1209 + void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
1210 +diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c
1211 +index 88abdddee2ad..a06ad2c65174 100644
1212 +--- a/drivers/pci/dwc/pcie-designware.c
1213 ++++ b/drivers/pci/dwc/pcie-designware.c
1214 +@@ -138,7 +138,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
1215 + if (val & PCIE_ATU_ENABLE)
1216 + return;
1217 +
1218 +- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1219 ++ mdelay(LINK_WAIT_IATU);
1220 + }
1221 + dev_err(pci->dev, "outbound iATU is not being enabled\n");
1222 + }
1223 +@@ -181,7 +181,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
1224 + if (val & PCIE_ATU_ENABLE)
1225 + return;
1226 +
1227 +- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1228 ++ mdelay(LINK_WAIT_IATU);
1229 + }
1230 + dev_err(pci->dev, "outbound iATU is not being enabled\n");
1231 + }
1232 +@@ -239,7 +239,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
1233 + if (val & PCIE_ATU_ENABLE)
1234 + return 0;
1235 +
1236 +- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1237 ++ mdelay(LINK_WAIT_IATU);
1238 + }
1239 + dev_err(pci->dev, "inbound iATU is not being enabled\n");
1240 +
1241 +@@ -285,7 +285,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
1242 + if (val & PCIE_ATU_ENABLE)
1243 + return 0;
1244 +
1245 +- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
1246 ++ mdelay(LINK_WAIT_IATU);
1247 + }
1248 + dev_err(pci->dev, "inbound iATU is not being enabled\n");
1249 +
1250 +diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
1251 +index cb493bcae8b4..3551dd607b90 100644
1252 +--- a/drivers/pci/dwc/pcie-designware.h
1253 ++++ b/drivers/pci/dwc/pcie-designware.h
1254 +@@ -28,8 +28,7 @@
1255 +
1256 + /* Parameters for the waiting for iATU enabled routine */
1257 + #define LINK_WAIT_MAX_IATU_RETRIES 5
1258 +-#define LINK_WAIT_IATU_MIN 9000
1259 +-#define LINK_WAIT_IATU_MAX 10000
1260 ++#define LINK_WAIT_IATU 9
1261 +
1262 + /* Synopsys-specific PCIe configuration registers */
1263 + #define PCIE_PORT_LINK_CONTROL 0x710
1264 +diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1265 +index 2799a6b08f73..25d2741cdf96 100644
1266 +--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1267 ++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1268 +@@ -3465,11 +3465,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
1269 + vscsi->dds.window[LOCAL].liobn,
1270 + vscsi->dds.window[REMOTE].liobn);
1271 +
1272 +- strcpy(vscsi->eye, "VSCSI ");
1273 +- strncat(vscsi->eye, vdev->name, MAX_EYE);
1274 ++ snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
1275 +
1276 + vscsi->dds.unit_id = vdev->unit_address;
1277 +- strncpy(vscsi->dds.partition_name, partition_name,
1278 ++ strscpy(vscsi->dds.partition_name, partition_name,
1279 + sizeof(vscsi->dds.partition_name));
1280 + vscsi->dds.partition_num = partition_number;
1281 +
1282 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
1283 +index f838bd73befa..35d54ee1c5c7 100644
1284 +--- a/drivers/scsi/ipr.c
1285 ++++ b/drivers/scsi/ipr.c
1286 +@@ -3308,6 +3308,65 @@ static void ipr_release_dump(struct kref *kref)
1287 + LEAVE;
1288 + }
1289 +
1290 ++static void ipr_add_remove_thread(struct work_struct *work)
1291 ++{
1292 ++ unsigned long lock_flags;
1293 ++ struct ipr_resource_entry *res;
1294 ++ struct scsi_device *sdev;
1295 ++ struct ipr_ioa_cfg *ioa_cfg =
1296 ++ container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
1297 ++ u8 bus, target, lun;
1298 ++ int did_work;
1299 ++
1300 ++ ENTER;
1301 ++ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1302 ++
1303 ++restart:
1304 ++ do {
1305 ++ did_work = 0;
1306 ++ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1307 ++ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1308 ++ return;
1309 ++ }
1310 ++
1311 ++ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1312 ++ if (res->del_from_ml && res->sdev) {
1313 ++ did_work = 1;
1314 ++ sdev = res->sdev;
1315 ++ if (!scsi_device_get(sdev)) {
1316 ++ if (!res->add_to_ml)
1317 ++ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1318 ++ else
1319 ++ res->del_from_ml = 0;
1320 ++ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1321 ++ scsi_remove_device(sdev);
1322 ++ scsi_device_put(sdev);
1323 ++ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1324 ++ }
1325 ++ break;
1326 ++ }
1327 ++ }
1328 ++ } while (did_work);
1329 ++
1330 ++ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1331 ++ if (res->add_to_ml) {
1332 ++ bus = res->bus;
1333 ++ target = res->target;
1334 ++ lun = res->lun;
1335 ++ res->add_to_ml = 0;
1336 ++ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1337 ++ scsi_add_device(ioa_cfg->host, bus, target, lun);
1338 ++ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1339 ++ goto restart;
1340 ++ }
1341 ++ }
1342 ++
1343 ++ ioa_cfg->scan_done = 1;
1344 ++ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1345 ++ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1346 ++ LEAVE;
1347 ++}
1348 ++
1349 + /**
1350 + * ipr_worker_thread - Worker thread
1351 + * @work: ioa config struct
1352 +@@ -3322,13 +3381,9 @@ static void ipr_release_dump(struct kref *kref)
1353 + static void ipr_worker_thread(struct work_struct *work)
1354 + {
1355 + unsigned long lock_flags;
1356 +- struct ipr_resource_entry *res;
1357 +- struct scsi_device *sdev;
1358 + struct ipr_dump *dump;
1359 + struct ipr_ioa_cfg *ioa_cfg =
1360 + container_of(work, struct ipr_ioa_cfg, work_q);
1361 +- u8 bus, target, lun;
1362 +- int did_work;
1363 +
1364 + ENTER;
1365 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1366 +@@ -3366,49 +3421,9 @@ static void ipr_worker_thread(struct work_struct *work)
1367 + return;
1368 + }
1369 +
1370 +-restart:
1371 +- do {
1372 +- did_work = 0;
1373 +- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1374 +- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1375 +- return;
1376 +- }
1377 ++ schedule_work(&ioa_cfg->scsi_add_work_q);
1378 +
1379 +- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1380 +- if (res->del_from_ml && res->sdev) {
1381 +- did_work = 1;
1382 +- sdev = res->sdev;
1383 +- if (!scsi_device_get(sdev)) {
1384 +- if (!res->add_to_ml)
1385 +- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1386 +- else
1387 +- res->del_from_ml = 0;
1388 +- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1389 +- scsi_remove_device(sdev);
1390 +- scsi_device_put(sdev);
1391 +- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1392 +- }
1393 +- break;
1394 +- }
1395 +- }
1396 +- } while (did_work);
1397 +-
1398 +- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1399 +- if (res->add_to_ml) {
1400 +- bus = res->bus;
1401 +- target = res->target;
1402 +- lun = res->lun;
1403 +- res->add_to_ml = 0;
1404 +- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1405 +- scsi_add_device(ioa_cfg->host, bus, target, lun);
1406 +- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1407 +- goto restart;
1408 +- }
1409 +- }
1410 +-
1411 +- ioa_cfg->scan_done = 1;
1412 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1413 +- kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
1414 + LEAVE;
1415 + }
1416 +
1417 +@@ -9937,6 +9952,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
1418 + INIT_LIST_HEAD(&ioa_cfg->free_res_q);
1419 + INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1420 + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
1421 ++ INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
1422 + init_waitqueue_head(&ioa_cfg->reset_wait_q);
1423 + init_waitqueue_head(&ioa_cfg->msi_wait_q);
1424 + init_waitqueue_head(&ioa_cfg->eeh_wait_q);
1425 +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
1426 +index c7f0e9e3cd7d..085e6c90f9e6 100644
1427 +--- a/drivers/scsi/ipr.h
1428 ++++ b/drivers/scsi/ipr.h
1429 +@@ -1568,6 +1568,7 @@ struct ipr_ioa_cfg {
1430 + u8 saved_mode_page_len;
1431 +
1432 + struct work_struct work_q;
1433 ++ struct work_struct scsi_add_work_q;
1434 + struct workqueue_struct *reset_work_q;
1435 +
1436 + wait_queue_head_t reset_wait_q;
1437 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1438 +index 4a532318b211..6d3091ff9b92 100644
1439 +--- a/drivers/scsi/sd.c
1440 ++++ b/drivers/scsi/sd.c
1441 +@@ -1285,7 +1285,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1442 + case REQ_OP_ZONE_RESET:
1443 + return sd_zbc_setup_reset_cmnd(cmd);
1444 + default:
1445 +- BUG();
1446 ++ WARN_ON_ONCE(1);
1447 ++ return BLKPREP_KILL;
1448 + }
1449 + }
1450 +
1451 +diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
1452 +index 63936091d524..4ba6e9c422c4 100644
1453 +--- a/drivers/staging/ccree/ssi_buffer_mgr.c
1454 ++++ b/drivers/staging/ccree/ssi_buffer_mgr.c
1455 +@@ -492,7 +492,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
1456 + DMA_TO_DEVICE);
1457 + }
1458 + /* Release pool */
1459 +- if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
1460 ++ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI &&
1461 ++ req_ctx->mlli_params.mlli_virt_addr) {
1462 + dma_pool_free(req_ctx->mlli_params.curr_pool,
1463 + req_ctx->mlli_params.mlli_virt_addr,
1464 + req_ctx->mlli_params.mlli_dma_addr);
1465 +diff --git a/fs/namespace.c b/fs/namespace.c
1466 +index 3ee3ee5819bc..9dc146e7b5e0 100644
1467 +--- a/fs/namespace.c
1468 ++++ b/fs/namespace.c
1469 +@@ -446,10 +446,10 @@ int mnt_want_write_file_path(struct file *file)
1470 + {
1471 + int ret;
1472 +
1473 +- sb_start_write(file_inode(file)->i_sb);
1474 ++ sb_start_write(file->f_path.mnt->mnt_sb);
1475 + ret = __mnt_want_write_file(file);
1476 + if (ret)
1477 +- sb_end_write(file_inode(file)->i_sb);
1478 ++ sb_end_write(file->f_path.mnt->mnt_sb);
1479 + return ret;
1480 + }
1481 +
1482 +@@ -540,8 +540,7 @@ void __mnt_drop_write_file(struct file *file)
1483 +
1484 + void mnt_drop_write_file_path(struct file *file)
1485 + {
1486 +- __mnt_drop_write_file(file);
1487 +- sb_end_write(file_inode(file)->i_sb);
1488 ++ mnt_drop_write(file->f_path.mnt);
1489 + }
1490 +
1491 + void mnt_drop_write_file(struct file *file)
1492 +diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
1493 +index 87067d23a48b..bfa38da4c261 100644
1494 +--- a/include/linux/huge_mm.h
1495 ++++ b/include/linux/huge_mm.h
1496 +@@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1497 + unsigned char *vec);
1498 + extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1499 + unsigned long new_addr, unsigned long old_end,
1500 +- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
1501 ++ pmd_t *old_pmd, pmd_t *new_pmd);
1502 + extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1503 + unsigned long addr, pgprot_t newprot,
1504 + int prot_numa);
1505 +diff --git a/kernel/memremap.c b/kernel/memremap.c
1506 +index 2b136d4988f7..790ddf3bce19 100644
1507 +--- a/kernel/memremap.c
1508 ++++ b/kernel/memremap.c
1509 +@@ -355,10 +355,27 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
1510 + struct dev_pagemap *pgmap;
1511 + struct page_map *page_map;
1512 + int error, nid, is_ram, i = 0;
1513 ++ struct dev_pagemap *conflict_pgmap;
1514 +
1515 + align_start = res->start & ~(SECTION_SIZE - 1);
1516 + align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
1517 + - align_start;
1518 ++ align_end = align_start + align_size - 1;
1519 ++
1520 ++ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
1521 ++ if (conflict_pgmap) {
1522 ++ dev_WARN(dev, "Conflicting mapping in same section\n");
1523 ++ put_dev_pagemap(conflict_pgmap);
1524 ++ return ERR_PTR(-ENOMEM);
1525 ++ }
1526 ++
1527 ++ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
1528 ++ if (conflict_pgmap) {
1529 ++ dev_WARN(dev, "Conflicting mapping in same section\n");
1530 ++ put_dev_pagemap(conflict_pgmap);
1531 ++ return ERR_PTR(-ENOMEM);
1532 ++ }
1533 ++
1534 + is_ram = region_intersects(align_start, align_size,
1535 + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
1536 +
1537 +@@ -396,7 +413,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
1538 +
1539 + mutex_lock(&pgmap_lock);
1540 + error = 0;
1541 +- align_end = align_start + align_size - 1;
1542 +
1543 + foreach_order_pgoff(res, order, pgoff) {
1544 + struct dev_pagemap *dup;
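
devm_memremap_pages() now probes both ends of the aligned range with get_dev_pagemap() and fails early on a conflict; note that even the error path must drop the reference the lookup took. A small userspace model of that lookup-returns-a-reference contract (hypothetical names, a fixed PFN window standing in for an already-claimed mapping):

#include <stdio.h>

struct pagemap { int refs; };

static struct pagemap existing = { .refs = 1 };

/* Models get_dev_pagemap(): a hit returns the object with an extra
 * reference; a miss returns NULL.  PFNs 100..199 are "taken". */
static struct pagemap *get_pagemap(unsigned long pfn)
{
    if (pfn >= 100 && pfn < 200) {
        existing.refs++;
        return &existing;
    }
    return NULL;
}

static void put_pagemap(struct pagemap *p) { p->refs--; }

static int claim_range(unsigned long start_pfn, unsigned long end_pfn)
{
    struct pagemap *conflict;

    /* Probe both boundaries of the aligned range, as the hunk does. */
    conflict = get_pagemap(start_pfn);
    if (!conflict)
        conflict = get_pagemap(end_pfn);
    if (conflict) {
        put_pagemap(conflict);  /* even the error path drops its ref */
        return -1;              /* stands in for ERR_PTR(-ENOMEM) */
    }
    return 0;
}

int main(void)
{
    printf("free range:        %d\n", claim_range(0, 50));
    printf("conflicting range: %d\n", claim_range(150, 250));
    printf("refs after checks: %d (no leak)\n", existing.refs);
    return 0;
}
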
1545 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1546 +index 39c1fedcfdb4..adacfe66cf3d 100644
1547 +--- a/mm/huge_memory.c
1548 ++++ b/mm/huge_memory.c
1549 +@@ -1765,7 +1765,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1550 +
1551 + bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1552 + unsigned long new_addr, unsigned long old_end,
1553 +- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
1554 ++ pmd_t *old_pmd, pmd_t *new_pmd)
1555 + {
1556 + spinlock_t *old_ptl, *new_ptl;
1557 + pmd_t pmd;
1558 +@@ -1796,7 +1796,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1559 + if (new_ptl != old_ptl)
1560 + spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1561 + pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1562 +- if (pmd_present(pmd) && pmd_dirty(pmd))
1563 ++ if (pmd_present(pmd))
1564 + force_flush = true;
1565 + VM_BUG_ON(!pmd_none(*new_pmd));
1566 +
1567 +@@ -1807,12 +1807,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1568 + }
1569 + pmd = move_soft_dirty_pmd(pmd);
1570 + set_pmd_at(mm, new_addr, new_pmd, pmd);
1571 +- if (new_ptl != old_ptl)
1572 +- spin_unlock(new_ptl);
1573 + if (force_flush)
1574 + flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1575 +- else
1576 +- *need_flush = true;
1577 ++ if (new_ptl != old_ptl)
1578 ++ spin_unlock(new_ptl);
1579 + spin_unlock(old_ptl);
1580 + return true;
1581 + }
1582 +diff --git a/mm/mremap.c b/mm/mremap.c
1583 +index 049470aa1e3e..88ceeb4ef817 100644
1584 +--- a/mm/mremap.c
1585 ++++ b/mm/mremap.c
1586 +@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
1587 + static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1588 + unsigned long old_addr, unsigned long old_end,
1589 + struct vm_area_struct *new_vma, pmd_t *new_pmd,
1590 +- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
1591 ++ unsigned long new_addr, bool need_rmap_locks)
1592 + {
1593 + struct mm_struct *mm = vma->vm_mm;
1594 + pte_t *old_pte, *new_pte, pte;
1595 +@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1596 +
1597 + pte = ptep_get_and_clear(mm, old_addr, old_pte);
1598 + /*
1599 +- * If we are remapping a dirty PTE, make sure
1600 ++ * If we are remapping a valid PTE, make sure
1601 + * to flush TLB before we drop the PTL for the
1602 +- * old PTE or we may race with page_mkclean().
1603 ++ * PTE.
1604 + *
1605 +- * This check has to be done after we removed the
1606 +- * old PTE from page tables or another thread may
1607 +- * dirty it after the check and before the removal.
1608 ++ * NOTE! Both old and new PTL matter: the old one
1609 ++ * for racing with page_mkclean(), the new one to
1610 ++ * make sure the physical page stays valid until
1611 ++ * the TLB entry for the old mapping has been
1612 ++ * flushed.
1613 + */
1614 +- if (pte_present(pte) && pte_dirty(pte))
1615 ++ if (pte_present(pte))
1616 + force_flush = true;
1617 + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
1618 + pte = move_soft_dirty_pte(pte);
1619 +@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
1620 + }
1621 +
1622 + arch_leave_lazy_mmu_mode();
1623 ++ if (force_flush)
1624 ++ flush_tlb_range(vma, old_end - len, old_end);
1625 + if (new_ptl != old_ptl)
1626 + spin_unlock(new_ptl);
1627 + pte_unmap(new_pte - 1);
1628 +- if (force_flush)
1629 +- flush_tlb_range(vma, old_end - len, old_end);
1630 +- else
1631 +- *need_flush = true;
1632 + pte_unmap_unlock(old_pte - 1, old_ptl);
1633 + if (need_rmap_locks)
1634 + drop_rmap_locks(vma);
1635 +@@ -200,7 +200,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1636 + {
1637 + unsigned long extent, next, old_end;
1638 + pmd_t *old_pmd, *new_pmd;
1639 +- bool need_flush = false;
1640 + unsigned long mmun_start; /* For mmu_notifiers */
1641 + unsigned long mmun_end; /* For mmu_notifiers */
1642 +
1643 +@@ -231,8 +230,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1644 + if (need_rmap_locks)
1645 + take_rmap_locks(vma);
1646 + moved = move_huge_pmd(vma, old_addr, new_addr,
1647 +- old_end, old_pmd, new_pmd,
1648 +- &need_flush);
1649 ++ old_end, old_pmd, new_pmd);
1650 + if (need_rmap_locks)
1651 + drop_rmap_locks(vma);
1652 + if (moved)
1653 +@@ -250,10 +248,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1654 + if (extent > LATENCY_LIMIT)
1655 + extent = LATENCY_LIMIT;
1656 + move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
1657 +- new_pmd, new_addr, need_rmap_locks, &need_flush);
1658 ++ new_pmd, new_addr, need_rmap_locks);
1659 + }
1660 +- if (need_flush)
1661 +- flush_tlb_range(vma, old_end-len, old_addr);
1662 +
1663 + mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
1664 +
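
Taken together, the huge_mm.h, huge_memory.c and mremap.c hunks change the flush policy from "dirty entries only" to "any present entry" and, per the new comment, perform the TLB flush before either page-table lock is released, dropping the deferred need_flush plumbing entirely. A compilable sketch of that ordering, with pthread mutexes standing in for the two PTLs (which in the kernel may even be the same lock); everything here is a model, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t old_ptl = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t new_ptl = PTHREAD_MUTEX_INITIALIZER;

static void flush_tlb_range_model(void)
{
    puts("flushing stale translation for the old address");
}

/* Models the corrected ordering in move_ptes()/move_huge_pmd():
 * decide on "present", not "dirty", and flush while BOTH locks are
 * still held, so the old page cannot be reused while another CPU may
 * still hold a stale cached translation for it. */
static void move_entry(bool was_present)
{
    bool force_flush = false;

    pthread_mutex_lock(&old_ptl);
    pthread_mutex_lock(&new_ptl);

    if (was_present)            /* was: present && dirty */
        force_flush = true;

    /* ... clear the old entry, install the new one ... */

    if (force_flush)
        flush_tlb_range_model();    /* before either unlock */

    pthread_mutex_unlock(&new_ptl);
    pthread_mutex_unlock(&old_ptl);
}

int main(void)
{
    move_entry(true);
    return 0;
}
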
1665 +diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
1666 +index bd1064d98e16..e92dfedccc16 100644
1667 +--- a/net/batman-adv/bat_v_elp.c
1668 ++++ b/net/batman-adv/bat_v_elp.c
1669 +@@ -227,7 +227,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
1670 + * the packet to be exactly of that size to make the link
1671 + * throughput estimation effective.
1672 + */
1673 +- skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1674 ++ skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1675 +
1676 + batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1677 + "Sending unicast (probe) ELP packet on interface %s to %pM\n",
1678 +@@ -254,6 +254,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1679 + struct batadv_priv *bat_priv;
1680 + struct sk_buff *skb;
1681 + u32 elp_interval;
1682 ++ bool ret;
1683 +
1684 + bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
1685 + hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
1686 +@@ -315,8 +316,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
1687 + * may sleep and that is not allowed in an rcu protected
1688 + * context. Therefore schedule a task for that.
1689 + */
1690 +- queue_work(batadv_event_workqueue,
1691 +- &hardif_neigh->bat_v.metric_work);
1692 ++ ret = queue_work(batadv_event_workqueue,
1693 ++ &hardif_neigh->bat_v.metric_work);
1694 ++
1695 ++ if (!ret)
1696 ++ batadv_hardif_neigh_put(hardif_neigh);
1697 + }
1698 + rcu_read_unlock();
1699 +
1700 +diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
1701 +index 422ee16b7854..c3c848f64fdd 100644
1702 +--- a/net/batman-adv/bridge_loop_avoidance.c
1703 ++++ b/net/batman-adv/bridge_loop_avoidance.c
1704 +@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1705 + {
1706 + struct batadv_bla_backbone_gw *backbone_gw;
1707 + struct ethhdr *ethhdr;
1708 ++ bool ret;
1709 +
1710 + ethhdr = eth_hdr(skb);
1711 +
1712 +@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1713 + if (unlikely(!backbone_gw))
1714 + return true;
1715 +
1716 +- queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1717 +- /* backbone_gw is unreferenced in the report work function function */
1718 ++ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1719 ++
1720 ++	/* backbone_gw is unreferenced in the report work function
1721 ++	 * if the queue_work() call was successful
1722 ++ */
1723 ++ if (!ret)
1724 ++ batadv_backbone_gw_put(backbone_gw);
1725 +
1726 + return true;
1727 + }
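
Both batman-adv hunks above apply one rule: queue_work() returns false when the work item is already pending, and in that case the reference that was being handed to the work function must be dropped on the spot or it leaks. A toy refcount model of the pattern (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

struct backbone_gw { int refs; };

static void gw_get(struct backbone_gw *g) { g->refs++; }
static void gw_put(struct backbone_gw *g) { g->refs--; }

/* Models queue_work(): false means the item was already pending, so
 * this call adds no execution of the work function (and therefore no
 * extra put by it). */
static bool queue_work_model(bool already_pending)
{
    return !already_pending;
}

static void schedule_report(struct backbone_gw *g, bool already_pending)
{
    gw_get(g);                      /* reference for the work function */
    if (!queue_work_model(already_pending))
        gw_put(g);                  /* nobody will consume it: undo */
}

int main(void)
{
    struct backbone_gw g = { .refs = 1 };

    schedule_report(&g, false);     /* queued: handler would put later */
    gw_put(&g);                     /* stand-in for that handler put */
    schedule_report(&g, true);      /* already pending: put right away */
    printf("refs: %d (back to baseline, nothing leaked)\n", g.refs);
    return 0;
}
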
1728 +diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
1729 +index 06276ae9f752..c6a7341f0527 100644
1730 +--- a/net/batman-adv/gateway_client.c
1731 ++++ b/net/batman-adv/gateway_client.c
1732 +@@ -31,6 +31,7 @@
1733 + #include <linux/kernel.h>
1734 + #include <linux/kref.h>
1735 + #include <linux/list.h>
1736 ++#include <linux/lockdep.h>
1737 + #include <linux/netdevice.h>
1738 + #include <linux/netlink.h>
1739 + #include <linux/rculist.h>
1740 +@@ -325,6 +326,9 @@ out:
1741 + * @bat_priv: the bat priv with all the soft interface information
1742 + * @orig_node: originator announcing gateway capabilities
1743 + * @gateway: announced bandwidth information
1744 ++ *
1745 ++ * Has to be called with the appropriate locks held
1746 ++ * (gw.list_lock).
1747 + */
1748 + static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1749 + struct batadv_orig_node *orig_node,
1750 +@@ -332,6 +336,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1751 + {
1752 + struct batadv_gw_node *gw_node;
1753 +
1754 ++ lockdep_assert_held(&bat_priv->gw.list_lock);
1755 ++
1756 + if (gateway->bandwidth_down == 0)
1757 + return;
1758 +
1759 +@@ -346,10 +352,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
1760 + gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
1761 + gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
1762 +
1763 +- spin_lock_bh(&bat_priv->gw.list_lock);
1764 + kref_get(&gw_node->refcount);
1765 + hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
1766 +- spin_unlock_bh(&bat_priv->gw.list_lock);
1767 +
1768 + batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1769 + "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
1770 +@@ -405,11 +409,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
1771 + {
1772 + struct batadv_gw_node *gw_node, *curr_gw = NULL;
1773 +
1774 ++ spin_lock_bh(&bat_priv->gw.list_lock);
1775 + gw_node = batadv_gw_node_get(bat_priv, orig_node);
1776 + if (!gw_node) {
1777 + batadv_gw_node_add(bat_priv, orig_node, gateway);
1778 ++ spin_unlock_bh(&bat_priv->gw.list_lock);
1779 + goto out;
1780 + }
1781 ++ spin_unlock_bh(&bat_priv->gw.list_lock);
1782 +
1783 + if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
1784 + (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
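
The gateway_client.c fix moves gw.list_lock up into batadv_gw_node_update() so the lookup and the add happen in one critical section, and batadv_gw_node_add() now documents the contract with lockdep_assert_held(). A userspace approximation, using a plain flag plus assert() in place of lockdep (names invented):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_lock_held;      /* crude stand-in for lockdep state */
static int stored = -1;

/* Callee: checks the caller's locking contract, the userspace cousin
 * of lockdep_assert_held(&bat_priv->gw.list_lock). */
static void node_add(int v)
{
    assert(list_lock_held);
    stored = v;
}

/* Caller: one critical section covers lookup AND insert, so two
 * racing updaters cannot both miss the lookup and add duplicates. */
static void node_update(int v)
{
    pthread_mutex_lock(&list_lock);
    list_lock_held = 1;
    if (stored < 0)
        node_add(v);
    list_lock_held = 0;
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    node_update(7);
    node_update(9);     /* sees the first entry, adds nothing */
    printf("stored: %d\n", stored);
    return 0;
}
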
1785 +diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
1786 +index 3604d7899e2c..7a7dcac20566 100644
1787 +--- a/net/batman-adv/network-coding.c
1788 ++++ b/net/batman-adv/network-coding.c
1789 +@@ -850,16 +850,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1790 + spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
1791 + struct list_head *list;
1792 +
1793 ++ /* Select ingoing or outgoing coding node */
1794 ++ if (in_coding) {
1795 ++ lock = &orig_neigh_node->in_coding_list_lock;
1796 ++ list = &orig_neigh_node->in_coding_list;
1797 ++ } else {
1798 ++ lock = &orig_neigh_node->out_coding_list_lock;
1799 ++ list = &orig_neigh_node->out_coding_list;
1800 ++ }
1801 ++
1802 ++ spin_lock_bh(lock);
1803 ++
1804 + /* Check if nc_node is already added */
1805 + nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
1806 +
1807 + /* Node found */
1808 + if (nc_node)
1809 +- return nc_node;
1810 ++ goto unlock;
1811 +
1812 + nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
1813 + if (!nc_node)
1814 +- return NULL;
1815 ++ goto unlock;
1816 +
1817 + /* Initialize nc_node */
1818 + INIT_LIST_HEAD(&nc_node->list);
1819 +@@ -868,22 +879,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
1820 + kref_get(&orig_neigh_node->refcount);
1821 + nc_node->orig_node = orig_neigh_node;
1822 +
1823 +- /* Select ingoing or outgoing coding node */
1824 +- if (in_coding) {
1825 +- lock = &orig_neigh_node->in_coding_list_lock;
1826 +- list = &orig_neigh_node->in_coding_list;
1827 +- } else {
1828 +- lock = &orig_neigh_node->out_coding_list_lock;
1829 +- list = &orig_neigh_node->out_coding_list;
1830 +- }
1831 +-
1832 + batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
1833 + nc_node->addr, nc_node->orig_node->orig);
1834 +
1835 + /* Add nc_node to orig_node */
1836 +- spin_lock_bh(lock);
1837 + kref_get(&nc_node->refcount);
1838 + list_add_tail_rcu(&nc_node->list, list);
1839 ++
1840 ++unlock:
1841 + spin_unlock_bh(lock);
1842 +
1843 + return nc_node;
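
The network-coding hunk hoists the spinlock above batadv_nc_find_nc_node(), closing the check-then-act window in which two CPUs could both miss the lookup and insert duplicate nc_node entries. The classic get-or-create shape, sketched in userspace C with a pthread mutex and hypothetical names, mirroring the hunk's single unlock label:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { char addr[8]; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Lookup and insert under ONE lock acquisition: a racing second
 * caller must wait, then finds the entry instead of duplicating it. */
static struct node *get_or_create(const char *addr)
{
    struct node *n;

    pthread_mutex_lock(&list_lock);
    for (n = head; n; n = n->next)
        if (!strcmp(n->addr, addr))
            goto unlock;        /* found: same exit path as create */

    n = calloc(1, sizeof(*n));
    if (!n)
        goto unlock;
    snprintf(n->addr, sizeof(n->addr), "%s", addr);
    n->next = head;
    head = n;
unlock:
    pthread_mutex_unlock(&list_lock);
    return n;
}

int main(void)
{
    struct node *a = get_or_create("aa:bb");
    struct node *b = get_or_create("aa:bb");

    printf("same node: %s\n", a == b ? "yes" : "no");
    return 0;
}
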
1844 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
1845 +index aa2c49fa31ce..8cedb5db1ab3 100644
1846 +--- a/net/batman-adv/soft-interface.c
1847 ++++ b/net/batman-adv/soft-interface.c
1848 +@@ -566,15 +566,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1849 + struct batadv_softif_vlan *vlan;
1850 + int err;
1851 +
1852 ++ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1853 ++
1854 + vlan = batadv_softif_vlan_get(bat_priv, vid);
1855 + if (vlan) {
1856 + batadv_softif_vlan_put(vlan);
1857 ++ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1858 + return -EEXIST;
1859 + }
1860 +
1861 + vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
1862 +- if (!vlan)
1863 ++ if (!vlan) {
1864 ++ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1865 + return -ENOMEM;
1866 ++ }
1867 +
1868 + vlan->bat_priv = bat_priv;
1869 + vlan->vid = vid;
1870 +@@ -582,17 +587,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
1871 +
1872 + atomic_set(&vlan->ap_isolation, 0);
1873 +
1874 ++ kref_get(&vlan->refcount);
1875 ++ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1876 ++ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1877 ++
1878 ++ /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
1879 ++ * sleeping behavior of the sysfs functions and the fs_reclaim lock
1880 ++ */
1881 + err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
1882 + if (err) {
1883 +- kfree(vlan);
1884 ++ /* ref for the function */
1885 ++ batadv_softif_vlan_put(vlan);
1886 ++
1887 ++ /* ref for the list */
1888 ++ batadv_softif_vlan_put(vlan);
1889 + return err;
1890 + }
1891 +
1892 +- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
1893 +- kref_get(&vlan->refcount);
1894 +- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
1895 +- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
1896 +-
1897 + /* add a new TT local entry. This one will be marked with the NOPURGE
1898 + * flag
1899 + */
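
The soft-interface change publishes the vlan (list insertion plus its reference) inside the spinlock, then performs the sysfs registration after unlocking, because sysfs calls can sleep and sleeping under a spinlock is forbidden; the failure path then has to unwind both references. A simplified userspace model of that publish-then-sleep-then-unwind sequence (illustrative names; usleep() stands in for the sleeping sysfs call, and the unlink is trivial because the model stays single-threaded):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct vlan { int refs; struct vlan *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vlan *vlan_list;

/* Stands in for batadv_sysfs_add_vlan(): it may sleep, so it must run
 * outside any spinlock-like critical section. */
static int sysfs_add_model(int fail)
{
    usleep(1000);
    return fail ? -1 : 0;
}

static int create_vlan(int fail_sysfs)
{
    struct vlan *v = calloc(1, sizeof(*v));

    if (!v)
        return -1;
    v->refs = 1;                    /* creator's reference */

    pthread_mutex_lock(&list_lock);
    v->refs++;                      /* list's reference */
    v->next = vlan_list;
    vlan_list = v;
    pthread_mutex_unlock(&list_lock);

    if (sysfs_add_model(fail_sysfs)) {
        /* Unwind: drop the list entry and both references. */
        pthread_mutex_lock(&list_lock);
        vlan_list = v->next;        /* v is still the head here */
        pthread_mutex_unlock(&list_lock);
        v->refs -= 2;
        if (v->refs == 0)
            free(v);
        return -1;
    }
    v->refs--;                      /* creator done; list keeps one */
    return 0;
}

int main(void)
{
    printf("ok path:      %d\n", create_vlan(0));
    printf("failure path: %d\n", create_vlan(1));
    return 0;
}
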
1900 +diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
1901 +index 0ae8b30e4eaa..2ef9b136fc39 100644
1902 +--- a/net/batman-adv/sysfs.c
1903 ++++ b/net/batman-adv/sysfs.c
1904 +@@ -186,7 +186,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1905 + \
1906 + return __batadv_store_uint_attr(buff, count, _min, _max, \
1907 + _post_func, attr, \
1908 +- &bat_priv->_var, net_dev); \
1909 ++ &bat_priv->_var, net_dev, \
1910 ++ NULL); \
1911 + }
1912 +
1913 + #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
1914 +@@ -260,7 +261,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
1915 + \
1916 + length = __batadv_store_uint_attr(buff, count, _min, _max, \
1917 + _post_func, attr, \
1918 +- &hard_iface->_var, net_dev); \
1919 ++ &hard_iface->_var, \
1920 ++ hard_iface->soft_iface, \
1921 ++ net_dev); \
1922 + \
1923 + batadv_hardif_put(hard_iface); \
1924 + return length; \
1925 +@@ -354,10 +357,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
1926 +
1927 + static int batadv_store_uint_attr(const char *buff, size_t count,
1928 + struct net_device *net_dev,
1929 ++ struct net_device *slave_dev,
1930 + const char *attr_name,
1931 + unsigned int min, unsigned int max,
1932 + atomic_t *attr)
1933 + {
1934 ++ char ifname[IFNAMSIZ + 3] = "";
1935 + unsigned long uint_val;
1936 + int ret;
1937 +
1938 +@@ -383,8 +388,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
1939 + if (atomic_read(attr) == uint_val)
1940 + return count;
1941 +
1942 +- batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
1943 +- attr_name, atomic_read(attr), uint_val);
1944 ++ if (slave_dev)
1945 ++ snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
1946 ++
1947 ++ batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
1948 ++ attr_name, ifname, atomic_read(attr), uint_val);
1949 +
1950 + atomic_set(attr, uint_val);
1951 + return count;
1952 +@@ -395,12 +403,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
1953 + void (*post_func)(struct net_device *),
1954 + const struct attribute *attr,
1955 + atomic_t *attr_store,
1956 +- struct net_device *net_dev)
1957 ++ struct net_device *net_dev,
1958 ++ struct net_device *slave_dev)
1959 + {
1960 + int ret;
1961 +
1962 +- ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
1963 +- attr_store);
1964 ++ ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
1965 ++ attr->name, min, max, attr_store);
1966 + if (post_func && ret)
1967 + post_func(net_dev);
1968 +
1969 +@@ -569,7 +578,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
1970 + return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
1971 + batadv_post_gw_reselect, attr,
1972 + &bat_priv->gw.sel_class,
1973 +- bat_priv->soft_iface);
1974 ++ bat_priv->soft_iface, NULL);
1975 + }
1976 +
1977 + static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
1978 +@@ -1078,8 +1087,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1979 + if (old_tp_override == tp_override)
1980 + goto out;
1981 +
1982 +- batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1983 +- "throughput_override",
1984 ++ batadv_info(hard_iface->soft_iface,
1985 ++ "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1986 ++ "throughput_override", net_dev->name,
1987 + old_tp_override / 10, old_tp_override % 10,
1988 + tp_override / 10, tp_override % 10);
1989 +
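
The sysfs.c hunks thread an optional slave net_device through to the log helper so messages on a hard interface name both devices; with no slave, the prefix buffer stays empty and the format string degrades gracefully. The conditional-prefix idiom in isolation, as a hedged standalone sketch (IFNAMSIZ defined locally for the example):

#include <stdio.h>

#define IFNAMSIZ 16

/* Optional-prefix idiom from the hunk: format the prefix only when a
 * slave name exists; an empty buffer makes "%s" a no-op otherwise.
 * IFNAMSIZ + 3 leaves room for ": " plus the terminating NUL. */
static void log_change(const char *attr, const char *slave,
                       int old_val, int new_val)
{
    char ifname[IFNAMSIZ + 3] = "";

    if (slave)
        snprintf(ifname, sizeof(ifname), "%s: ", slave);

    printf("%s: %sChanging from: %i to: %i\n",
           attr, ifname, old_val, new_val);
}

int main(void)
{
    log_change("elp_interval", NULL, 500, 1000);    /* soft interface */
    log_change("elp_interval", "eth0", 500, 1000);  /* slave present */
    return 0;
}
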
1990 +diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
1991 +index 0f4d4eece3e4..9da3455847ff 100644
1992 +--- a/net/batman-adv/translation-table.c
1993 ++++ b/net/batman-adv/translation-table.c
1994 +@@ -1587,6 +1587,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1995 + {
1996 + struct batadv_tt_orig_list_entry *orig_entry;
1997 +
1998 ++ spin_lock_bh(&tt_global->list_lock);
1999 ++
2000 + orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
2001 + if (orig_entry) {
2002 + /* refresh the ttvn: the current value could be a bogus one that
2003 +@@ -1609,11 +1611,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
2004 + orig_entry->flags = flags;
2005 + kref_init(&orig_entry->refcount);
2006 +
2007 +- spin_lock_bh(&tt_global->list_lock);
2008 + kref_get(&orig_entry->refcount);
2009 + hlist_add_head_rcu(&orig_entry->list,
2010 + &tt_global->orig_list);
2011 +- spin_unlock_bh(&tt_global->list_lock);
2012 + atomic_inc(&tt_global->orig_list_count);
2013 +
2014 + sync_flags:
2015 +@@ -1621,6 +1621,8 @@ sync_flags:
2016 + out:
2017 + if (orig_entry)
2018 + batadv_tt_orig_list_entry_put(orig_entry);
2019 ++
2020 ++ spin_unlock_bh(&tt_global->list_lock);
2021 + }
2022 +
2023 + /**
2024 +diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
2025 +index 1d9e267caec9..d6d6d95e48aa 100644
2026 +--- a/net/batman-adv/tvlv.c
2027 ++++ b/net/batman-adv/tvlv.c
2028 +@@ -528,15 +528,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
2029 + {
2030 + struct batadv_tvlv_handler *tvlv_handler;
2031 +
2032 ++ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
2033 ++
2034 + tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
2035 + if (tvlv_handler) {
2036 ++ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
2037 + batadv_tvlv_handler_put(tvlv_handler);
2038 + return;
2039 + }
2040 +
2041 + tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
2042 +- if (!tvlv_handler)
2043 ++ if (!tvlv_handler) {
2044 ++ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
2045 + return;
2046 ++ }
2047 +
2048 + tvlv_handler->ogm_handler = optr;
2049 + tvlv_handler->unicast_handler = uptr;
2050 +@@ -546,7 +551,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
2051 + kref_init(&tvlv_handler->refcount);
2052 + INIT_HLIST_NODE(&tvlv_handler->list);
2053 +
2054 +- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
2055 + kref_get(&tvlv_handler->refcount);
2056 + hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
2057 + spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);