From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 10 Feb 2021 10:15:58
Message-Id: 1612952117.7a70358f7d8e29ea67ced2af4015fde285ae84a7.alicef@gentoo

commit: 7a70358f7d8e29ea67ced2af4015fde285ae84a7
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 10 10:15:07 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 10 10:15:17 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a70358f

Linux patch 4.9.257

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

0000_README | 4 +
1256_linux-4.9.257.patch | 1823 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1827 insertions(+)

diff --git a/0000_README b/0000_README
index d822171..e08f1e6 100644
--- a/0000_README
+++ b/0000_README
@@ -1067,6 +1067,10 @@ Patch: 1255_linux-4.9.256.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.256
 
+Patch: 1256_linux-4.9.257.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.257
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1256_linux-4.9.257.patch b/1256_linux-4.9.257.patch
new file mode 100644
index 0000000..7d4e271
--- /dev/null
+++ b/1256_linux-4.9.257.patch
@@ -0,0 +1,1823 @@
+diff --git a/Makefile b/Makefile
+index 69af44d3dcd14..e53096154f816 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 256
++SUBLEVEL = 257
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+@@ -841,12 +841,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+-# ensure -fcf-protection is disabled when using retpoline as it is
+-# incompatible with -mindirect-branch=thunk-extern
+-ifdef CONFIG_RETPOLINE
+-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-endif
+-
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+
+@@ -1141,7 +1135,7 @@ endef
+
+ define filechk_version.h
+ (echo \#define LINUX_VERSION_CODE $(shell \
+- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
+ echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
+ endef
+
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 96a3d73ef4bf4..fd6c9169fa78e 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("ldrb %0, [%1, %2]"
++ asm volatile("ldrb %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 2:
+- asm("ldrh %0, [%1, %2]"
++ asm volatile("ldrh %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 4:
+- asm("ldr %0, [%1, %2]"
++ asm volatile("ldr %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ }
+@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("strb %0, [%1, %2]"
++ asm volatile("strb %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 2:
+- asm("strh %0, [%1, %2]"
++ asm volatile("strh %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 4:
+- asm("str %0, [%1, %2]"
++ asm volatile("str %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 940ed27a62123..a95d414663b1e 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -137,6 +137,9 @@ else
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
+
++ # Intel CET isn't enabled in the kernel
++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
++
+ # -funit-at-a-time shrinks the kernel .text considerably
+ # unfortunately it makes reading oopses harder.
+ KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index f39fd349cef65..a6d034257b7bb 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -176,16 +176,6 @@ static inline void lapic_update_tsc_freq(void) { }
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #ifdef CONFIG_X86_X2APIC
+-/*
+- * Make previous memory operations globally visible before
+- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+- * mfence for this.
+- */
+-static inline void x2apic_wrmsr_fence(void)
+-{
+- asm volatile("mfence" : : : "memory");
+-}
+-
+ static inline void native_apic_msr_write(u32 reg, u32 v)
+ {
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index a0f450b21d676..89b75edf24af8 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -110,4 +110,22 @@ do { \
+
+ #include <asm-generic/barrier.h>
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ asm volatile("mfence; lfence" : : : "memory");
++}
++
+ #endif /* _ASM_X86_BARRIER_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 722a76b88bcc0..107a9eff587bf 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -42,6 +42,7 @@
+ #include <asm/x86_init.h>
+ #include <asm/pgalloc.h>
+ #include <linux/atomic.h>
++#include <asm/barrier.h>
+ #include <asm/mpspec.h>
+ #include <asm/i8259.h>
+ #include <asm/proto.h>
+@@ -476,6 +477,9 @@ static int lapic_next_deadline(unsigned long delta,
+ {
+ u64 tsc;
+
++ /* This MSR is special and need a special fence: */
++ weak_wrmsr_fence();
++
+ tsc = rdtsc();
+ wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ return 0;
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 200af5ae96626..ca64c150d1c53 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -27,7 +27,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+ }
+
+@@ -40,7 +41,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long flags;
+ u32 dest;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index ff111f05a3145..8889420ea7c6f 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -40,7 +40,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ }
+
+@@ -51,7 +52,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long this_cpu;
+ unsigned long flags;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index 35e8fbca10ad5..c53c88b531639 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -188,6 +188,8 @@ struct acpi_thermal {
+ int tz_enabled;
+ int kelvin_offset;
+ struct work_struct thermal_check_work;
++ struct mutex thermal_check_lock;
++ atomic_t thermal_check_count;
+ };
+
+ /* --------------------------------------------------------------------------
+@@ -513,17 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+ return 0;
+ }
+
+-static void acpi_thermal_check(void *data)
+-{
+- struct acpi_thermal *tz = data;
+-
+- if (!tz->tz_enabled)
+- return;
+-
+- thermal_zone_device_update(tz->thermal_zone,
+- THERMAL_EVENT_UNSPECIFIED);
+-}
+-
+ /* sys I/F for generic thermal sysfs support */
+
+ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
+@@ -557,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
+ return 0;
+ }
+
++static void acpi_thermal_check_fn(struct work_struct *work);
++
+ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+ {
+@@ -582,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s kernel ACPI thermal control\n",
+ tz->tz_enabled ? "Enable" : "Disable"));
+- acpi_thermal_check(tz);
++ acpi_thermal_check_fn(&tz->thermal_check_work);
+ }
+ return 0;
+ }
+@@ -951,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
++static void acpi_queue_thermal_check(struct acpi_thermal *tz)
++{
++ if (!work_pending(&tz->thermal_check_work))
++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++}
++
+ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+ {
+ struct acpi_thermal *tz = acpi_driver_data(device);
+@@ -961,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+
+ switch (event) {
+ case ACPI_THERMAL_NOTIFY_TEMPERATURE:
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ break;
+ case ACPI_THERMAL_NOTIFY_THRESHOLDS:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_THERMAL_NOTIFY_DEVICES:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+@@ -1071,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
+ {
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+-
+- acpi_thermal_check(tz);
++
++ if (!tz->tz_enabled)
++ return;
++ /*
++ * In general, it is not sufficient to check the pending bit, because
++ * subsequent instances of this function may be queued after one of them
++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
++ * one of them is running, though, because it may have done the actual
++ * check some time ago, so allow at least one of them to block on the
++ * mutex while another one is running the update.
++ */
++ if (!atomic_add_unless(&tz->thermal_check_count, -1, 1))
++ return;
++
++ mutex_lock(&tz->thermal_check_lock);
++
++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
++
++ atomic_inc(&tz->thermal_check_count);
++
++ mutex_unlock(&tz->thermal_check_lock);
+ }
+
+ static int acpi_thermal_add(struct acpi_device *device)
+@@ -1103,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device)
+ if (result)
+ goto free_memory;
+
++ atomic_set(&tz->thermal_check_count, 3);
++ mutex_init(&tz->thermal_check_lock);
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+@@ -1168,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev)
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+ }
+
+- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++ acpi_queue_thermal_check(tz);
+
+ return AE_OK;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 637f1347cd13d..815b69d35722c 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -232,9 +232,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -313,6 +321,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -446,8 +457,12 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index fa07be0b4500e..2317f8d3fef6f 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 593a4bfcba428..b2061cfa05ab4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3323,6 +3323,12 @@ static int __init init_dmars(void)
+
+ if (!ecap_pass_through(iommu->ecap))
+ hw_pass_through = 0;
++
++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
++ pr_info("Disable batched IOTLB flush due to virtualization");
++ intel_iommu_strict = 1;
++ }
++
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu))
+ intel_svm_alloc_pasid_tables(iommu);
+diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
+index 934c4816d78bf..14c56cf66ddc6 100644
+--- a/drivers/mmc/core/sdio_cis.c
++++ b/drivers/mmc/core/sdio_cis.c
+@@ -24,6 +24,8 @@
+ #include "sdio_cis.h"
+ #include "sdio_ops.h"
+
++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
++
+ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
+ const unsigned char *buf, unsigned size)
+ {
+@@ -269,6 +271,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+
+ do {
+ unsigned char tpl_code, tpl_link;
++ unsigned long timeout = jiffies +
++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
+
+ ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
+ if (ret)
+@@ -321,6 +325,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
+ prev = &this->next;
+
+ if (ret == -ENOENT) {
++ if (time_after(jiffies, timeout))
++ break;
+ /* warn about unknown tuples */
+ pr_warn_ratelimited("%s: queuing unknown"
+ " CIS tuple 0x%02x (%u bytes)\n",
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 84def5819d2ec..a3742a3b413cd 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -515,15 +515,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+- if (!priv->master_mii_bus)
++ if (!priv->master_mii_bus) {
++ of_node_put(dn);
+ return -EPROBE_DEFER;
++ }
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+- if (!priv->slave_mii_bus)
++ if (!priv->slave_mii_bus) {
++ of_node_put(dn);
+ return -ENOMEM;
++ }
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index a8ff4f8a6b87d..f23559a2b2bd1 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3496,6 +3496,12 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
++ /* This barrier makes sure ibmvnic_next_crq()'s
++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
++ * before ibmvnic_handle_crq()'s
++ * switch(gen_crq->first) and switch(gen_crq->cmd).
++ */
++ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 04b3ac17531db..7865feb8e5e83 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -2891,8 +2891,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- if (sdev->type == TYPE_DISK)
++ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
++ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+ }
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index d0a86ef806522..59fd6101f188b 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1585,8 +1585,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+ }
+
+ /*
+@@ -1605,6 +1610,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
++skip_resp:
+ fc_exch_release(ep);
+ return;
+ rel:
+@@ -1848,10 +1854,16 @@ static void fc_exch_reset(struct fc_exch *ep)
+
+ fc_exch_hold(ep);
+
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
++skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+ }
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 76701d6ce92c3..582099f4f449f 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -1349,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
+ if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
+ return -EINVAL;
+
+- alts = usblp->protocol[protocol].alt_setting;
+- if (alts < 0)
+- return -EINVAL;
+- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+- if (r < 0) {
+- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+- alts, usblp->ifnum);
+- return r;
++ /* Don't unnecessarily set the interface if there's a single alt. */
++ if (usblp->intf->num_altsetting > 1) {
++ alts = usblp->protocol[protocol].alt_setting;
++ if (alts < 0)
++ return -EINVAL;
++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
++ if (r < 0) {
++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
++ alts, usblp->ifnum);
++ return r;
++ }
+ }
+
+ usblp->bidir = (usblp->protocol[protocol].epread != NULL);
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 9381a108a9851..6bde5db1490a1 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -942,7 +942,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
+ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ u32 windex)
+ {
+- struct dwc2_hsotg_ep *ep;
+ int dir = (windex & USB_DIR_IN) ? 1 : 0;
+ int idx = windex & 0x7F;
+
+@@ -952,12 +951,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
+ if (idx > hsotg->num_of_eps)
+ return NULL;
+
+- ep = index_to_ep(hsotg, idx, dir);
+-
+- if (idx && ep->dir_in != dir)
+- return NULL;
+-
+- return ep;
++ return index_to_ep(hsotg, idx, dir);
+ }
+
+ /**
+diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
+index 25a2c2e485920..3396e7193dba2 100644
+--- a/drivers/usb/gadget/legacy/ether.c
++++ b/drivers/usb/gadget/legacy/ether.c
+@@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
+ struct usb_descriptor_header *usb_desc;
+
+ usb_desc = usb_otg_descriptor_alloc(gadget);
+- if (!usb_desc)
++ if (!usb_desc) {
++ status = -ENOMEM;
+ goto fail1;
++ }
+ usb_otg_descriptor_init(gadget, usb_desc);
+ otg_desc[0] = usb_desc;
+ otg_desc[1] = NULL;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6e1d65fda97e0..69dbfffde4df0 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -692,11 +692,16 @@ void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ /* for in tranfers we need to copy the data from bounce to sg */
+- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+- seg->bounce_len, seg->bounce_offs);
+- if (len != seg->bounce_len)
+- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+- len, seg->bounce_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
++ seg->bounce_len, seg->bounce_offs);
++ if (len != seg->bounce_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
++ len, seg->bounce_len);
++ } else {
++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
++ seg->bounce_len);
++ }
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+ }
+@@ -3196,12 +3201,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+
+ /* create a max max_pkt sized bounce buffer pointed to by last trb */
+ if (usb_urb_dir_out(urb)) {
+- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+- seg->bounce_buf, new_buff_len, enqd_len);
+- if (len != new_buff_len)
+- xhci_warn(xhci,
+- "WARN Wrong bounce buffer write length: %zu != %d\n",
+- len, new_buff_len);
++ if (urb->num_sgs) {
++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
++ seg->bounce_buf, new_buff_len, enqd_len);
++ if (len != new_buff_len)
++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
++ len, new_buff_len);
++ } else {
++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
++ }
++
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index abf8a3cac2651..1847074b4d819 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -58,6 +58,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
+@@ -198,6 +199,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
+ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
+ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
+ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 1998b314368e0..3c536eed07541 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
+ #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
+ #define CINTERION_PRODUCT_CLS8 0x00b0
++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -1896,6 +1898,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
++ .driver_info = RSVD(3)},
++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
++ .driver_info = RSVD(0)},
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 0262c8f7e7c76..09f49dab77393 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -830,6 +830,7 @@ static int
+ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ {
+ struct inode *inode;
++ int rc;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+@@ -839,8 +840,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
+ if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+ CIFS_I(inode)->time = 0; /* force reval */
+
+- if (cifs_revalidate_dentry(direntry))
+- return 0;
++ rc = cifs_revalidate_dentry(direntry);
++ if (rc) {
++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
++ switch (rc) {
++ case -ENOENT:
++ case -ESTALE:
++ /*
++ * Those errors mean the dentry is invalid
++ * (file was deleted or recreated)
++ */
++ return 0;
++ default:
++ /*
++ * Otherwise some unexpected error happened
++ * report it as-is to VFS layer
++ */
++ return rc;
++ }
++ }
+ else {
+ /*
+ * If the inode wasn't known to be a dfs entry when
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 253b03451b727..ba1909b887efb 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -665,8 +665,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
+
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
++ set_page_huge_active(page);
+ /*
+- * page_put due to reference from alloc_huge_page()
++ * put_page() due to reference from alloc_huge_page()
+ * unlock_page because locked by add_to_page_cache()
+ */
+ put_page(page);
+diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
+index 698d51a0eea3f..4adf7faeaeb59 100644
+--- a/include/linux/elfcore.h
++++ b/include/linux/elfcore.h
+@@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
+ }
+ #endif
+
++#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+ /*
+ * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the gate DSO contents. Dumping its
+@@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
+ extern int
+ elf_core_write_extra_data(struct coredump_params *cprm);
+ extern size_t elf_core_extra_data_size(void);
++#else
++static inline Elf_Half elf_core_extra_phdrs(void)
++{
++ return 0;
++}
++
++static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
++{
++ return 1;
++}
++
++static inline int elf_core_write_extra_data(struct coredump_params *cprm)
++{
++ return 1;
++}
++
++static inline size_t elf_core_extra_data_size(void)
++{
++ return 0;
++}
++#endif
+
+ #endif /* _LINUX_ELFCORE_H */
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 6b8a7b654771a..4e4c35a6bfc5a 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -502,6 +502,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
+ {
+ atomic_long_sub(l, &mm->hugetlb_usage);
+ }
++
++void set_page_huge_active(struct page *page);
++
+ #else /* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+ #define alloc_huge_page(v, a, r) NULL
+diff --git a/kernel/Makefile b/kernel/Makefile
+index 92488cf6ad913..6c4a28cf680e4 100644
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -90,7 +90,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
+ obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
+ obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
+ obj-$(CONFIG_LATENCYTOP) += latencytop.o
+-obj-$(CONFIG_ELFCORE) += elfcore.o
+ obj-$(CONFIG_FUNCTION_TRACER) += trace/
+ obj-$(CONFIG_TRACING) += trace/
+ obj-$(CONFIG_TRACE_CLOCK) += trace/
+diff --git a/kernel/elfcore.c b/kernel/elfcore.c
+deleted file mode 100644
+index a2b29b9bdfcb2..0000000000000
+--- a/kernel/elfcore.c
++++ /dev/null
+@@ -1,25 +0,0 @@
+-#include <linux/elf.h>
+-#include <linux/fs.h>
+-#include <linux/mm.h>
+-#include <linux/binfmts.h>
+-#include <linux/elfcore.h>
+-
+-Elf_Half __weak elf_core_extra_phdrs(void)
+-{
+- return 0;
+-}
+-
+-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
+-{
+- return 1;
+-}
+-
+-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
+-{
+- return 1;
+-}
+-
+-size_t __weak elf_core_extra_data_size(void)
+-{
+- return 0;
+-}
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2ef8c5aef35d0..83db5787c67ef 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -837,6 +837,29 @@ static struct futex_pi_state * alloc_pi_state(void)
+ return pi_state;
+ }
+
++static void pi_state_update_owner(struct futex_pi_state *pi_state,
++ struct task_struct *new_owner)
++{
++ struct task_struct *old_owner = pi_state->owner;
++
++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
++
++ if (old_owner) {
++ raw_spin_lock(&old_owner->pi_lock);
++ WARN_ON(list_empty(&pi_state->list));
++ list_del_init(&pi_state->list);
++ raw_spin_unlock(&old_owner->pi_lock);
++ }
++
++ if (new_owner) {
++ raw_spin_lock(&new_owner->pi_lock);
++ WARN_ON(!list_empty(&pi_state->list));
++ list_add(&pi_state->list, &new_owner->pi_state_list);
++ pi_state->owner = new_owner;
++ raw_spin_unlock(&new_owner->pi_lock);
++ }
++}
++
+ /*
+ * Drops a reference to the pi_state object and frees or caches it
+ * when the last reference is gone.
+@@ -856,11 +879,8 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+- raw_spin_lock_irq(&pi_state->owner->pi_lock);
+- list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+-
+- rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
++ pi_state_update_owner(pi_state, NULL);
++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
+ }
+
+ if (current->pi_state_cache)
+@@ -941,7 +961,7 @@ static void exit_pi_state_list(struct task_struct *curr)
+ pi_state->owner = NULL;
+ raw_spin_unlock_irq(&curr->pi_lock);
+
+- rt_mutex_unlock(&pi_state->pi_mutex);
++ rt_mutex_futex_unlock(&pi_state->pi_mutex);
+
+ spin_unlock(&hb->lock);
+
+@@ -997,7 +1017,8 @@ static void exit_pi_state_list(struct task_struct *curr)
+ * FUTEX_OWNER_DIED bit. See [4]
+ *
+ * [10] There is no transient state which leaves owner and user space
+- * TID out of sync.
++ * TID out of sync. Except one error case where the kernel is denied
++ * write access to the user address, see fixup_pi_state_owner().
+ */
+
+ /*
+@@ -1394,12 +1415,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+
+ /*
+- * It is possible that the next waiter (the one that brought
+- * this owner to the kernel) timed out and is no longer
+- * waiting on the lock.
++ * When we interleave with futex_lock_pi() where it does
++ * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter,
++ * but the rt_mutex's wait_list can be empty (either still, or again,
++ * depending on which side we land).
++ *
++ * When this happens, give up our locks and try again, giving the
++ * futex_lock_pi() instance time to complete, either by waiting on the
++ * rtmutex or removing itself from the futex queue.
+ */
+- if (!new_owner)
+- new_owner = this->task;
++ if (!new_owner) {
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ return -EAGAIN;
++ }
+
+ /*
+ * We pass it to the next owner. The WAITERS bit is always
+@@ -1425,36 +1453,24 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+ else
+ ret = -EINVAL;
+ }
+- if (ret) {
+- raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
+-
+- raw_spin_lock(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock(&pi_state->owner->pi_lock);
+
+- raw_spin_lock(&new_owner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &new_owner->pi_state_list);
+- pi_state->owner = new_owner;
+- raw_spin_unlock(&new_owner->pi_lock);
++ if (!ret) {
++ /*
++ * This is a point of no return; once we modified the uval
++ * there is no going back and subsequent operations must
++ * not fail.
++ */
++ pi_state_update_owner(pi_state, new_owner);
++ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ }
+
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+-
+- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+-
+- /*
+- * First unlock HB so the waiter does not spin on it once he got woken
+- * up. Second wake up the waiter before the priority is adjusted. If we
+- * deboost first (and lose our higher priority), then the task might get
+- * scheduled away before the wake up can take place.
+- */
+ spin_unlock(&hb->lock);
+- wake_up_q(&wake_q);
+- if (deboost)
++
++ if (deboost) {
++ wake_up_q(&wake_q);
+ rt_mutex_adjust_prio(current);
++ }
+
+ return 0;
+ }
+@@ -2257,30 +2273,32 @@ static void unqueue_me_pi(struct futex_q *q)
+ spin_unlock(q->lock_ptr);
+ }
+
+-/*
+- * Fixup the pi_state owner with the new owner.
+- *
+- * Must be called with hash bucket lock held and mm->sem held for non
+- * private futexes.
+- */
+-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+- struct task_struct *newowner)
++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
+ {
+- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+ struct futex_pi_state *pi_state = q->pi_state;
+- struct task_struct *oldowner = pi_state->owner;
+- u32 uval, uninitialized_var(curval), newval;
+- int ret;
++ struct task_struct *oldowner, *newowner;
++ u32 uval, curval, newval, newtid;
++ int err = 0;
++
++ oldowner = pi_state->owner;
+
+ /* Owner died? */
+ if (!pi_state->owner)
+ newtid |= FUTEX_OWNER_DIED;
+
+ /*
+- * We are here either because we stole the rtmutex from the
+- * previous highest priority waiter or we are the highest priority
+- * waiter but failed to get the rtmutex the first time.
+- * We have to replace the newowner TID in the user space variable.
++ * We are here because either:
++ *
++ * - we stole the lock and pi_state->owner needs updating to reflect
++ * that (@argowner == current),
++ *
++ * or:
++ *
++ * - someone stole our lock and we need to fix things to point to the
++ * new owner (@argowner == NULL).
++ *
++ * Either way, we have to replace the TID in the user space variable.
+ * This must be atomic as we have to preserve the owner died bit here.
+ *
+ * Note: We write the user space value _before_ changing the pi_state
+@@ -2294,6 +2312,39 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+ * in lookup_pi_state.
+ */
+ retry:
++ if (!argowner) {
++ if (oldowner != current) {
++ /*
++ * We raced against a concurrent self; things are
++ * already fixed up. Nothing to do.
++ */
++ return 0;
++ }
++
++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
++ /* We got the lock after all, nothing to fix. */
++ return 1;
++ }
++
++ /*
++ * Since we just failed the trylock; there must be an owner.
++ */
++ newowner = rt_mutex_owner(&pi_state->pi_mutex);
++ BUG_ON(!newowner);
++ } else {
++ WARN_ON_ONCE(argowner != current);
++ if (oldowner == current) {
++ /*
++ * We raced against a concurrent self; things are
++ * already fixed up. Nothing to do.
++ */
++ return 1;
++ }
++ newowner = argowner;
++ }
++
++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
++
+ if (get_futex_value_locked(&uval, uaddr))
+ goto handle_fault;
+
+@@ -2311,19 +2362,8 @@ retry:
+ * We fixed up user space. Now we need to fix the pi_state
+ * itself.
+ */
+- if (pi_state->owner != NULL) {
+- raw_spin_lock_irq(&pi_state->owner->pi_lock);
+- WARN_ON(list_empty(&pi_state->list));
+- list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+- }
+-
+- pi_state->owner = newowner;
++ pi_state_update_owner(pi_state, newowner);
+
+- raw_spin_lock_irq(&newowner->pi_lock);
+- WARN_ON(!list_empty(&pi_state->list));
+- list_add(&pi_state->list, &newowner->pi_state_list);
+- raw_spin_unlock_irq(&newowner->pi_lock);
+ return 0;
+
+ /*
+@@ -2339,7 +2379,7 @@ retry:
+ handle_fault:
+ spin_unlock(q->lock_ptr);
+
+- ret = fault_in_user_writeable(uaddr);
++ err = fault_in_user_writeable(uaddr);
+
+ spin_lock(q->lock_ptr);
+
+@@ -2347,12 +2387,45 @@ handle_fault:
+ * Check if someone else fixed it for us:
+ */
+ if (pi_state->owner != oldowner)
+- return 0;
++ return argowner == current;
+
+- if (ret)
+- return ret;
++ /* Retry if err was -EAGAIN or the fault in succeeded */
++ if (!err)
++ goto retry;
+
+- goto retry;
++ /*
++ * fault_in_user_writeable() failed so user state is immutable. At
++ * best we can make the kernel state consistent but user state will
++ * be most likely hosed and any subsequent unlock operation will be
++ * rejected due to PI futex rule [10].
++ *
++ * Ensure that the rtmutex owner is also the pi_state owner despite
++ * the user space value claiming something different. There is no
++ * point in unlocking the rtmutex if current is the owner as it
++ * would need to wait until the next waiter has taken the rtmutex
++ * to guarantee consistent state. Keep it simple. Userspace asked
++ * for this wreckaged state.
++ *
++ * The rtmutex has an owner - either current or some other
++ * task. See the EAGAIN loop above.
++ */
++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
++
++ return err;
++}
++
++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
++ struct task_struct *argowner)
++{
++ struct futex_pi_state *pi_state = q->pi_state;
++ int ret;
++
++ lockdep_assert_held(q->lock_ptr);
++
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ static long futex_wait_restart(struct restart_block *restart);
+@@ -2374,13 +2447,16 @@ static long futex_wait_restart(struct restart_block *restart);
+ */
+ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ {
+- struct task_struct *owner;
+ int ret = 0;
+
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case:
++ *
++ * Speculative pi_state->owner read (we don't hold wait_lock);
++ * since we own the lock pi_state->owner == current is the
++ * stable state, anything else needs more attention.
+ */
+ if (q->pi_state->owner != current)
+ ret = fixup_pi_state_owner(uaddr, q, current);
+ goto out;
+ }
+
+ /*
+- * Catch the rare case, where the lock was released when we were on the
+- * way back before we locked the hash bucket.
++ * If we didn't get the lock; check if anybody stole it from us. In
++ * that case, we need to fix up the uval to point to them instead of
++ * us, otherwise bad things happen. [10]
++ *
++ * Another speculative read; pi_state->owner == current is unstable
++ * but needs our attention.
+ */
+ if (q->pi_state->owner == current) {
+- /*
+- * Try to get the rt_mutex now. This might fail as some other
+- * task acquired the rt_mutex after we removed ourself from the
+- * rt_mutex waiters list.
+- */
+- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+- locked = 1;
+- goto out;
+- }
+-
+- /*
+- * pi_state is incorrect, some other task did a lock steal and
+- * we returned due to timeout or signal without taking the
+- * rt_mutex. Too late.
+- */
+- raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+- owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+- if (!owner)
+- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+- raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+- ret = fixup_pi_state_owner(uaddr, q, owner);
++ ret = fixup_pi_state_owner(uaddr, q, NULL);
+ goto out;
+ }
+
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+- * the owner of the rt_mutex.
++ * the owner of the rt_mutex. Warn and establish consistent state.
+ */
+- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
+- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+- "pi-state %p\n", ret,
+- q->pi_state->pi_mutex.owner,
+- q->pi_state->owner);
++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
++ return fixup_pi_state_owner(uaddr, q, current);
+
+ out:
+ return ret ? ret : locked;
+@@ -2721,7 +2778,7 @@ retry_private:
+ if (!trylock) {
+ ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+ } else {
+- ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
+ /* Fixup the trylock return value: */
+ ret = ret ? 0 : -EWOULDBLOCK;
+ }
+@@ -2739,13 +2796,6 @@ retry_private:
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_owner() faulted and was unable to handle the fault, unlock
+- * it and return the fault to userspace.
+- */
+- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
+- rt_mutex_unlock(&q.pi_state->pi_mutex);
+-
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q);
+
+@@ -3050,8 +3100,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+- rt_mutex_unlock(&q.pi_state->pi_mutex);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+@@ -3088,14 +3136,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+- /*
+- * If fixup_pi_state_owner() faulted and was unable to handle
+- * the fault, unlock the rt_mutex and return the fault to
+- * userspace.
+- */
+- if (ret && rt_mutex_owner(pi_mutex) == current)
+- rt_mutex_unlock(pi_mutex);
+-
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 3938e4670b89b..51867a2e537fa 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1884,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp)
+ int i;
+ void *addr;
+
++ /* If only rp->kp.addr is specified, check reregistering kprobes */
++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
++ return -EINVAL;
++
+ if (kretprobe_blacklist_size) {
+ addr = kprobe_addr(&rp->kp);
+ if (IS_ERR(addr))
+diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
+index 62b6cee8ea7f9..0613c4b1d0596 100644
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+ lock->name = name;
+ }
+
+-void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
+-{
+-}
+-
+-void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+-{
+-}
+-
+diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
+index d0519c3432b67..b585af9a1b508 100644
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -9,9 +9,6 @@
+ * This file contains macros used solely by rtmutex.c. Debug version.
+ */
+
+-extern void
+-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+ extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+ extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7615e7722258c..6ff4156b3929e 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -956,8 +956,6 @@ takeit:
+ */
+ rt_mutex_set_owner(lock, task);
+
+- rt_mutex_deadlock_account_lock(lock, task);
+-
+ return 1;
+ }
+
+@@ -1316,6 +1314,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ return ret;
+ }
+
++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
++{
++ int ret = try_to_take_rt_mutex(lock, current, NULL);
++
++ /*
++ * try_to_take_rt_mutex() sets the lock waiters bit
++ * unconditionally. Clean this up.
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ return ret;
++}
++
+ /*
+ * Slow path try-lock function:
+ */
+@@ -1338,13 +1349,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ */
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+- ret = try_to_take_rt_mutex(lock, current, NULL);
+-
+- /*
+- * try_to_take_rt_mutex() sets the lock waiters bit
+- * unconditionally. Clean this up.
+- */
+- fixup_rt_mutex_waiters(lock);
++ ret = __rt_mutex_slowtrylock(lock);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+@@ -1365,8 +1370,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+
+ debug_rt_mutex_unlock(lock);
+
+- rt_mutex_deadlock_account_unlock(current);
+-
+ /*
+ * We must be careful here if the fast path is enabled. If we
+ * have no waiters queued we cannot set owner to NULL here
+@@ -1432,11 +1435,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk))
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 0;
+- } else
+- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
++
++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+ }
+
+ static inline int
+@@ -1448,21 +1450,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ enum rtmutex_chainwalk chwalk))
+ {
+ if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 0;
+- } else
+- return slowfn(lock, state, timeout, chwalk);
++
++ return slowfn(lock, state, timeout, chwalk);
+ }
+
+ static inline int
+ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock))
+ {
+- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
+- rt_mutex_deadlock_account_lock(lock, current);
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 1;
+- }
++
+ return slowfn(lock);
+ }
+
+@@ -1472,19 +1472,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+ struct wake_q_head *wqh))
+ {
+ WAKE_Q(wake_q);
++ bool deboost;
+
+- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+- rt_mutex_deadlock_account_unlock(current);
++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++ return;
+
+- } else {
+- bool deboost = slowfn(lock, &wake_q);
++ deboost = slowfn(lock, &wake_q);
+
+- wake_up_q(&wake_q);
++ wake_up_q(&wake_q);
+
+- /* Undo pi boosting if necessary: */
+- if (deboost)
+- rt_mutex_adjust_prio(current);
+- }
++ /* Undo pi boosting if necessary: */
++ if (deboost)
++ rt_mutex_adjust_prio(current);
+ }
+
+ /**
+@@ -1519,15 +1518,28 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+ /*
+ * Futex variant with full deadlock detection.
++ * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
+ */
+-int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
++int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout)
+ {
+ might_sleep();
+
+- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_FULL_CHAINWALK,
+- rt_mutex_slowlock);
++ return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
++ timeout, RT_MUTEX_FULL_CHAINWALK);
++}
++
++/*
++ * Futex variant, must not use fastpath.
++ */
++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++ return rt_mutex_slowtrylock(lock);
++}
++
++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++ return __rt_mutex_slowtrylock(lock);
+ }
+
+ /**
+@@ -1586,20 +1598,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
+ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+
+ /**
+- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+- * @lock: the rt_mutex to be unlocked
+- *
+- * Returns: true/false indicating whether priority adjustment is
+- * required or not.
++ * Futex variant, that since futex variants do not use the fast-path, can be
++ * simple and will not need to retry.
+ */
+-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh)
++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
++ struct wake_q_head *wake_q)
+ {
+- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+- rt_mutex_deadlock_account_unlock(current);
+- return false;
++ lockdep_assert_held(&lock->wait_lock);
++
++ debug_rt_mutex_unlock(lock);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ return false; /* done */
++ }
++
++ mark_wakeup_next_waiter(wake_q, lock);
++ return true; /* deboost and wakeups */
++}
++
++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
++{
++ WAKE_Q(wake_q);
++ bool deboost;
++
++ raw_spin_lock_irq(&lock->wait_lock);
++ deboost = __rt_mutex_futex_unlock(lock, &wake_q);
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ if (deboost) {
++ wake_up_q(&wake_q);
++ rt_mutex_adjust_prio(current);
+ }
+- return rt_mutex_slowunlock(lock, wqh);
+ }
+
+ /**
+@@ -1656,7 +1686,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ __rt_mutex_init(lock, NULL);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+- rt_mutex_deadlock_account_lock(lock, proxy_owner);
+ }
+
+ /**
+@@ -1667,12 +1696,10 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ * No locking. Caller has to do serializing itself
+ * Special API call for PI-futex support
+ */
+-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+- struct task_struct *proxy_owner)
++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
1606 + {
1607 + debug_rt_mutex_proxy_unlock(lock);
1608 + rt_mutex_set_owner(lock, NULL);
1609 +- rt_mutex_deadlock_account_unlock(proxy_owner);
1610 + }
1611 +
1612 + /**
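Across the rtmutex.c hunks above, the deadlock-accounting calls disappear from every fast path, leaving acquisition and release as a bare compare-and-swap on the owner word, while the futex variants are rerouted to the slow path only. A minimal userspace sketch of that fast-path shape, using C11 atomics (toy_* names are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_mutex {
        _Atomic(void *) owner;          /* NULL == unlocked */
    };

    static bool toy_fastlock(struct toy_mutex *m, void *self)
    {
        void *expected = NULL;
        /* acquire on success, like rt_mutex_cmpxchg_acquire() */
        return atomic_compare_exchange_strong_explicit(
                &m->owner, &expected, self,
                memory_order_acquire, memory_order_relaxed);
    }

    static bool toy_fastunlock(struct toy_mutex *m, void *self)
    {
        void *expected = self;
        /* release on success, like rt_mutex_cmpxchg_release(); failure
         * means waiters queued up and the slow path must run */
        return atomic_compare_exchange_strong_explicit(
                &m->owner, &expected, NULL,
                memory_order_release, memory_order_relaxed);
    }

When the release CAS fails, the kernel falls back to rt_mutex_slowunlock(), which is where all the waiter bookkeeping now lives.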
1613 +diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
1614 +index c4060584c4076..6607802efa8bd 100644
1615 +--- a/kernel/locking/rtmutex.h
1616 ++++ b/kernel/locking/rtmutex.h
1617 +@@ -11,8 +11,6 @@
1618 + */
1619 +
1620 + #define rt_mutex_deadlock_check(l) (0)
1621 +-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
1622 +-#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
1623 + #define debug_rt_mutex_init_waiter(w) do { } while (0)
1624 + #define debug_rt_mutex_free_waiter(w) do { } while (0)
1625 + #define debug_rt_mutex_lock(l) do { } while (0)
1626 +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
1627 +index 14cbafed00142..bea5d677fe343 100644
1628 +--- a/kernel/locking/rtmutex_common.h
1629 ++++ b/kernel/locking/rtmutex_common.h
1630 +@@ -102,8 +102,7 @@ enum rtmutex_chainwalk {
1631 + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
1632 + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1633 + struct task_struct *proxy_owner);
1634 +-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1635 +- struct task_struct *proxy_owner);
1636 ++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
1637 + extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1638 + struct rt_mutex_waiter *waiter,
1639 + struct task_struct *task);
1640 +@@ -113,8 +112,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1641 + extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
1642 + struct rt_mutex_waiter *waiter);
1643 + extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
1644 +-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
1645 +- struct wake_q_head *wqh);
1646 ++extern int rt_mutex_futex_trylock(struct rt_mutex *l);
1647 ++extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
1648 ++
1649 ++extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
1650 ++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
1651 ++ struct wake_q_head *wqh);
1652 ++
1653 + extern void rt_mutex_adjust_prio(struct task_struct *task);
1654 +
1655 + #ifdef CONFIG_DEBUG_RT_MUTEXES
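The header changes publish the split unlock API: rt_mutex_futex_unlock() takes wait_lock itself, while __rt_mutex_futex_unlock() runs with wait_lock already held and only reports whether a wakeup and deboost are still pending. A hedged pthread sketch of that split (names are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    struct lock {
        pthread_mutex_t wait_lock;
        void *owner;
        int nr_waiters;
    };

    /* caller holds wait_lock; true means wakeup/deboost still pending */
    static bool __unlock_locked(struct lock *l)
    {
        if (l->nr_waiters == 0) {
            l->owner = NULL;
            return false;               /* done, nothing to wake */
        }
        /* the mark_wakeup_next_waiter() equivalent would queue the
         * top waiter here, to be woken after wait_lock is dropped */
        return true;
    }

    static void unlock(struct lock *l)
    {
        pthread_mutex_lock(&l->wait_lock);
        bool deboost = __unlock_locked(l);
        pthread_mutex_unlock(&l->wait_lock);

        if (deboost) {
            /* wake_up_q() + rt_mutex_adjust_prio() equivalents run
             * here, deliberately outside the wait_lock section */
        }
    }

Keeping the wakeups and the priority adjustment outside wait_lock keeps the critical section short, which is the point of returning the pending flag instead of acting on it in place.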
1656 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1657 +index 0385c57a2b7af..05ca01ef97f7f 100644
1658 +--- a/mm/huge_memory.c
1659 ++++ b/mm/huge_memory.c
1660 +@@ -1753,7 +1753,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1661 + spinlock_t *ptl;
1662 + struct mm_struct *mm = vma->vm_mm;
1663 + unsigned long haddr = address & HPAGE_PMD_MASK;
1664 +- bool was_locked = false;
1665 ++ bool do_unlock_page = false;
1666 + pmd_t _pmd;
1667 +
1668 + mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
1669 +@@ -1766,7 +1766,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1670 + VM_BUG_ON(freeze && !page);
1671 + if (page) {
1672 + VM_WARN_ON_ONCE(!PageLocked(page));
1673 +- was_locked = true;
1674 + if (page != pmd_page(*pmd))
1675 + goto out;
1676 + }
1677 +@@ -1775,19 +1774,29 @@ repeat:
1678 + if (pmd_trans_huge(*pmd)) {
1679 + if (!page) {
1680 + page = pmd_page(*pmd);
1681 +- if (unlikely(!trylock_page(page))) {
1682 +- get_page(page);
1683 +- _pmd = *pmd;
1684 +- spin_unlock(ptl);
1685 +- lock_page(page);
1686 +- spin_lock(ptl);
1687 +- if (unlikely(!pmd_same(*pmd, _pmd))) {
1688 +- unlock_page(page);
1689 ++ /*
1690 ++ * An anonymous page must be locked, to ensure that a
1691 ++ * concurrent reuse_swap_page() sees stable mapcount;
1692 ++ * but reuse_swap_page() is not used on shmem or file,
1693 ++ * and page lock must not be taken when zap_pmd_range()
1694 ++ * calls __split_huge_pmd() while i_mmap_lock is held.
1695 ++ */
1696 ++ if (PageAnon(page)) {
1697 ++ if (unlikely(!trylock_page(page))) {
1698 ++ get_page(page);
1699 ++ _pmd = *pmd;
1700 ++ spin_unlock(ptl);
1701 ++ lock_page(page);
1702 ++ spin_lock(ptl);
1703 ++ if (unlikely(!pmd_same(*pmd, _pmd))) {
1704 ++ unlock_page(page);
1705 ++ put_page(page);
1706 ++ page = NULL;
1707 ++ goto repeat;
1708 ++ }
1709 + put_page(page);
1710 +- page = NULL;
1711 +- goto repeat;
1712 + }
1713 +- put_page(page);
1714 ++ do_unlock_page = true;
1715 + }
1716 + }
1717 + if (PageMlocked(page))
1718 +@@ -1797,7 +1806,7 @@ repeat:
1719 + __split_huge_pmd_locked(vma, pmd, haddr, freeze);
1720 + out:
1721 + spin_unlock(ptl);
1722 +- if (!was_locked && page)
1723 ++ if (do_unlock_page)
1724 + unlock_page(page);
1725 + mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
1726 + }
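The comment added in the hunk above spells out the lock-ordering constraint: the page lock is a sleeping lock and must not be taken under the pmd spinlock, so on trylock failure the spinlock is dropped, the page lock is taken, and the pmd is revalidated before retrying. A userspace analogy of that retry dance (hypothetical names; a generation counter stands in for the pmd_same() check):

    #include <pthread.h>

    struct state {
        pthread_spinlock_t ptl;    /* plays the role of the pmd spinlock */
        pthread_mutex_t page;      /* plays the role of the page lock */
        int generation;            /* bumped whenever the pmd would change */
    };

    /* returns with both locks held and the state revalidated */
    static void lock_both(struct state *s)
    {
        pthread_spin_lock(&s->ptl);
        for (;;) {
            if (pthread_mutex_trylock(&s->page) == 0)
                return;                     /* fast path: no inversion */
            int seen = s->generation;
            pthread_spin_unlock(&s->ptl);   /* drop spinlock before sleeping */
            pthread_mutex_lock(&s->page);
            pthread_spin_lock(&s->ptl);
            if (s->generation == seen)
                return;                     /* state unchanged: done */
            pthread_mutex_unlock(&s->page); /* changed underneath: retry */
        }
    }

The fix itself then restricts this dance to anonymous pages, since per the new comment the page lock must not be taken at all when __split_huge_pmd() is reached under i_mmap_lock.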
1727 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1728 +index 52b5e0e026d60..5a16d892c891c 100644
1729 +--- a/mm/hugetlb.c
1730 ++++ b/mm/hugetlb.c
1731 +@@ -1210,12 +1210,11 @@ struct hstate *size_to_hstate(unsigned long size)
1732 + */
1733 + bool page_huge_active(struct page *page)
1734 + {
1735 +- VM_BUG_ON_PAGE(!PageHuge(page), page);
1736 +- return PageHead(page) && PagePrivate(&page[1]);
1737 ++ return PageHeadHuge(page) && PagePrivate(&page[1]);
1738 + }
1739 +
1740 + /* never called for tail page */
1741 +-static void set_page_huge_active(struct page *page)
1742 ++void set_page_huge_active(struct page *page)
1743 + {
1744 + VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1745 + SetPagePrivate(&page[1]);
1746 +@@ -4657,9 +4656,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
1747 + {
1748 + bool ret = true;
1749 +
1750 +- VM_BUG_ON_PAGE(!PageHead(page), page);
1751 + spin_lock(&hugetlb_lock);
1752 +- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
1753 ++ if (!PageHeadHuge(page) || !page_huge_active(page) ||
1754 ++ !get_page_unless_zero(page)) {
1755 + ret = false;
1756 + goto unlock;
1757 + }
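Rather than asserting PageHead with VM_BUG_ON, the hugetlb hunks revalidate the page under hugetlb_lock, so racing with a concurrent free simply fails the isolation. A rough sketch of that check-under-lock shape (illustrative names, not kernel API):

    #include <pthread.h>
    #include <stdbool.h>

    struct page_state {
        pthread_mutex_t lock;       /* plays the role of hugetlb_lock */
        bool is_huge_head;          /* may flip while we do not hold it */
        bool active;
        int refcount;
    };

    static bool isolate(struct page_state *p)
    {
        bool ret = true;

        /* no assertion outside the lock: the state may change under us */
        pthread_mutex_lock(&p->lock);
        if (!p->is_huge_head || !p->active || p->refcount == 0) {
            ret = false;            /* lost the race: fail gracefully */
            goto unlock;
        }
        p->refcount++;              /* get_page_unless_zero() equivalent */
        /* ... move to the isolation list ... */
    unlock:
        pthread_mutex_unlock(&p->lock);
        return ret;
    }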
1758 +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
1759 +index 482c94d9d958a..d1c7dcc234486 100644
1760 +--- a/net/lapb/lapb_out.c
1761 ++++ b/net/lapb/lapb_out.c
1762 +@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
1763 + skb = skb_dequeue(&lapb->write_queue);
1764 +
1765 + do {
1766 +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
1767 ++ skbn = skb_copy(skb, GFP_ATOMIC);
1768 ++ if (!skbn) {
1769 + skb_queue_head(&lapb->write_queue, skb);
1770 + break;
1771 + }
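The lapb hunk replaces skb_clone() with skb_copy(): a clone shares the packet data with the skb retained on write_queue, so writing the outgoing LAPB header in place would also corrupt the queued copy, while skb_copy() gives the transmit path private data. A userspace analogy with plain buffers (buf_* names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *data; size_t len; };

    /* shares the underlying data, like skb_clone() */
    static struct buf buf_clone(struct buf b)
    {
        return b;
    }

    /* private data, like skb_copy() */
    static struct buf buf_copy(struct buf b)
    {
        struct buf c = { malloc(b.len), b.len };
        memcpy(c.data, b.data, b.len);
        return c;
    }

    int main(void)
    {
        unsigned char payload[4] = "abc";
        struct buf queued = { payload, sizeof(payload) };

        struct buf tx = buf_clone(queued);
        tx.data[0] = 'X';               /* corrupts `queued` as well */

        struct buf tx2 = buf_copy(queued);
        tx2.data[0] = 'Y';              /* `queued` stays intact */
        free(tx2.data);
        return 0;
    }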
1772 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
1773 +index f783d1377d9a8..9f0f437a09b95 100644
1774 +--- a/net/mac80211/driver-ops.c
1775 ++++ b/net/mac80211/driver-ops.c
1776 +@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
1777 + } else if (old_state == IEEE80211_STA_AUTH &&
1778 + new_state == IEEE80211_STA_ASSOC) {
1779 + ret = drv_sta_add(local, sdata, &sta->sta);
1780 +- if (ret == 0)
1781 ++ if (ret == 0) {
1782 + sta->uploaded = true;
1783 ++ if (rcu_access_pointer(sta->sta.rates))
1784 ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
1785 ++ }
1786 + } else if (old_state == IEEE80211_STA_ASSOC &&
1787 + new_state == IEEE80211_STA_AUTH) {
1788 + drv_sta_remove(local, sdata, &sta->sta);
1789 +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
1790 +index e6096dfd02105..41421609a0f02 100644
1791 +--- a/net/mac80211/rate.c
1792 ++++ b/net/mac80211/rate.c
1793 +@@ -892,7 +892,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
1794 + if (old)
1795 + kfree_rcu(old, rcu_head);
1796 +
1797 +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
1798 ++ if (sta->uploaded)
1799 ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
1800 +
1801 + return 0;
1802 + }
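Taken together, the two mac80211 hunks fix an ordering problem: a rate table set before the station is uploaded to the driver is now replayed at upload time, and later updates are only forwarded once the driver knows the station. A hedged sketch of that replay-on-register pattern (all names hypothetical):

    #include <stdbool.h>

    struct rate_table;                   /* opaque, illustrative */

    struct sta {
        bool uploaded;
        const struct rate_table *rates;  /* may be set before upload */
    };

    static void driver_rate_update(struct sta *s)
    {
        (void)s;                         /* driver callback placeholder */
    }

    static void sta_upload(struct sta *s)
    {
        s->uploaded = true;
        if (s->rates)                    /* replay pre-upload state */
            driver_rate_update(s);
    }

    static void sta_set_rates(struct sta *s, const struct rate_table *t)
    {
        s->rates = t;
        if (s->uploaded)                 /* only notify a known station */
            driver_rate_update(s);
    }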
1803 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1804 +index 9be82ed02e0e5..c38d68131d02e 100644
1805 +--- a/net/mac80211/rx.c
1806 ++++ b/net/mac80211/rx.c
1807 +@@ -3802,6 +3802,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
1808 +
1809 + rcu_read_lock();
1810 + key = rcu_dereference(sta->ptk[sta->ptk_idx]);
1811 ++ if (!key)
1812 ++ key = rcu_dereference(sdata->default_unicast_key);
1813 + if (key) {
1814 + switch (key->conf.cipher) {
1815 + case WLAN_CIPHER_SUITE_TKIP:
1816 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
1817 +index 912ed9b901ac9..45038c837eab4 100644
1818 +--- a/net/sched/sch_api.c
1819 ++++ b/net/sched/sch_api.c
1820 +@@ -393,7 +393,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
1821 + {
1822 + struct qdisc_rate_table *rtab;
1823 +
1824 +- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
1825 ++ if (tab == NULL || r->rate == 0 ||
1826 ++ r->cell_log == 0 || r->cell_log >= 32 ||
1827 + nla_len(tab) != TC_RTAB_SIZE)
1828 + return NULL;
1829 +
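The sch_api hunk adds a bounds check on cell_log: rate-table lookups shift packet sizes right by cell_log, and a shift count of 32 or more on a 32-bit operand is undefined behaviour in C, so such tables from userspace are now rejected at configuration time. A simplified illustration (rtab_slot is illustrative, not the kernel helper):

    #include <stdint.h>

    /* qdisc rate tables are indexed by a size shifted right by
     * cell_log; the new r->cell_log >= 32 check rules out shift
     * counts that would be undefined on a 32-bit operand */
    static uint32_t rtab_slot(uint32_t pktlen, uint32_t cell_log)
    {
        /* caller guarantees cell_log < 32, as qdisc_get_rtab() now does */
        return pktlen >> cell_log;
    }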
1830 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1831 +index 720de648510dc..b003cb07254a8 100644
1832 +--- a/sound/pci/hda/patch_realtek.c
1833 ++++ b/sound/pci/hda/patch_realtek.c
1834 +@@ -6284,7 +6284,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1835 + SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
1836 + ALC225_STANDARD_PINS,
1837 + {0x12, 0xb7a60130},
1838 +- {0x13, 0xb8a60140},
1839 ++ {0x13, 0xb8a61140},
1840 + {0x17, 0x90170110}),
1841 + {}
1842 + };
1843 +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
1844 +index d84c28eac262d..0ba5bb51bd93c 100644
1845 +--- a/tools/objtool/elf.c
1846 ++++ b/tools/objtool/elf.c
1847 +@@ -226,8 +226,11 @@ static int read_symbols(struct elf *elf)
1848 +
1849 + symtab = find_section_by_name(elf, ".symtab");
1850 + if (!symtab) {
1851 +- WARN("missing symbol table");
1852 +- return -1;
1853 ++ /*
1854 ++ * A missing symbol table is actually possible if it's an empty
1855 ++ * .o file. This can happen for thunk_64.o.
1856 ++ */
1857 ++ return 0;
1858 + }
1859 +
1860 + symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;