Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 10 Feb 2021 10:17:58 +0000
Message-Id: 1612952261.346dc70cb8b10bf114f95ed45924706a3f159ffe.alicef@gentoo
1 commit: 346dc70cb8b10bf114f95ed45924706a3f159ffe
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Feb 10 10:17:29 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Feb 10 10:17:41 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=346dc70c
7
8 Linux patch 4.4.257
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1256_linux-4.4.257.patch | 1682 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1686 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e5f13f4..269cc08 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1067,6 +1067,10 @@ Patch: 1255_linux-4.4.256.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.256
23
24 +Patch: 1256_linux-4.4.257.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.257
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1256_linux-4.4.257.patch b/1256_linux-4.4.257.patch
33 new file mode 100644
34 index 0000000..e42f5ea
35 --- /dev/null
36 +++ b/1256_linux-4.4.257.patch
37 @@ -0,0 +1,1682 @@
38 +diff --git a/Makefile b/Makefile
39 +index 0057587d2cbe2..8de8f9ac32795 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 4
45 +-SUBLEVEL = 256
46 ++SUBLEVEL = 257
47 + EXTRAVERSION =
48 + NAME = Blurry Fish Butt
49 +
50 +@@ -830,12 +830,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=strict-prototypes)
51 + # Prohibit date/time macros, which would make the build non-deterministic
52 + KBUILD_CFLAGS += $(call cc-option,-Werror=date-time)
53 +
54 +-# ensure -fcf-protection is disabled when using retpoline as it is
55 +-# incompatible with -mindirect-branch=thunk-extern
56 +-ifdef CONFIG_RETPOLINE
57 +-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
58 +-endif
59 +-
60 + # use the deterministic mode of AR if available
61 + KBUILD_ARFLAGS := $(call ar-option,D)
62 +
63 +@@ -1068,7 +1062,7 @@ endef
64 +
65 + define filechk_version.h
66 + (echo \#define LINUX_VERSION_CODE $(shell \
67 +- expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
68 ++ expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
69 + echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
70 + endef
71 +
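
[Note, not part of the patch: the Makefile hunk above is the heart of this release. SUBLEVEL is 257, but LINUX_VERSION_CODE packs the sublevel into a single byte, so the usual expr would overflow into the patchlevel field and 4.4.257 would masquerade as 4.5.1; the patch therefore pins the emitted sublevel byte at 255. A standalone C sketch of the encoding:]

#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	/* naive encoding: 257 overflows the 8-bit sublevel field */
	int bad  = KERNEL_VERSION(4, 4, 257);	/* == KERNEL_VERSION(4, 5, 1) */
	/* what the patched Makefile emits: sublevel clamped to 255 */
	int good = (4 << 16) + (4 << 8) + 255;

	printf("4.4.257 naive  : %#x\n", bad);
	printf("4.4.257 clamped: %#x\n", good);
	printf("collides with 4.5.1: %s\n",
	       bad == KERNEL_VERSION(4, 5, 1) ? "yes" : "no");
	return 0;
}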
72 +diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
73 +index 96a3d73ef4bf4..fd6c9169fa78e 100644
74 +--- a/arch/arm/mach-footbridge/dc21285.c
75 ++++ b/arch/arm/mach-footbridge/dc21285.c
76 +@@ -69,15 +69,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
77 + if (addr)
78 + switch (size) {
79 + case 1:
80 +- asm("ldrb %0, [%1, %2]"
81 ++ asm volatile("ldrb %0, [%1, %2]"
82 + : "=r" (v) : "r" (addr), "r" (where) : "cc");
83 + break;
84 + case 2:
85 +- asm("ldrh %0, [%1, %2]"
86 ++ asm volatile("ldrh %0, [%1, %2]"
87 + : "=r" (v) : "r" (addr), "r" (where) : "cc");
88 + break;
89 + case 4:
90 +- asm("ldr %0, [%1, %2]"
91 ++ asm volatile("ldr %0, [%1, %2]"
92 + : "=r" (v) : "r" (addr), "r" (where) : "cc");
93 + break;
94 + }
95 +@@ -103,17 +103,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
96 + if (addr)
97 + switch (size) {
98 + case 1:
99 +- asm("strb %0, [%1, %2]"
100 ++ asm volatile("strb %0, [%1, %2]"
101 + : : "r" (value), "r" (addr), "r" (where)
102 + : "cc");
103 + break;
104 + case 2:
105 +- asm("strh %0, [%1, %2]"
106 ++ asm volatile("strh %0, [%1, %2]"
107 + : : "r" (value), "r" (addr), "r" (where)
108 + : "cc");
109 + break;
110 + case 4:
111 +- asm("str %0, [%1, %2]"
112 ++ asm volatile("str %0, [%1, %2]"
113 + : : "r" (value), "r" (addr), "r" (where)
114 + : "cc");
115 + break;
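
[Note, not part of the patch: the dc21285 change adds volatile because GCC treats an asm with output operands and no volatile as a pure computation: it may be CSE'd, hoisted, or deleted if the result looks unused, which is fatal for a config-space read with side effects. A runnable illustration of the same hazard, assuming an x86-64 host since the ARM asm above will not assemble there; rdtsc stands in for the PCI read:]

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc_nonvolatile(void)
{
	uint32_t lo, hi;
	asm("rdtsc" : "=a"(lo), "=d"(hi));	/* identical asms may be CSE'd */
	return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t rdtsc_volatile(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a"(lo), "=d"(hi));	/* always re-executed */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* without volatile, -O2 may fold both reads into one (delta 0) */
	uint64_t t0 = rdtsc_nonvolatile();
	uint64_t t1 = rdtsc_nonvolatile();
	printf("non-volatile delta: %llu\n", (unsigned long long)(t1 - t0));

	uint64_t v0 = rdtsc_volatile();
	uint64_t v1 = rdtsc_volatile();
	printf("volatile delta:     %llu\n", (unsigned long long)(v1 - v0));
	return 0;
}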
116 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
117 +index 9d8bc19edc48e..9f1376788820e 100644
118 +--- a/arch/mips/Kconfig
119 ++++ b/arch/mips/Kconfig
120 +@@ -2990,6 +2990,7 @@ config MIPS32_N32
121 + config BINFMT_ELF32
122 + bool
123 + default y if MIPS32_O32 || MIPS32_N32
124 ++ select ELFCORE
125 +
126 + endmenu
127 +
128 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
129 +index 8b4d022ce0cbc..e59dc138b24ea 100644
130 +--- a/arch/x86/Makefile
131 ++++ b/arch/x86/Makefile
132 +@@ -137,6 +137,9 @@ else
133 + KBUILD_CFLAGS += -mno-red-zone
134 + KBUILD_CFLAGS += -mcmodel=kernel
135 +
136 ++ # Intel CET isn't enabled in the kernel
137 ++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
138 ++
139 + # -funit-at-a-time shrinks the kernel .text considerably
140 + # unfortunately it makes reading oopses harder.
141 + KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
142 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
143 +index 3328a37ddc75c..34f11bc42d9b7 100644
144 +--- a/arch/x86/include/asm/apic.h
145 ++++ b/arch/x86/include/asm/apic.h
146 +@@ -168,16 +168,6 @@ static inline void disable_local_APIC(void) { }
147 + #endif /* !CONFIG_X86_LOCAL_APIC */
148 +
149 + #ifdef CONFIG_X86_X2APIC
150 +-/*
151 +- * Make previous memory operations globally visible before
152 +- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
153 +- * mfence for this.
154 +- */
155 +-static inline void x2apic_wrmsr_fence(void)
156 +-{
157 +- asm volatile("mfence" : : : "memory");
158 +-}
159 +-
160 + static inline void native_apic_msr_write(u32 reg, u32 v)
161 + {
162 + if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
163 +diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
164 +index b2a5bef742822..134d7ffc662e8 100644
165 +--- a/arch/x86/include/asm/barrier.h
166 ++++ b/arch/x86/include/asm/barrier.h
167 +@@ -119,4 +119,22 @@ do { \
168 + #define smp_mb__before_atomic() do { } while (0)
169 + #define smp_mb__after_atomic() do { } while (0)
170 +
171 ++/*
172 ++ * Make previous memory operations globally visible before
173 ++ * a WRMSR.
174 ++ *
175 ++ * MFENCE makes writes visible, but only affects load/store
176 ++ * instructions. WRMSR is unfortunately not a load/store
177 ++ * instruction and is unaffected by MFENCE. The LFENCE ensures
178 ++ * that the WRMSR is not reordered.
179 ++ *
180 ++ * Most WRMSRs are full serializing instructions themselves and
181 ++ * do not require this barrier. This is only required for the
182 ++ * IA32_TSC_DEADLINE and X2APIC MSRs.
183 ++ */
184 ++static inline void weak_wrmsr_fence(void)
185 ++{
186 ++ asm volatile("mfence; lfence" : : : "memory");
187 ++}
188 ++
189 + #endif /* _ASM_X86_BARRIER_H */
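
[Note, not part of the patch: unlike WRMSR itself, MFENCE and LFENCE are unprivileged, so the new barrier can be exercised verbatim from user space. A standalone sketch for an x86-64 host:]

#include <stdio.h>

/* The combined barrier from the patch: MFENCE makes prior loads and
 * stores globally visible; per the comment above, the trailing LFENCE
 * keeps the following (non-load/store) instruction such as WRMSR from
 * being reordered ahead of it. */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}

int main(void)
{
	weak_wrmsr_fence();
	puts("fence executed");
	return 0;
}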
190 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
191 +index 4dcf71c26d647..f53849f3f7fbf 100644
192 +--- a/arch/x86/kernel/apic/apic.c
193 ++++ b/arch/x86/kernel/apic/apic.c
194 +@@ -41,6 +41,7 @@
195 + #include <asm/x86_init.h>
196 + #include <asm/pgalloc.h>
197 + #include <linux/atomic.h>
198 ++#include <asm/barrier.h>
199 + #include <asm/mpspec.h>
200 + #include <asm/i8259.h>
201 + #include <asm/proto.h>
202 +@@ -464,6 +465,9 @@ static int lapic_next_deadline(unsigned long delta,
203 + {
204 + u64 tsc;
205 +
206 ++ /* This MSR is special and need a special fence: */
207 ++ weak_wrmsr_fence();
208 ++
209 + tsc = rdtsc();
210 + wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
211 + return 0;
212 +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
213 +index cc8311c4d2985..f474756fc151e 100644
214 +--- a/arch/x86/kernel/apic/x2apic_cluster.c
215 ++++ b/arch/x86/kernel/apic/x2apic_cluster.c
216 +@@ -32,7 +32,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
217 + unsigned long flags;
218 + u32 dest;
219 +
220 +- x2apic_wrmsr_fence();
221 ++ /* x2apic MSRs are special and need a special fence: */
222 ++ weak_wrmsr_fence();
223 +
224 + local_irq_save(flags);
225 +
226 +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
227 +index 662e9150ea6f2..ad7c3544b07f9 100644
228 +--- a/arch/x86/kernel/apic/x2apic_phys.c
229 ++++ b/arch/x86/kernel/apic/x2apic_phys.c
230 +@@ -43,7 +43,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
231 + unsigned long this_cpu;
232 + unsigned long flags;
233 +
234 +- x2apic_wrmsr_fence();
235 ++ /* x2apic MSRs are special and need a special fence: */
236 ++ weak_wrmsr_fence();
237 +
238 + local_irq_save(flags);
239 +
240 +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
241 +index 82707f9824cae..b4826335ad0b3 100644
242 +--- a/drivers/acpi/thermal.c
243 ++++ b/drivers/acpi/thermal.c
244 +@@ -188,6 +188,8 @@ struct acpi_thermal {
245 + int tz_enabled;
246 + int kelvin_offset;
247 + struct work_struct thermal_check_work;
248 ++ struct mutex thermal_check_lock;
249 ++ atomic_t thermal_check_count;
250 + };
251 +
252 + /* --------------------------------------------------------------------------
253 +@@ -513,16 +515,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
254 + return 0;
255 + }
256 +
257 +-static void acpi_thermal_check(void *data)
258 +-{
259 +- struct acpi_thermal *tz = data;
260 +-
261 +- if (!tz->tz_enabled)
262 +- return;
263 +-
264 +- thermal_zone_device_update(tz->thermal_zone);
265 +-}
266 +-
267 + /* sys I/F for generic thermal sysfs support */
268 +
269 + static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
270 +@@ -556,6 +548,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
271 + return 0;
272 + }
273 +
274 ++static void acpi_thermal_check_fn(struct work_struct *work);
275 ++
276 + static int thermal_set_mode(struct thermal_zone_device *thermal,
277 + enum thermal_device_mode mode)
278 + {
279 +@@ -581,7 +575,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
280 + ACPI_DEBUG_PRINT((ACPI_DB_INFO,
281 + "%s kernel ACPI thermal control\n",
282 + tz->tz_enabled ? "Enable" : "Disable"));
283 +- acpi_thermal_check(tz);
284 ++ acpi_thermal_check_fn(&tz->thermal_check_work);
285 + }
286 + return 0;
287 + }
288 +@@ -950,6 +944,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
289 + Driver Interface
290 + -------------------------------------------------------------------------- */
291 +
292 ++static void acpi_queue_thermal_check(struct acpi_thermal *tz)
293 ++{
294 ++ if (!work_pending(&tz->thermal_check_work))
295 ++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
296 ++}
297 ++
298 + static void acpi_thermal_notify(struct acpi_device *device, u32 event)
299 + {
300 + struct acpi_thermal *tz = acpi_driver_data(device);
301 +@@ -960,17 +960,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
302 +
303 + switch (event) {
304 + case ACPI_THERMAL_NOTIFY_TEMPERATURE:
305 +- acpi_thermal_check(tz);
306 ++ acpi_queue_thermal_check(tz);
307 + break;
308 + case ACPI_THERMAL_NOTIFY_THRESHOLDS:
309 + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
310 +- acpi_thermal_check(tz);
311 ++ acpi_queue_thermal_check(tz);
312 + acpi_bus_generate_netlink_event(device->pnp.device_class,
313 + dev_name(&device->dev), event, 0);
314 + break;
315 + case ACPI_THERMAL_NOTIFY_DEVICES:
316 + acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
317 +- acpi_thermal_check(tz);
318 ++ acpi_queue_thermal_check(tz);
319 + acpi_bus_generate_netlink_event(device->pnp.device_class,
320 + dev_name(&device->dev), event, 0);
321 + break;
322 +@@ -1070,7 +1070,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
323 + {
324 + struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
325 + thermal_check_work);
326 +- acpi_thermal_check(tz);
327 ++
328 ++ if (!tz->tz_enabled)
329 ++ return;
330 ++ /*
331 ++ * In general, it is not sufficient to check the pending bit, because
332 ++ * subsequent instances of this function may be queued after one of them
333 ++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
334 ++ * one of them is running, though, because it may have done the actual
335 ++ * check some time ago, so allow at least one of them to block on the
336 ++ * mutex while another one is running the update.
337 ++ */
338 ++ if (!atomic_add_unless(&tz->thermal_check_count, -1, 1))
339 ++ return;
340 ++
341 ++ mutex_lock(&tz->thermal_check_lock);
342 ++
343 ++ thermal_zone_device_update(tz->thermal_zone);
344 ++
345 ++ atomic_inc(&tz->thermal_check_count);
346 ++
347 ++ mutex_unlock(&tz->thermal_check_lock);
348 + }
349 +
350 + static int acpi_thermal_add(struct acpi_device *device)
351 +@@ -1102,6 +1122,8 @@ static int acpi_thermal_add(struct acpi_device *device)
352 + if (result)
353 + goto free_memory;
354 +
355 ++ atomic_set(&tz->thermal_check_count, 3);
356 ++ mutex_init(&tz->thermal_check_lock);
357 + INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
358 +
359 + pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
360 +@@ -1167,7 +1189,7 @@ static int acpi_thermal_resume(struct device *dev)
361 + tz->state.active |= tz->trips.active[i].flags.enabled;
362 + }
363 +
364 +- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
365 ++ acpi_queue_thermal_check(tz);
366 +
367 + return AE_OK;
368 + }
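
[Note, not part of the patch: the thermal rework leans on two small idioms. work_pending() in acpi_queue_thermal_check() suppresses redundant re-queues, and atomic_add_unless(&count, -1, 1) admits callers only while the count is above 1, so with the initial count of 3 set in acpi_thermal_add() one update can run while at most one more blocks on the mutex. A userspace analog of that gate using C11 atomics; the function names here are mine, not the driver's:]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* analog of the kernel's atomic_add_unless(v, -1, 1):
 * decrement *v unless it is currently 1; true if we decremented */
static bool dec_unless_one(atomic_int *v)
{
	int cur = atomic_load(v);

	while (cur != 1) {
		if (atomic_compare_exchange_weak(v, &cur, cur - 1))
			return true;	/* took a slot */
		/* cur was reloaded by the failed CAS; retry */
	}
	return false;			/* gate closed: enough checks in flight */
}

int main(void)
{
	atomic_int count = 3;		/* same initial budget as the driver */

	for (int i = 0; i < 4; i++)
		printf("caller %d admitted: %s\n", i,
		       dec_unless_one(&count) ? "yes" : "no");
	return 0;
}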
369 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
370 +index 637f1347cd13d..815b69d35722c 100644
371 +--- a/drivers/input/joystick/xpad.c
372 ++++ b/drivers/input/joystick/xpad.c
373 +@@ -232,9 +232,17 @@ static const struct xpad_device {
374 + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
375 + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
376 + { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
377 +- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
378 ++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
379 ++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
380 ++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
381 + { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
382 + { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
383 ++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
384 ++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
385 ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
386 ++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
387 ++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
388 ++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
389 + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
390 + { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
391 + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
392 +@@ -313,6 +321,9 @@ static const struct xpad_device {
393 + { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
394 + { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
395 + { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
396 ++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
397 ++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
398 ++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
399 + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
400 + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
401 + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
402 +@@ -446,8 +457,12 @@ static const struct usb_device_id xpad_table[] = {
403 + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
404 + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
405 + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
406 ++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
407 ++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
408 + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
409 + XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
410 ++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
411 ++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
412 + { }
413 + };
414 +
415 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
416 +index fa07be0b4500e..2317f8d3fef6f 100644
417 +--- a/drivers/input/serio/i8042-x86ia64io.h
418 ++++ b/drivers/input/serio/i8042-x86ia64io.h
419 +@@ -223,6 +223,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
420 + DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
421 + DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
422 + },
423 ++ },
424 ++ {
425 + .matches = {
426 + DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
427 + DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
428 +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
429 +index 8651bd30863d4..f9416535f79d8 100644
430 +--- a/drivers/mmc/core/sdio_cis.c
431 ++++ b/drivers/mmc/core/sdio_cis.c
432 +@@ -24,6 +24,8 @@
433 + #include "sdio_cis.h"
434 + #include "sdio_ops.h"
435 +
436 ++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
437 ++
438 + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
439 + const unsigned char *buf, unsigned size)
440 + {
441 +@@ -263,6 +265,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
442 +
443 + do {
444 + unsigned char tpl_code, tpl_link;
445 ++ unsigned long timeout = jiffies +
446 ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
447 +
448 + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
449 + if (ret)
450 +@@ -315,6 +319,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
451 + prev = &this->next;
452 +
453 + if (ret == -ENOENT) {
454 ++ if (time_after(jiffies, timeout))
455 ++ break;
456 + /* warn about unknown tuples */
457 + pr_warn_ratelimited("%s: queuing unknown"
458 + " CIS tuple 0x%02x (%u bytes)\n",
459 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
460 +index db80ab8335dfb..aa74f72e582ab 100644
461 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
462 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
463 +@@ -2883,8 +2883,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
464 + unsigned long flags = 0;
465 +
466 + spin_lock_irqsave(shost->host_lock, flags);
467 +- if (sdev->type == TYPE_DISK)
468 ++ if (sdev->type == TYPE_DISK) {
469 + sdev->allow_restart = 1;
470 ++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
471 ++ }
472 + spin_unlock_irqrestore(shost->host_lock, flags);
473 + return 0;
474 + }
475 +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
476 +index b20c575564e43..a088f74a157c7 100644
477 +--- a/drivers/scsi/libfc/fc_exch.c
478 ++++ b/drivers/scsi/libfc/fc_exch.c
479 +@@ -1577,8 +1577,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
480 + rc = fc_exch_done_locked(ep);
481 + WARN_ON(fc_seq_exch(sp) != ep);
482 + spin_unlock_bh(&ep->ex_lock);
483 +- if (!rc)
484 ++ if (!rc) {
485 + fc_exch_delete(ep);
486 ++ } else {
487 ++ FC_EXCH_DBG(ep, "ep is completed already,"
488 ++ "hence skip calling the resp\n");
489 ++ goto skip_resp;
490 ++ }
491 + }
492 +
493 + /*
494 +@@ -1597,6 +1602,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
495 + if (!fc_invoke_resp(ep, sp, fp))
496 + fc_frame_free(fp);
497 +
498 ++skip_resp:
499 + fc_exch_release(ep);
500 + return;
501 + rel:
502 +@@ -1841,10 +1847,16 @@ static void fc_exch_reset(struct fc_exch *ep)
503 +
504 + fc_exch_hold(ep);
505 +
506 +- if (!rc)
507 ++ if (!rc) {
508 + fc_exch_delete(ep);
509 ++ } else {
510 ++ FC_EXCH_DBG(ep, "ep is completed already,"
511 ++ "hence skip calling the resp\n");
512 ++ goto skip_resp;
513 ++ }
514 +
515 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
516 ++skip_resp:
517 + fc_seq_set_resp(sp, NULL, ep->arg);
518 + fc_exch_release(ep);
519 + }
520 +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
521 +index 76701d6ce92c3..582099f4f449f 100644
522 +--- a/drivers/usb/class/usblp.c
523 ++++ b/drivers/usb/class/usblp.c
524 +@@ -1349,14 +1349,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
525 + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
526 + return -EINVAL;
527 +
528 +- alts = usblp->protocol[protocol].alt_setting;
529 +- if (alts < 0)
530 +- return -EINVAL;
531 +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
532 +- if (r < 0) {
533 +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
534 +- alts, usblp->ifnum);
535 +- return r;
536 ++ /* Don't unnecessarily set the interface if there's a single alt. */
537 ++ if (usblp->intf->num_altsetting > 1) {
538 ++ alts = usblp->protocol[protocol].alt_setting;
539 ++ if (alts < 0)
540 ++ return -EINVAL;
541 ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
542 ++ if (r < 0) {
543 ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
544 ++ alts, usblp->ifnum);
545 ++ return r;
546 ++ }
547 + }
548 +
549 + usblp->bidir = (usblp->protocol[protocol].epread != NULL);
550 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
551 +index e5ad717cba22f..135e97310f118 100644
552 +--- a/drivers/usb/dwc2/gadget.c
553 ++++ b/drivers/usb/dwc2/gadget.c
554 +@@ -871,7 +871,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
555 + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
556 + u32 windex)
557 + {
558 +- struct dwc2_hsotg_ep *ep;
559 + int dir = (windex & USB_DIR_IN) ? 1 : 0;
560 + int idx = windex & 0x7F;
561 +
562 +@@ -881,12 +880,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
563 + if (idx > hsotg->num_of_eps)
564 + return NULL;
565 +
566 +- ep = index_to_ep(hsotg, idx, dir);
567 +-
568 +- if (idx && ep->dir_in != dir)
569 +- return NULL;
570 +-
571 +- return ep;
572 ++ return index_to_ep(hsotg, idx, dir);
573 + }
574 +
575 + /**
576 +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
577 +index 31e9160223e9a..0b7229678b530 100644
578 +--- a/drivers/usb/gadget/legacy/ether.c
579 ++++ b/drivers/usb/gadget/legacy/ether.c
580 +@@ -407,8 +407,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
581 + struct usb_descriptor_header *usb_desc;
582 +
583 + usb_desc = usb_otg_descriptor_alloc(gadget);
584 +- if (!usb_desc)
585 ++ if (!usb_desc) {
586 ++ status = -ENOMEM;
587 + goto fail1;
588 ++ }
589 + usb_otg_descriptor_init(gadget, usb_desc);
590 + otg_desc[0] = usb_desc;
591 + otg_desc[1] = NULL;
592 +diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
593 +index a6a1678cb9276..c6859fdd74bc2 100644
594 +--- a/drivers/usb/gadget/udc/udc-core.c
595 ++++ b/drivers/usb/gadget/udc/udc-core.c
596 +@@ -612,10 +612,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
597 + struct device_attribute *attr, const char *buf, size_t n)
598 + {
599 + struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
600 ++ ssize_t ret;
601 +
602 ++ mutex_lock(&udc_lock);
603 + if (!udc->driver) {
604 + dev_err(dev, "soft-connect without a gadget driver\n");
605 +- return -EOPNOTSUPP;
606 ++ ret = -EOPNOTSUPP;
607 ++ goto out;
608 + }
609 +
610 + if (sysfs_streq(buf, "connect")) {
611 +@@ -627,10 +630,14 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
612 + usb_gadget_udc_stop(udc);
613 + } else {
614 + dev_err(dev, "unsupported command '%s'\n", buf);
615 +- return -EINVAL;
616 ++ ret = -EINVAL;
617 ++ goto out;
618 + }
619 +
620 +- return n;
621 ++ ret = n;
622 ++out:
623 ++ mutex_unlock(&udc_lock);
624 ++ return ret;
625 + }
626 + static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
627 +
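
[Note, not part of the patch: the udc-core hunk is the classic single-exit locking shape: every early return becomes "ret = ...; goto out;" so that udc_lock, newly taken at the top, is dropped on all paths. The same shape in portable C, with hypothetical names and a pthread mutex standing in for the kernel mutex:]

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t udc_lock = PTHREAD_MUTEX_INITIALIZER;
static int have_driver = 1;

/* every return path funnels through "out:", so the lock cannot leak */
static long softconn_store(const char *buf, size_t n)
{
	long ret;

	pthread_mutex_lock(&udc_lock);
	if (!have_driver) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (strcmp(buf, "connect") != 0 && strcmp(buf, "disconnect") != 0) {
		ret = -EINVAL;
		goto out;
	}
	ret = (long)n;			/* success: report bytes consumed */
out:
	pthread_mutex_unlock(&udc_lock);
	return ret;
}

int main(void)
{
	printf("connect -> %ld\n", softconn_store("connect", 7));
	printf("bogus   -> %ld\n", softconn_store("bogus", 5));
	return 0;
}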
628 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
629 +index 13c718ebaee5b..ded4c8f2bba4e 100644
630 +--- a/drivers/usb/serial/cp210x.c
631 ++++ b/drivers/usb/serial/cp210x.c
632 +@@ -57,6 +57,7 @@ static const struct usb_device_id id_table[] = {
633 + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
634 + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
635 + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
636 ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
637 + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
638 + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
639 + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
640 +@@ -197,6 +198,7 @@ static const struct usb_device_id id_table[] = {
641 + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
642 + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
643 + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
644 ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
645 + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
646 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
647 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
648 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
649 +index 1998b314368e0..3c536eed07541 100644
650 +--- a/drivers/usb/serial/option.c
651 ++++ b/drivers/usb/serial/option.c
652 +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
653 + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
654 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
655 + #define CINTERION_PRODUCT_CLS8 0x00b0
656 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
657 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
658 +
659 + /* Olivetti products */
660 + #define OLIVETTI_VENDOR_ID 0x0b3c
661 +@@ -1896,6 +1898,10 @@ static const struct usb_device_id option_ids[] = {
662 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
663 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
664 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
665 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
666 ++ .driver_info = RSVD(3)},
667 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
668 ++ .driver_info = RSVD(0)},
669 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
670 + .driver_info = RSVD(4) },
671 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
672 +diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
673 +index 2d0cbbd14cfc8..72c03354c14bf 100644
674 +--- a/fs/Kconfig.binfmt
675 ++++ b/fs/Kconfig.binfmt
676 +@@ -1,6 +1,7 @@
677 + config BINFMT_ELF
678 + bool "Kernel support for ELF binaries"
679 + depends on MMU && (BROKEN || !FRV)
680 ++ select ELFCORE
681 + default y
682 + ---help---
683 + ELF (Executable and Linkable Format) is a format for libraries and
684 +@@ -26,6 +27,7 @@ config BINFMT_ELF
685 + config COMPAT_BINFMT_ELF
686 + bool
687 + depends on COMPAT && BINFMT_ELF
688 ++ select ELFCORE
689 +
690 + config ARCH_BINFMT_ELF_STATE
691 + bool
692 +@@ -34,6 +36,7 @@ config BINFMT_ELF_FDPIC
693 + bool "Kernel support for FDPIC ELF binaries"
694 + default y
695 + depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X)
696 ++ select ELFCORE
697 + help
698 + ELF FDPIC binaries are based on ELF, but allow the individual load
699 + segments of a binary to be located in memory independently of each
700 +@@ -43,6 +46,11 @@ config BINFMT_ELF_FDPIC
701 +
702 + It is also possible to run FDPIC ELF binaries on MMU linux also.
703 +
704 ++config ELFCORE
705 ++ bool
706 ++ help
707 ++ This option enables kernel/elfcore.o.
708 ++
709 + config CORE_DUMP_DEFAULT_ELF_HEADERS
710 + bool "Write ELF core dumps with partial segments"
711 + default y
712 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
713 +index be16da31cbccf..9f1641324a811 100644
714 +--- a/fs/cifs/dir.c
715 ++++ b/fs/cifs/dir.c
716 +@@ -831,6 +831,7 @@ static int
717 + cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
718 + {
719 + struct inode *inode;
720 ++ int rc;
721 +
722 + if (flags & LOOKUP_RCU)
723 + return -ECHILD;
724 +@@ -840,8 +841,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
725 + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
726 + CIFS_I(inode)->time = 0; /* force reval */
727 +
728 +- if (cifs_revalidate_dentry(direntry))
729 +- return 0;
730 ++ rc = cifs_revalidate_dentry(direntry);
731 ++ if (rc) {
732 ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
733 ++ switch (rc) {
734 ++ case -ENOENT:
735 ++ case -ESTALE:
736 ++ /*
737 ++ * Those errors mean the dentry is invalid
738 ++ * (file was deleted or recreated)
739 ++ */
740 ++ return 0;
741 ++ default:
742 ++ /*
743 ++ * Otherwise some unexpected error happened
744 ++ * report it as-is to VFS layer
745 ++ */
746 ++ return rc;
747 ++ }
748 ++ }
749 + else {
750 + /*
751 + * If the inode wasn't known to be a dfs entry when
752 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
753 +index 937c6ee1786f9..b743aa5bce0d2 100644
754 +--- a/fs/hugetlbfs/inode.c
755 ++++ b/fs/hugetlbfs/inode.c
756 +@@ -661,8 +661,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
757 +
758 + mutex_unlock(&hugetlb_fault_mutex_table[hash]);
759 +
760 ++ set_page_huge_active(page);
761 + /*
762 +- * page_put due to reference from alloc_huge_page()
763 ++ * put_page() due to reference from alloc_huge_page()
764 + * unlock_page because locked by add_to_page_cache()
765 + */
766 + put_page(page);
767 +diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
768 +index 698d51a0eea3f..4adf7faeaeb59 100644
769 +--- a/include/linux/elfcore.h
770 ++++ b/include/linux/elfcore.h
771 +@@ -55,6 +55,7 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
772 + }
773 + #endif
774 +
775 ++#if defined(CONFIG_UM) || defined(CONFIG_IA64)
776 + /*
777 + * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
778 + * extra segments containing the gate DSO contents. Dumping its
779 +@@ -69,5 +70,26 @@ elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
780 + extern int
781 + elf_core_write_extra_data(struct coredump_params *cprm);
782 + extern size_t elf_core_extra_data_size(void);
783 ++#else
784 ++static inline Elf_Half elf_core_extra_phdrs(void)
785 ++{
786 ++ return 0;
787 ++}
788 ++
789 ++static inline int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
790 ++{
791 ++ return 1;
792 ++}
793 ++
794 ++static inline int elf_core_write_extra_data(struct coredump_params *cprm)
795 ++{
796 ++ return 1;
797 ++}
798 ++
799 ++static inline size_t elf_core_extra_data_size(void)
800 ++{
801 ++ return 0;
802 ++}
803 ++#endif
804 +
805 + #endif /* _LINUX_ELFCORE_H */
806 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
807 +index cc185525a94ba..c4a4a39a458dc 100644
808 +--- a/include/linux/hugetlb.h
809 ++++ b/include/linux/hugetlb.h
810 +@@ -506,6 +506,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
811 + {
812 + atomic_long_sub(l, &mm->hugetlb_usage);
813 + }
814 ++
815 ++void set_page_huge_active(struct page *page);
816 ++
817 + #else /* CONFIG_HUGETLB_PAGE */
818 + struct hstate {};
819 + #define alloc_huge_page(v, a, r) NULL
820 +diff --git a/kernel/Makefile b/kernel/Makefile
821 +index a672bece1f499..8b73d57804f23 100644
822 +--- a/kernel/Makefile
823 ++++ b/kernel/Makefile
824 +@@ -77,9 +77,6 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
825 + obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
826 + obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
827 + obj-$(CONFIG_LATENCYTOP) += latencytop.o
828 +-obj-$(CONFIG_BINFMT_ELF) += elfcore.o
829 +-obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o
830 +-obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o
831 + obj-$(CONFIG_FUNCTION_TRACER) += trace/
832 + obj-$(CONFIG_TRACING) += trace/
833 + obj-$(CONFIG_TRACE_CLOCK) += trace/
834 +diff --git a/kernel/elfcore.c b/kernel/elfcore.c
835 +deleted file mode 100644
836 +index a2b29b9bdfcb2..0000000000000
837 +--- a/kernel/elfcore.c
838 ++++ /dev/null
839 +@@ -1,25 +0,0 @@
840 +-#include <linux/elf.h>
841 +-#include <linux/fs.h>
842 +-#include <linux/mm.h>
843 +-#include <linux/binfmts.h>
844 +-#include <linux/elfcore.h>
845 +-
846 +-Elf_Half __weak elf_core_extra_phdrs(void)
847 +-{
848 +- return 0;
849 +-}
850 +-
851 +-int __weak elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
852 +-{
853 +- return 1;
854 +-}
855 +-
856 +-int __weak elf_core_write_extra_data(struct coredump_params *cprm)
857 +-{
858 +- return 1;
859 +-}
860 +-
861 +-size_t __weak elf_core_extra_data_size(void)
862 +-{
863 +- return 0;
864 +-}
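
[Note, not part of the patch: the deleted kernel/elfcore.c supplied these functions as __weak link-time defaults; the series replaces them with static inline stubs in elfcore.h plus a dedicated ELFCORE Kconfig symbol, so configurations that don't need them never build the object at all. For reference, how a weak default behaves, as a standalone C file; a strong definition in any other object file would silently win at link time:]

#include <stdio.h>

/* weak default, like the deleted elf_core_extra_phdrs() */
int __attribute__((weak)) extra_phdrs(void)
{
	return 0;
}

int main(void)
{
	printf("extra_phdrs() = %d\n", extra_phdrs());
	return 0;
}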
865 +diff --git a/kernel/futex.c b/kernel/futex.c
866 +index f1990e2a51e5a..199e63c5b6120 100644
867 +--- a/kernel/futex.c
868 ++++ b/kernel/futex.c
869 +@@ -835,6 +835,29 @@ static struct futex_pi_state * alloc_pi_state(void)
870 + return pi_state;
871 + }
872 +
873 ++static void pi_state_update_owner(struct futex_pi_state *pi_state,
874 ++ struct task_struct *new_owner)
875 ++{
876 ++ struct task_struct *old_owner = pi_state->owner;
877 ++
878 ++ lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
879 ++
880 ++ if (old_owner) {
881 ++ raw_spin_lock(&old_owner->pi_lock);
882 ++ WARN_ON(list_empty(&pi_state->list));
883 ++ list_del_init(&pi_state->list);
884 ++ raw_spin_unlock(&old_owner->pi_lock);
885 ++ }
886 ++
887 ++ if (new_owner) {
888 ++ raw_spin_lock(&new_owner->pi_lock);
889 ++ WARN_ON(!list_empty(&pi_state->list));
890 ++ list_add(&pi_state->list, &new_owner->pi_state_list);
891 ++ pi_state->owner = new_owner;
892 ++ raw_spin_unlock(&new_owner->pi_lock);
893 ++ }
894 ++}
895 ++
896 + /*
897 + * Must be called with the hb lock held.
898 + */
899 +@@ -851,11 +874,8 @@ static void free_pi_state(struct futex_pi_state *pi_state)
900 + * and has cleaned up the pi_state already
901 + */
902 + if (pi_state->owner) {
903 +- raw_spin_lock_irq(&pi_state->owner->pi_lock);
904 +- list_del_init(&pi_state->list);
905 +- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
906 +-
907 +- rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
908 ++ pi_state_update_owner(pi_state, NULL);
909 ++ rt_mutex_proxy_unlock(&pi_state->pi_mutex);
910 + }
911 +
912 + if (current->pi_state_cache)
913 +@@ -936,7 +956,7 @@ static void exit_pi_state_list(struct task_struct *curr)
914 + pi_state->owner = NULL;
915 + raw_spin_unlock_irq(&curr->pi_lock);
916 +
917 +- rt_mutex_unlock(&pi_state->pi_mutex);
918 ++ rt_mutex_futex_unlock(&pi_state->pi_mutex);
919 +
920 + spin_unlock(&hb->lock);
921 +
922 +@@ -992,7 +1012,8 @@ static void exit_pi_state_list(struct task_struct *curr)
923 + * FUTEX_OWNER_DIED bit. See [4]
924 + *
925 + * [10] There is no transient state which leaves owner and user space
926 +- * TID out of sync.
927 ++ * TID out of sync. Except one error case where the kernel is denied
928 ++ * write access to the user address, see fixup_pi_state_owner().
929 + */
930 +
931 + /*
932 +@@ -1389,12 +1410,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
933 + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
934 +
935 + /*
936 +- * It is possible that the next waiter (the one that brought
937 +- * this owner to the kernel) timed out and is no longer
938 +- * waiting on the lock.
939 ++ * When we interleave with futex_lock_pi() where it does
940 ++ * rt_mutex_timed_futex_lock(), we might observe @this futex_q waiter,
941 ++ * but the rt_mutex's wait_list can be empty (either still, or again,
942 ++ * depending on which side we land).
943 ++ *
944 ++ * When this happens, give up our locks and try again, giving the
945 ++ * futex_lock_pi() instance time to complete, either by waiting on the
946 ++ * rtmutex or removing itself from the futex queue.
947 + */
948 +- if (!new_owner)
949 +- new_owner = this->task;
950 ++ if (!new_owner) {
951 ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
952 ++ return -EAGAIN;
953 ++ }
954 +
955 + /*
956 + * We pass it to the next owner. The WAITERS bit is always
957 +@@ -1420,36 +1448,24 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
958 + else
959 + ret = -EINVAL;
960 + }
961 +- if (ret) {
962 +- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
963 +- return ret;
964 +- }
965 +-
966 +- raw_spin_lock_irq(&pi_state->owner->pi_lock);
967 +- WARN_ON(list_empty(&pi_state->list));
968 +- list_del_init(&pi_state->list);
969 +- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
970 +-
971 +- raw_spin_lock_irq(&new_owner->pi_lock);
972 +- WARN_ON(!list_empty(&pi_state->list));
973 +- list_add(&pi_state->list, &new_owner->pi_state_list);
974 +- pi_state->owner = new_owner;
975 +- raw_spin_unlock_irq(&new_owner->pi_lock);
976 +-
977 +- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
978 +
979 +- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
980 ++ if (!ret) {
981 ++ /*
982 ++ * This is a point of no return; once we modified the uval
983 ++ * there is no going back and subsequent operations must
984 ++ * not fail.
985 ++ */
986 ++ pi_state_update_owner(pi_state, new_owner);
987 ++ deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
988 ++ }
989 +
990 +- /*
991 +- * First unlock HB so the waiter does not spin on it once he got woken
992 +- * up. Second wake up the waiter before the priority is adjusted. If we
993 +- * deboost first (and lose our higher priority), then the task might get
994 +- * scheduled away before the wake up can take place.
995 +- */
996 ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
997 + spin_unlock(&hb->lock);
998 +- wake_up_q(&wake_q);
999 +- if (deboost)
1000 ++
1001 ++ if (deboost) {
1002 ++ wake_up_q(&wake_q);
1003 + rt_mutex_adjust_prio(current);
1004 ++ }
1005 +
1006 + return 0;
1007 + }
1008 +@@ -2222,30 +2238,32 @@ static void unqueue_me_pi(struct futex_q *q)
1009 + spin_unlock(q->lock_ptr);
1010 + }
1011 +
1012 +-/*
1013 +- * Fixup the pi_state owner with the new owner.
1014 +- *
1015 +- * Must be called with hash bucket lock held and mm->sem held for non
1016 +- * private futexes.
1017 +- */
1018 +-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1019 +- struct task_struct *newowner)
1020 ++static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1021 ++ struct task_struct *argowner)
1022 + {
1023 +- u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1024 + struct futex_pi_state *pi_state = q->pi_state;
1025 +- struct task_struct *oldowner = pi_state->owner;
1026 +- u32 uval, uninitialized_var(curval), newval;
1027 +- int ret;
1028 ++ struct task_struct *oldowner, *newowner;
1029 ++ u32 uval, curval, newval, newtid;
1030 ++ int err = 0;
1031 ++
1032 ++ oldowner = pi_state->owner;
1033 +
1034 + /* Owner died? */
1035 + if (!pi_state->owner)
1036 + newtid |= FUTEX_OWNER_DIED;
1037 +
1038 + /*
1039 +- * We are here either because we stole the rtmutex from the
1040 +- * previous highest priority waiter or we are the highest priority
1041 +- * waiter but failed to get the rtmutex the first time.
1042 +- * We have to replace the newowner TID in the user space variable.
1043 ++ * We are here because either:
1044 ++ *
1045 ++ * - we stole the lock and pi_state->owner needs updating to reflect
1046 ++ * that (@argowner == current),
1047 ++ *
1048 ++ * or:
1049 ++ *
1050 ++ * - someone stole our lock and we need to fix things to point to the
1051 ++ * new owner (@argowner == NULL).
1052 ++ *
1053 ++ * Either way, we have to replace the TID in the user space variable.
1054 + * This must be atomic as we have to preserve the owner died bit here.
1055 + *
1056 + * Note: We write the user space value _before_ changing the pi_state
1057 +@@ -2259,6 +2277,39 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1058 + * in lookup_pi_state.
1059 + */
1060 + retry:
1061 ++ if (!argowner) {
1062 ++ if (oldowner != current) {
1063 ++ /*
1064 ++ * We raced against a concurrent self; things are
1065 ++ * already fixed up. Nothing to do.
1066 ++ */
1067 ++ return 0;
1068 ++ }
1069 ++
1070 ++ if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
1071 ++ /* We got the lock after all, nothing to fix. */
1072 ++ return 1;
1073 ++ }
1074 ++
1075 ++ /*
1076 ++ * Since we just failed the trylock; there must be an owner.
1077 ++ */
1078 ++ newowner = rt_mutex_owner(&pi_state->pi_mutex);
1079 ++ BUG_ON(!newowner);
1080 ++ } else {
1081 ++ WARN_ON_ONCE(argowner != current);
1082 ++ if (oldowner == current) {
1083 ++ /*
1084 ++ * We raced against a concurrent self; things are
1085 ++ * already fixed up. Nothing to do.
1086 ++ */
1087 ++ return 1;
1088 ++ }
1089 ++ newowner = argowner;
1090 ++ }
1091 ++
1092 ++ newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1093 ++
1094 + if (get_futex_value_locked(&uval, uaddr))
1095 + goto handle_fault;
1096 +
1097 +@@ -2276,19 +2327,8 @@ retry:
1098 + * We fixed up user space. Now we need to fix the pi_state
1099 + * itself.
1100 + */
1101 +- if (pi_state->owner != NULL) {
1102 +- raw_spin_lock_irq(&pi_state->owner->pi_lock);
1103 +- WARN_ON(list_empty(&pi_state->list));
1104 +- list_del_init(&pi_state->list);
1105 +- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
1106 +- }
1107 +-
1108 +- pi_state->owner = newowner;
1109 ++ pi_state_update_owner(pi_state, newowner);
1110 +
1111 +- raw_spin_lock_irq(&newowner->pi_lock);
1112 +- WARN_ON(!list_empty(&pi_state->list));
1113 +- list_add(&pi_state->list, &newowner->pi_state_list);
1114 +- raw_spin_unlock_irq(&newowner->pi_lock);
1115 + return 0;
1116 +
1117 + /*
1118 +@@ -2304,7 +2344,7 @@ retry:
1119 + handle_fault:
1120 + spin_unlock(q->lock_ptr);
1121 +
1122 +- ret = fault_in_user_writeable(uaddr);
1123 ++ err = fault_in_user_writeable(uaddr);
1124 +
1125 + spin_lock(q->lock_ptr);
1126 +
1127 +@@ -2312,12 +2352,45 @@ handle_fault:
1128 + * Check if someone else fixed it for us:
1129 + */
1130 + if (pi_state->owner != oldowner)
1131 +- return 0;
1132 ++ return argowner == current;
1133 +
1134 +- if (ret)
1135 +- return ret;
1136 ++ /* Retry if err was -EAGAIN or the fault in succeeded */
1137 ++ if (!err)
1138 ++ goto retry;
1139 +
1140 +- goto retry;
1141 ++ /*
1142 ++ * fault_in_user_writeable() failed so user state is immutable. At
1143 ++ * best we can make the kernel state consistent but user state will
1144 ++ * be most likely hosed and any subsequent unlock operation will be
1145 ++ * rejected due to PI futex rule [10].
1146 ++ *
1147 ++ * Ensure that the rtmutex owner is also the pi_state owner despite
1148 ++ * the user space value claiming something different. There is no
1149 ++ * point in unlocking the rtmutex if current is the owner as it
1150 ++ * would need to wait until the next waiter has taken the rtmutex
1151 ++ * to guarantee consistent state. Keep it simple. Userspace asked
1152 ++ * for this wreckaged state.
1153 ++ *
1154 ++ * The rtmutex has an owner - either current or some other
1155 ++ * task. See the EAGAIN loop above.
1156 ++ */
1157 ++ pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
1158 ++
1159 ++ return err;
1160 ++}
1161 ++
1162 ++static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1163 ++ struct task_struct *argowner)
1164 ++{
1165 ++ struct futex_pi_state *pi_state = q->pi_state;
1166 ++ int ret;
1167 ++
1168 ++ lockdep_assert_held(q->lock_ptr);
1169 ++
1170 ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1171 ++ ret = __fixup_pi_state_owner(uaddr, q, argowner);
1172 ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1173 ++ return ret;
1174 + }
1175 +
1176 + static long futex_wait_restart(struct restart_block *restart);
1177 +@@ -2339,13 +2412,16 @@ static long futex_wait_restart(struct restart_block *restart);
1178 + */
1179 + static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1180 + {
1181 +- struct task_struct *owner;
1182 + int ret = 0;
1183 +
1184 + if (locked) {
1185 + /*
1186 + * Got the lock. We might not be the anticipated owner if we
1187 + * did a lock-steal - fix up the PI-state in that case:
1188 ++ *
1189 ++ * Speculative pi_state->owner read (we don't hold wait_lock);
1190 ++ * since we own the lock pi_state->owner == current is the
1191 ++ * stable state, anything else needs more attention.
1192 + */
1193 + if (q->pi_state->owner != current)
1194 + ret = fixup_pi_state_owner(uaddr, q, current);
1195 +@@ -2353,43 +2429,24 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
1196 + }
1197 +
1198 + /*
1199 +- * Catch the rare case, where the lock was released when we were on the
1200 +- * way back before we locked the hash bucket.
1201 ++ * If we didn't get the lock; check if anybody stole it from us. In
1202 ++ * that case, we need to fix up the uval to point to them instead of
1203 ++ * us, otherwise bad things happen. [10]
1204 ++ *
1205 ++ * Another speculative read; pi_state->owner == current is unstable
1206 ++ * but needs our attention.
1207 + */
1208 + if (q->pi_state->owner == current) {
1209 +- /*
1210 +- * Try to get the rt_mutex now. This might fail as some other
1211 +- * task acquired the rt_mutex after we removed ourself from the
1212 +- * rt_mutex waiters list.
1213 +- */
1214 +- if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1215 +- locked = 1;
1216 +- goto out;
1217 +- }
1218 +-
1219 +- /*
1220 +- * pi_state is incorrect, some other task did a lock steal and
1221 +- * we returned due to timeout or signal without taking the
1222 +- * rt_mutex. Too late.
1223 +- */
1224 +- raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
1225 +- owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1226 +- if (!owner)
1227 +- owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
1228 +- raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
1229 +- ret = fixup_pi_state_owner(uaddr, q, owner);
1230 ++ ret = fixup_pi_state_owner(uaddr, q, NULL);
1231 + goto out;
1232 + }
1233 +
1234 + /*
1235 + * Paranoia check. If we did not take the lock, then we should not be
1236 +- * the owner of the rt_mutex.
1237 ++ * the owner of the rt_mutex. Warn and establish consistent state.
1238 + */
1239 +- if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1240 +- printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1241 +- "pi-state %p\n", ret,
1242 +- q->pi_state->pi_mutex.owner,
1243 +- q->pi_state->owner);
1244 ++ if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
1245 ++ return fixup_pi_state_owner(uaddr, q, current);
1246 +
1247 + out:
1248 + return ret ? ret : locked;
1249 +@@ -2686,7 +2743,7 @@ retry_private:
1250 + if (!trylock) {
1251 + ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
1252 + } else {
1253 +- ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1254 ++ ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
1255 + /* Fixup the trylock return value: */
1256 + ret = ret ? 0 : -EWOULDBLOCK;
1257 + }
1258 +@@ -2704,13 +2761,6 @@ retry_private:
1259 + if (res)
1260 + ret = (res < 0) ? res : 0;
1261 +
1262 +- /*
1263 +- * If fixup_owner() faulted and was unable to handle the fault, unlock
1264 +- * it and return the fault to userspace.
1265 +- */
1266 +- if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1267 +- rt_mutex_unlock(&q.pi_state->pi_mutex);
1268 +-
1269 + /* Unqueue and drop the lock */
1270 + unqueue_me_pi(&q);
1271 +
1272 +@@ -3015,8 +3065,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
1273 + if (q.pi_state && (q.pi_state->owner != current)) {
1274 + spin_lock(q.lock_ptr);
1275 + ret = fixup_pi_state_owner(uaddr2, &q, current);
1276 +- if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
1277 +- rt_mutex_unlock(&q.pi_state->pi_mutex);
1278 + /*
1279 + * Drop the reference to the pi state which
1280 + * the requeue_pi() code acquired for us.
1281 +@@ -3053,14 +3101,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
1282 + if (res)
1283 + ret = (res < 0) ? res : 0;
1284 +
1285 +- /*
1286 +- * If fixup_pi_state_owner() faulted and was unable to handle
1287 +- * the fault, unlock the rt_mutex and return the fault to
1288 +- * userspace.
1289 +- */
1290 +- if (ret && rt_mutex_owner(pi_mutex) == current)
1291 +- rt_mutex_unlock(pi_mutex);
1292 +-
1293 + /* Unqueue and drop the lock. */
1294 + unqueue_me_pi(&q);
1295 + }
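
[Note, not part of the patch: futex rule [10], referenced throughout the hunk above, is about keeping the user-space futex word in sync with the kernel's pi_state. For a PI futex the word holds the owner's TID, with FUTEX_WAITERS (bit 31) set while anyone blocks and FUTEX_OWNER_DIED (bit 30) set when a robust owner exits. A small decoder, runnable as-is on Linux:]

#include <linux/futex.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* build the value a contended PI futex word would hold for us */
	unsigned int tid = (unsigned int)syscall(SYS_gettid);
	unsigned int uval = tid | FUTEX_WAITERS;

	printf("uval=%#x (tid=%u, waiters=%d, owner_died=%d)\n",
	       uval, uval & FUTEX_TID_MASK,
	       !!(uval & FUTEX_WAITERS), !!(uval & FUTEX_OWNER_DIED));
	return 0;
}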
1296 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1297 +index 33c37dbc56a05..90f46c8aa9007 100644
1298 +--- a/kernel/kprobes.c
1299 ++++ b/kernel/kprobes.c
1300 +@@ -1884,6 +1884,10 @@ int register_kretprobe(struct kretprobe *rp)
1301 + int i;
1302 + void *addr;
1303 +
1304 ++ /* If only rp->kp.addr is specified, check reregistering kprobes */
1305 ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
1306 ++ return -EINVAL;
1307 ++
1308 + if (kretprobe_blacklist_size) {
1309 + addr = kprobe_addr(&rp->kp);
1310 + if (IS_ERR(addr))
1311 +diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
1312 +index 62b6cee8ea7f9..0613c4b1d0596 100644
1313 +--- a/kernel/locking/rtmutex-debug.c
1314 ++++ b/kernel/locking/rtmutex-debug.c
1315 +@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
1316 + lock->name = name;
1317 + }
1318 +
1319 +-void
1320 +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
1321 +-{
1322 +-}
1323 +-
1324 +-void rt_mutex_deadlock_account_unlock(struct task_struct *task)
1325 +-{
1326 +-}
1327 +-
1328 +diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
1329 +index d0519c3432b67..b585af9a1b508 100644
1330 +--- a/kernel/locking/rtmutex-debug.h
1331 ++++ b/kernel/locking/rtmutex-debug.h
1332 +@@ -9,9 +9,6 @@
1333 + * This file contains macros used solely by rtmutex.c. Debug version.
1334 + */
1335 +
1336 +-extern void
1337 +-rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
1338 +-extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
1339 + extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
1340 + extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
1341 + extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
1342 +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
1343 +index dd173df9ee5e5..1c0cb5c3c6ad6 100644
1344 +--- a/kernel/locking/rtmutex.c
1345 ++++ b/kernel/locking/rtmutex.c
1346 +@@ -937,8 +937,6 @@ takeit:
1347 + */
1348 + rt_mutex_set_owner(lock, task);
1349 +
1350 +- rt_mutex_deadlock_account_lock(lock, task);
1351 +-
1352 + return 1;
1353 + }
1354 +
1355 +@@ -1286,6 +1284,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
1356 + return ret;
1357 + }
1358 +
1359 ++static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
1360 ++{
1361 ++ int ret = try_to_take_rt_mutex(lock, current, NULL);
1362 ++
1363 ++ /*
1364 ++ * try_to_take_rt_mutex() sets the lock waiters bit
1365 ++ * unconditionally. Clean this up.
1366 ++ */
1367 ++ fixup_rt_mutex_waiters(lock);
1368 ++
1369 ++ return ret;
1370 ++}
1371 ++
1372 + /*
1373 + * Slow path try-lock function:
1374 + */
1375 +@@ -1307,13 +1318,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
1376 + */
1377 + raw_spin_lock(&lock->wait_lock);
1378 +
1379 +- ret = try_to_take_rt_mutex(lock, current, NULL);
1380 +-
1381 +- /*
1382 +- * try_to_take_rt_mutex() sets the lock waiters bit
1383 +- * unconditionally. Clean this up.
1384 +- */
1385 +- fixup_rt_mutex_waiters(lock);
1386 ++ ret = __rt_mutex_slowtrylock(lock);
1387 +
1388 + raw_spin_unlock(&lock->wait_lock);
1389 +
1390 +@@ -1331,8 +1336,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
1391 +
1392 + debug_rt_mutex_unlock(lock);
1393 +
1394 +- rt_mutex_deadlock_account_unlock(current);
1395 +-
1396 + /*
1397 + * We must be careful here if the fast path is enabled. If we
1398 + * have no waiters queued we cannot set owner to NULL here
1399 +@@ -1398,11 +1401,10 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
1400 + struct hrtimer_sleeper *timeout,
1401 + enum rtmutex_chainwalk chwalk))
1402 + {
1403 +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1404 +- rt_mutex_deadlock_account_lock(lock, current);
1405 ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1406 + return 0;
1407 +- } else
1408 +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
1409 ++
1410 ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
1411 + }
1412 +
1413 + static inline int
1414 +@@ -1414,21 +1416,19 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
1415 + enum rtmutex_chainwalk chwalk))
1416 + {
1417 + if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
1418 +- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1419 +- rt_mutex_deadlock_account_lock(lock, current);
1420 ++ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1421 + return 0;
1422 +- } else
1423 +- return slowfn(lock, state, timeout, chwalk);
1424 ++
1425 ++ return slowfn(lock, state, timeout, chwalk);
1426 + }
1427 +
1428 + static inline int
1429 + rt_mutex_fasttrylock(struct rt_mutex *lock,
1430 + int (*slowfn)(struct rt_mutex *lock))
1431 + {
1432 +- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
1433 +- rt_mutex_deadlock_account_lock(lock, current);
1434 ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
1435 + return 1;
1436 +- }
1437 ++
1438 + return slowfn(lock);
1439 + }
1440 +
1441 +@@ -1438,19 +1438,18 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1442 + struct wake_q_head *wqh))
1443 + {
1444 + WAKE_Q(wake_q);
1445 ++ bool deboost;
1446 +
1447 +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
1448 +- rt_mutex_deadlock_account_unlock(current);
1449 ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
1450 ++ return;
1451 +
1452 +- } else {
1453 +- bool deboost = slowfn(lock, &wake_q);
1454 ++ deboost = slowfn(lock, &wake_q);
1455 +
1456 +- wake_up_q(&wake_q);
1457 ++ wake_up_q(&wake_q);
1458 +
1459 +- /* Undo pi boosting if necessary: */
1460 +- if (deboost)
1461 +- rt_mutex_adjust_prio(current);
1462 +- }
1463 ++ /* Undo pi boosting if necessary: */
1464 ++ if (deboost)
1465 ++ rt_mutex_adjust_prio(current);
1466 + }
1467 +
1468 + /**
1469 +@@ -1485,15 +1484,28 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
1470 +
1471 + /*
1472 + * Futex variant with full deadlock detection.
1473 ++ * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
1474 + */
1475 +-int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1476 ++int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
1477 + struct hrtimer_sleeper *timeout)
1478 + {
1479 + might_sleep();
1480 +
1481 +- return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
1482 +- RT_MUTEX_FULL_CHAINWALK,
1483 +- rt_mutex_slowlock);
1484 ++ return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
1485 ++ timeout, RT_MUTEX_FULL_CHAINWALK);
1486 ++}
1487 ++
1488 ++/*
1489 ++ * Futex variant, must not use fastpath.
1490 ++ */
1491 ++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
1492 ++{
1493 ++ return rt_mutex_slowtrylock(lock);
1494 ++}
1495 ++
1496 ++int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
1497 ++{
1498 ++ return __rt_mutex_slowtrylock(lock);
1499 + }
1500 +
1501 + /**
1502 +@@ -1552,20 +1564,38 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
1503 + EXPORT_SYMBOL_GPL(rt_mutex_unlock);
1504 +
1505 + /**
1506 +- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
1507 +- * @lock: the rt_mutex to be unlocked
1508 +- *
1509 +- * Returns: true/false indicating whether priority adjustment is
1510 +- * required or not.
1511 ++ * Futex variant, that since futex variants do not use the fast-path, can be
1512 ++ * simple and will not need to retry.
1513 + */
1514 +-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
1515 +- struct wake_q_head *wqh)
1516 ++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
1517 ++ struct wake_q_head *wake_q)
1518 + {
1519 +- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
1520 +- rt_mutex_deadlock_account_unlock(current);
1521 +- return false;
1522 ++ lockdep_assert_held(&lock->wait_lock);
1523 ++
1524 ++ debug_rt_mutex_unlock(lock);
1525 ++
1526 ++ if (!rt_mutex_has_waiters(lock)) {
1527 ++ lock->owner = NULL;
1528 ++ return false; /* done */
1529 ++ }
1530 ++
1531 ++ mark_wakeup_next_waiter(wake_q, lock);
1532 ++ return true; /* deboost and wakeups */
1533 ++}
1534 ++
1535 ++void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
1536 ++{
1537 ++ WAKE_Q(wake_q);
1538 ++ bool deboost;
1539 ++
1540 ++ raw_spin_lock_irq(&lock->wait_lock);
1541 ++ deboost = __rt_mutex_futex_unlock(lock, &wake_q);
1542 ++ raw_spin_unlock_irq(&lock->wait_lock);
1543 ++
1544 ++ if (deboost) {
1545 ++ wake_up_q(&wake_q);
1546 ++ rt_mutex_adjust_prio(current);
1547 + }
1548 +- return rt_mutex_slowunlock(lock, wqh);
1549 + }
1550 +
1551 + /**
1552 +@@ -1622,7 +1652,6 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1553 + __rt_mutex_init(lock, NULL);
1554 + debug_rt_mutex_proxy_lock(lock, proxy_owner);
1555 + rt_mutex_set_owner(lock, proxy_owner);
1556 +- rt_mutex_deadlock_account_lock(lock, proxy_owner);
1557 + }
1558 +
1559 + /**
1560 +@@ -1633,12 +1662,10 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1561 + * No locking. Caller has to do serializing itself
1562 + * Special API call for PI-futex support
1563 + */
1564 +-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1565 +- struct task_struct *proxy_owner)
1566 ++void rt_mutex_proxy_unlock(struct rt_mutex *lock)
1567 + {
1568 + debug_rt_mutex_proxy_unlock(lock);
1569 + rt_mutex_set_owner(lock, NULL);
1570 +- rt_mutex_deadlock_account_unlock(proxy_owner);
1571 + }
1572 +
1573 + /**
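The rtmutex hunks above remove the rt_mutex_deadlock_account_{lock,unlock}() bookkeeping from the fast paths and give the futex code slow-path-only entry points (rt_mutex_futex_trylock(), rt_mutex_futex_unlock()), since, as the added comment says, futex variants must not use the fast path. What remains in the fast paths is the usual split: one acquire compare-and-swap on the owner field for the uncontended case, a slow function otherwise. A minimal userspace sketch of that split, assuming C11 atomics; fake_mutex, fast_lock and slow_lock are illustrative names, not taken from the patch:

/* Userspace sketch only -- not kernel code. Illustrates the fast/slow
 * split kept by the hunks above: one acquire cmpxchg of the owner field
 * for the uncontended case, no bookkeeping call, slow path otherwise. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fake_mutex {
	_Atomic(void *) owner;	/* NULL == unlocked */
};

static int slow_lock(struct fake_mutex *m, void *task)
{
	/* stand-in for the contended path (queueing, PI boosting, ...) */
	void *expected = NULL;

	while (!atomic_compare_exchange_weak(&m->owner, &expected, task))
		expected = NULL;
	return 0;
}

static int fast_lock(struct fake_mutex *m, void *task)
{
	void *expected = NULL;

	if (atomic_compare_exchange_strong_explicit(&m->owner, &expected,
						    task,
						    memory_order_acquire,
						    memory_order_relaxed))
		return 0;		/* uncontended: done */

	return slow_lock(m, task);	/* contended: take the slow path */
}

int main(void)
{
	struct fake_mutex m = { .owner = NULL };
	int me;

	printf("fast_lock -> %d\n", fast_lock(&m, &me));
	return 0;
}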
1574 +diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
1575 +index c4060584c4076..6607802efa8bd 100644
1576 +--- a/kernel/locking/rtmutex.h
1577 ++++ b/kernel/locking/rtmutex.h
1578 +@@ -11,8 +11,6 @@
1579 + */
1580 +
1581 + #define rt_mutex_deadlock_check(l) (0)
1582 +-#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
1583 +-#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
1584 + #define debug_rt_mutex_init_waiter(w) do { } while (0)
1585 + #define debug_rt_mutex_free_waiter(w) do { } while (0)
1586 + #define debug_rt_mutex_lock(l) do { } while (0)
1587 +diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
1588 +index 6f8f68edb700c..4584db96265d4 100644
1589 +--- a/kernel/locking/rtmutex_common.h
1590 ++++ b/kernel/locking/rtmutex_common.h
1591 +@@ -101,8 +101,7 @@ enum rtmutex_chainwalk {
1592 + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
1593 + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
1594 + struct task_struct *proxy_owner);
1595 +-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
1596 +- struct task_struct *proxy_owner);
1597 ++extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
1598 + extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1599 + struct rt_mutex_waiter *waiter,
1600 + struct task_struct *task);
1601 +@@ -112,8 +111,13 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
1602 + extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
1603 + struct rt_mutex_waiter *waiter);
1604 + extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
1605 +-extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
1606 +- struct wake_q_head *wqh);
1607 ++extern int rt_mutex_futex_trylock(struct rt_mutex *l);
1608 ++extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
1609 ++
1610 ++extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
1611 ++extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
1612 ++ struct wake_q_head *wqh);
1613 ++
1614 + extern void rt_mutex_adjust_prio(struct task_struct *task);
1615 +
1616 + #ifdef CONFIG_DEBUG_RT_MUTEXES
1617 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1618 +index 7a23792230854..dc877712ef1f3 100644
1619 +--- a/mm/hugetlb.c
1620 ++++ b/mm/hugetlb.c
1621 +@@ -1184,12 +1184,11 @@ struct hstate *size_to_hstate(unsigned long size)
1622 + */
1623 + bool page_huge_active(struct page *page)
1624 + {
1625 +- VM_BUG_ON_PAGE(!PageHuge(page), page);
1626 +- return PageHead(page) && PagePrivate(&page[1]);
1627 ++ return PageHeadHuge(page) && PagePrivate(&page[1]);
1628 + }
1629 +
1630 + /* never called for tail page */
1631 +-static void set_page_huge_active(struct page *page)
1632 ++void set_page_huge_active(struct page *page)
1633 + {
1634 + VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1635 + SetPagePrivate(&page[1]);
1636 +@@ -4544,9 +4543,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
1637 + {
1638 + bool ret = true;
1639 +
1640 +- VM_BUG_ON_PAGE(!PageHead(page), page);
1641 + spin_lock(&hugetlb_lock);
1642 +- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
1643 ++ if (!PageHeadHuge(page) || !page_huge_active(page) ||
1644 ++ !get_page_unless_zero(page)) {
1645 + ret = false;
1646 + goto unlock;
1647 + }
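The hugetlb change converts a crash-on-violation precondition into an in-predicate check: page_huge_active() can be called on a page that is concurrently being freed and re-used, so the VM_BUG_ON_PAGE() assertion is folded into the predicate (PageHeadHuge()), and isolate_huge_page() re-checks under hugetlb_lock instead of asserting. A toy sketch of the assert-to-check conversion; struct fake_page merely stands in for struct page and is not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool is_huge_head; bool active_flag; };

/* safe variant: the type check is part of the predicate, so a page that
 * raced into being something else is rejected instead of tripping a BUG */
static bool huge_active_checked(const struct fake_page *p)
{
	return p->is_huge_head && p->active_flag;
}

int main(void)
{
	struct fake_page reused = { .is_huge_head = false, .active_flag = true };

	printf("%d\n", huge_active_checked(&reused));	/* 0: rejected, no crash */
	return 0;
}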
1648 +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
1649 +index ba4d015bd1a67..7cbb77b7479a6 100644
1650 +--- a/net/lapb/lapb_out.c
1651 ++++ b/net/lapb/lapb_out.c
1652 +@@ -87,7 +87,8 @@ void lapb_kick(struct lapb_cb *lapb)
1653 + skb = skb_dequeue(&lapb->write_queue);
1654 +
1655 + do {
1656 +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
1657 ++ skbn = skb_copy(skb, GFP_ATOMIC);
1658 ++ if (!skbn) {
1659 + skb_queue_head(&lapb->write_queue, skb);
1660 + break;
1661 + }
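For the lapb change: skb_clone() produces a second skb header that shares the original's data buffer, but the original frame must stay untouched on the write queue for possible retransmission while the duplicate is modified further down the stack, so lapb_kick() needs the deep copy that skb_copy() provides. A plain C sketch of the shallow-vs-deep distinction, assuming an invented fake_buf type rather than real sk_buffs:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct fake_buf { unsigned char *data; size_t len; };

static struct fake_buf *buf_clone(struct fake_buf *b)	/* shares data */
{
	struct fake_buf *n = malloc(sizeof(*n));

	if (n)
		*n = *b;
	return n;
}

static struct fake_buf *buf_copy(struct fake_buf *b)	/* owns its data */
{
	struct fake_buf *n = malloc(sizeof(*n));

	if (!n)
		return NULL;
	n->len = b->len;
	n->data = malloc(b->len);
	if (!n->data) {
		free(n);
		return NULL;
	}
	memcpy(n->data, b->data, b->len);
	return n;
}

int main(void)
{
	unsigned char payload[4] = "abc";
	struct fake_buf orig = { payload, sizeof(payload) };
	struct fake_buf *tx = buf_copy(&orig);	/* buf_clone() would alias */

	tx->data[0] = 'X';	/* in-place rewrite of the tx duplicate */
	printf("queued original still: %s\n", (char *)orig.data);
	free(tx->data);
	free(tx);
	return 0;
}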
1662 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
1663 +index df2e4e3112177..5d097ae26b70e 100644
1664 +--- a/net/mac80211/driver-ops.c
1665 ++++ b/net/mac80211/driver-ops.c
1666 +@@ -128,8 +128,11 @@ int drv_sta_state(struct ieee80211_local *local,
1667 + } else if (old_state == IEEE80211_STA_AUTH &&
1668 + new_state == IEEE80211_STA_ASSOC) {
1669 + ret = drv_sta_add(local, sdata, &sta->sta);
1670 +- if (ret == 0)
1671 ++ if (ret == 0) {
1672 + sta->uploaded = true;
1673 ++ if (rcu_access_pointer(sta->sta.rates))
1674 ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
1675 ++ }
1676 + } else if (old_state == IEEE80211_STA_ASSOC &&
1677 + new_state == IEEE80211_STA_AUTH) {
1678 + drv_sta_remove(local, sdata, &sta->sta);
1679 +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
1680 +index a4e2f4e67f941..a4d9e9ee06bee 100644
1681 +--- a/net/mac80211/rate.c
1682 ++++ b/net/mac80211/rate.c
1683 +@@ -888,7 +888,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
1684 + if (old)
1685 + kfree_rcu(old, rcu_head);
1686 +
1687 +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
1688 ++ if (sta->uploaded)
1689 ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
1690 +
1691 + return 0;
1692 + }
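The two mac80211 hunks fix an ordering problem as a pair: rate_control_set_rates() may run before the station has been uploaded to the driver, so rate.c now calls drv_sta_rate_tbl_update() only for uploaded stations, and driver-ops.c replays the update once drv_sta_add() succeeds. A compact sketch of that guard-and-replay pattern, with invented types and function names standing in for the mac80211 ones:

#include <stdbool.h>
#include <stdio.h>

struct fake_sta { bool uploaded; bool has_rates; };

static void drv_rate_update(struct fake_sta *s)
{
	printf("driver notified (uploaded=%d)\n", s->uploaded);
}

static void set_rates(struct fake_sta *s)
{
	s->has_rates = true;
	if (s->uploaded)	/* guard, as added in rate.c */
		drv_rate_update(s);
}

static void upload_sta(struct fake_sta *s)
{
	s->uploaded = true;
	if (s->has_rates)	/* replay, as added in driver-ops.c */
		drv_rate_update(s);
}

int main(void)
{
	struct fake_sta s = { false, false };

	set_rates(&s);		/* too early: no driver call yet */
	upload_sta(&s);		/* update replayed now */
	return 0;
}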
1693 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
1694 +index b379c330a3388..5e9ab343c062b 100644
1695 +--- a/net/sched/sch_api.c
1696 ++++ b/net/sched/sch_api.c
1697 +@@ -391,7 +391,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
1698 + {
1699 + struct qdisc_rate_table *rtab;
1700 +
1701 +- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
1702 ++ if (tab == NULL || r->rate == 0 ||
1703 ++ r->cell_log == 0 || r->cell_log >= 32 ||
1704 + nla_len(tab) != TC_RTAB_SIZE)
1705 + return NULL;
1706 +
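The sch_api check matters because users of the rate table later shift by cell_log, and shifting a 32-bit value by 32 or more is undefined behaviour in C, which malformed netlink input could otherwise trigger; bounding cell_log to [1, 31] rejects such tables up front. A minimal sketch of the guarded shift; checked_cell() is an invented helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

static int checked_cell(uint32_t size, uint32_t cell_log)
{
	if (cell_log == 0 || cell_log >= 32)
		return -1;		/* reject: shift would be UB */
	return (int)(size >> cell_log);
}

int main(void)
{
	printf("%d\n", checked_cell(4096, 10));	/* 4  */
	printf("%d\n", checked_cell(4096, 40));	/* -1 */
	return 0;
}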
1707 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1708 +index 854d2da02cc98..c7061a5dd809a 100644
1709 +--- a/sound/pci/hda/patch_realtek.c
1710 ++++ b/sound/pci/hda/patch_realtek.c
1711 +@@ -6211,7 +6211,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1712 + SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
1713 + ALC225_STANDARD_PINS,
1714 + {0x12, 0xb7a60130},
1715 +- {0x13, 0xb8a60140},
1716 ++ {0x13, 0xb8a61140},
1717 + {0x17, 0x90170110}),
1718 + {}
1719 + };