Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Fri, 17 Mar 2023 10:46:06
Message-Id: 1679049947.e670d9ccb903b2d03030936da7a9de29de88a247.mpagano@gentoo
1 commit: e670d9ccb903b2d03030936da7a9de29de88a247
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Mar 17 10:45:47 2023 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Mar 17 10:45:47 2023 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e670d9cc
7
8 Linux patch 5.4.237
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1236_linux-5.4.237.patch | 2763 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2767 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 70998c7a..682673ff 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -987,6 +987,10 @@ Patch: 1235_linux-5.4.236.patch
21 From: https://www.kernel.org
22 Desc: Linux 5.4.236
23
24 +Patch: 1236_linux-5.4.237.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 5.4.237
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1236_linux-5.4.237.patch b/1236_linux-5.4.237.patch
33 new file mode 100644
34 index 00000000..6f26d2d7
35 --- /dev/null
36 +++ b/1236_linux-5.4.237.patch
37 @@ -0,0 +1,2763 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 8f71a17ad5442..5e5704faae24a 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -1944,24 +1944,57 @@
43 +
44 + ivrs_ioapic [HW,X86_64]
45 + Provide an override to the IOAPIC-ID<->DEVICE-ID
46 +- mapping provided in the IVRS ACPI table. For
47 +- example, to map IOAPIC-ID decimal 10 to
48 +- PCI device 00:14.0 write the parameter as:
49 ++ mapping provided in the IVRS ACPI table.
50 ++ By default, PCI segment is 0, and can be omitted.
51 ++
52 ++ For example, to map IOAPIC-ID decimal 10 to
53 ++ PCI segment 0x1 and PCI device 00:14.0,
54 ++ write the parameter as:
55 ++ ivrs_ioapic=10@0001:00:14.0
56 ++
57 ++ Deprecated formats:
58 ++ * To map IOAPIC-ID decimal 10 to PCI device 00:14.0
59 ++ write the parameter as:
60 + ivrs_ioapic[10]=00:14.0
61 ++ * To map IOAPIC-ID decimal 10 to PCI segment 0x1 and
62 ++ PCI device 00:14.0 write the parameter as:
63 ++ ivrs_ioapic[10]=0001:00:14.0
64 +
65 + ivrs_hpet [HW,X86_64]
66 + Provide an override to the HPET-ID<->DEVICE-ID
67 +- mapping provided in the IVRS ACPI table. For
68 +- example, to map HPET-ID decimal 0 to
69 +- PCI device 00:14.0 write the parameter as:
70 ++ mapping provided in the IVRS ACPI table.
71 ++ By default, PCI segment is 0, and can be omitted.
72 ++
73 ++ For example, to map HPET-ID decimal 10 to
74 ++ PCI segment 0x1 and PCI device 00:14.0,
75 ++ write the parameter as:
76 ++ ivrs_hpet=10@0001:00:14.0
77 ++
78 ++ Deprecated formats:
79 ++ * To map HPET-ID decimal 0 to PCI device 00:14.0
80 ++ write the parameter as:
81 + ivrs_hpet[0]=00:14.0
82 ++ * To map HPET-ID decimal 10 to PCI segment 0x1 and
83 ++ PCI device 00:14.0 write the parameter as:
84 ++			ivrs_hpet[10]=0001:00:14.0
85 +
86 + ivrs_acpihid [HW,X86_64]
87 + Provide an override to the ACPI-HID:UID<->DEVICE-ID
88 +- mapping provided in the IVRS ACPI table. For
89 +- example, to map UART-HID:UID AMD0020:0 to
90 +- PCI device 00:14.5 write the parameter as:
91 ++ mapping provided in the IVRS ACPI table.
92 ++ By default, PCI segment is 0, and can be omitted.
93 ++
94 ++ For example, to map UART-HID:UID AMD0020:0 to
95 ++ PCI segment 0x1 and PCI device ID 00:14.5,
96 ++ write the parameter as:
97 ++ ivrs_acpihid=AMD0020:0@0001:00:14.5
98 ++
99 ++ Deprecated formats:
100 ++			* To map UART-HID:UID AMD0020:0 to PCI segment 0 and
101 ++ PCI device ID 00:14.5, write the parameter as:
102 + ivrs_acpihid[00:14.5]=AMD0020:0
103 ++ * To map UART-HID:UID AMD0020:0 to PCI segment 0x1 and
104 ++ PCI device ID 00:14.5, write the parameter as:
105 ++ ivrs_acpihid[0001:00:14.5]=AMD0020:0
106 +
107 + js= [HW,JOY] Analog joystick
108 + See Documentation/input/joydev/joystick.rst.
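
For reference, the device ID that the new seg@bus:dev.fn syntax ultimately resolves to is a simple bit-packed value. The sketch below is a standalone illustration (the pack_sbdf() helper name is invented here); it mirrors the IVRS_GET_SBDF_ID packing introduced further down in this patch and prints the ID for the documented example ivrs_ioapic=10@0001:00:14.0.

/* Hedged sketch: pack PCI segment/bus/device/function into the 32-bit
 * device ID implied by the new ivrs_* syntax. pack_sbdf() is an invented
 * helper name; the kernel uses an equivalent macro (IVRS_GET_SBDF_ID). */
#include <stdio.h>

static unsigned int pack_sbdf(unsigned int seg, unsigned int bus,
                              unsigned int dev, unsigned int fn)
{
        return ((seg & 0xffff) << 16) | ((bus & 0xff) << 8) |
               ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
        /* ivrs_ioapic=10@0001:00:14.0 -> segment 0x1, bus 0x00, dev 0x14, fn 0 */
        printf("devid = 0x%08x\n", pack_sbdf(0x1, 0x00, 0x14, 0x0)); /* 0x000100a0 */
        return 0;
}
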
109 +diff --git a/Makefile b/Makefile
110 +index e3b56259d50af..ccac1c82eb781 100644
111 +--- a/Makefile
112 ++++ b/Makefile
113 +@@ -1,7 +1,7 @@
114 + # SPDX-License-Identifier: GPL-2.0
115 + VERSION = 5
116 + PATCHLEVEL = 4
117 +-SUBLEVEL = 236
118 ++SUBLEVEL = 237
119 + EXTRAVERSION =
120 + NAME = Kleptomaniac Octopus
121 +
122 +diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
123 +index ac110ae8f9780..b19a8aae74e1f 100644
124 +--- a/arch/alpha/kernel/module.c
125 ++++ b/arch/alpha/kernel/module.c
126 +@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
127 + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
128 + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
129 +
130 +- /* The small sections were sorted to the end of the segment.
131 +- The following should definitely cover them. */
132 +- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
133 + got = sechdrs[me->arch.gotsecindex].sh_addr;
134 ++ gp = got + 0x8000;
135 +
136 + for (i = 0; i < n; i++) {
137 + unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
138 +diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
139 +index 6f40d1515580b..1ff8a987025c8 100644
140 +--- a/arch/mips/include/asm/mach-rc32434/pci.h
141 ++++ b/arch/mips/include/asm/mach-rc32434/pci.h
142 +@@ -377,7 +377,7 @@ struct pci_msu {
143 + PCI_CFG04_STAT_SSE | \
144 + PCI_CFG04_STAT_PE)
145 +
146 +-#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD)
147 ++#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD)
148 +
149 + #define KORINA_REVID 0
150 + #define KORINA_CLASS_CODE 0
151 +diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
152 +index 3ea360cad337b..46dfb3701c6ea 100644
153 +--- a/arch/powerpc/kernel/vmlinux.lds.S
154 ++++ b/arch/powerpc/kernel/vmlinux.lds.S
155 +@@ -6,6 +6,7 @@
156 + #endif
157 +
158 + #define BSS_FIRST_SECTIONS *(.bss.prominit)
159 ++#define RUNTIME_DISCARD_EXIT
160 +
161 + #include <asm/page.h>
162 + #include <asm-generic/vmlinux.lds.h>
163 +@@ -394,9 +395,12 @@ SECTIONS
164 + DISCARDS
165 + /DISCARD/ : {
166 + *(*.EMB.apuinfo)
167 +- *(.glink .iplt .plt .rela* .comment)
168 ++ *(.glink .iplt .plt .comment)
169 + *(.gnu.version*)
170 + *(.gnu.attributes)
171 + *(.eh_frame)
172 ++#ifndef CONFIG_RELOCATABLE
173 ++ *(.rela*)
174 ++#endif
175 + }
176 + }
177 +diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
178 +index 19e46f4160cc3..5ba4d23971fdb 100644
179 +--- a/arch/riscv/kernel/stacktrace.c
180 ++++ b/arch/riscv/kernel/stacktrace.c
181 +@@ -89,7 +89,7 @@ void notrace walk_stackframe(struct task_struct *task,
182 + while (!kstack_end(ksp)) {
183 + if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
184 + break;
185 +- pc = (*ksp++) - 0x4;
186 ++ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
187 + }
188 + }
189 +
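
The riscv change above wraps each stack-slot read in READ_ONCE_NOCHECK(), which forces a single volatile load and skips sanitizer instrumentation when peeking at another task's live stack. A rough userspace reduction of the READ_ONCE idea (GCC/Clang __typeof__ assumed; the NOCHECK aspect has no userspace equivalent) looks like this:

/* Hedged illustration of the READ_ONCE idiom: exactly one volatile load,
 * so the compiler cannot re-read or tear the access. Not the kernel macro. */
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

int main(void)
{
        unsigned long slot = 0x80001234UL;          /* stands in for *ksp */
        unsigned long pc = READ_ONCE(slot) - 0x4;   /* as in walk_stackframe() */

        printf("pc = 0x%lx\n", pc);
        return 0;
}
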
190 +diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
191 +index 99053c80388e4..f8cc4e8524bff 100644
192 +--- a/arch/s390/kernel/vmlinux.lds.S
193 ++++ b/arch/s390/kernel/vmlinux.lds.S
194 +@@ -15,6 +15,8 @@
195 + /* Handle ro_after_init data on our own. */
196 + #define RO_AFTER_INIT_DATA
197 +
198 ++#define RUNTIME_DISCARD_EXIT
199 ++
200 + #include <asm-generic/vmlinux.lds.h>
201 + #include <asm/vmlinux.lds.h>
202 +
203 +diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
204 +index 77a59d8c6b4d4..ec3bae172b203 100644
205 +--- a/arch/sh/kernel/vmlinux.lds.S
206 ++++ b/arch/sh/kernel/vmlinux.lds.S
207 +@@ -10,6 +10,7 @@ OUTPUT_ARCH(sh:sh5)
208 + #define LOAD_OFFSET 0
209 + OUTPUT_ARCH(sh)
210 + #endif
211 ++#define RUNTIME_DISCARD_EXIT
212 +
213 + #include <asm/thread_info.h>
214 + #include <asm/cache.h>
215 +diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
216 +index 16e49bfa2b426..53d719c04ba94 100644
217 +--- a/arch/um/kernel/vmlinux.lds.S
218 ++++ b/arch/um/kernel/vmlinux.lds.S
219 +@@ -1,4 +1,4 @@
220 +-
221 ++#define RUNTIME_DISCARD_EXIT
222 + KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
223 +
224 + #ifdef CONFIG_LD_SCRIPT_STATIC
225 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
226 +index bc6cd29ddf163..b212771df0226 100644
227 +--- a/arch/x86/kernel/cpu/amd.c
228 ++++ b/arch/x86/kernel/cpu/amd.c
229 +@@ -205,6 +205,15 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
230 + return;
231 + }
232 + #endif
233 ++ /*
234 ++ * Work around Erratum 1386. The XSAVES instruction malfunctions in
235 ++ * certain circumstances on Zen1/2 uarch, and not all parts have had
236 ++ * updated microcode at the time of writing (March 2023).
237 ++ *
238 ++ * Affected parts all have no supervisor XSAVE states, meaning that
239 ++ * the XSAVEC instruction (which works fine) is equivalent.
240 ++ */
241 ++ clear_cpu_cap(c, X86_FEATURE_XSAVES);
242 + }
243 +
244 + static void init_amd_k7(struct cpuinfo_x86 *c)
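
The comment added above explains the erratum 1386 workaround: the affected parts use no supervisor XSAVE states, so masking the XSAVES feature bit and falling back to XSAVEC loses nothing. As a toy illustration only (the bit layout and helpers below are invented, not the kernel's cpu_cap machinery), clearing a capability bit amounts to:

/* Hedged sketch of feature-bit masking; FEATURE_XSAVES and the helpers are
 * invented stand-ins for the kernel's X86_FEATURE_* / clear_cpu_cap(). */
#include <stdio.h>

#define FEATURE_XSAVES  3           /* arbitrary bit index for illustration */

static unsigned long caps = 0xffUL; /* pretend all features start enabled */

static void clear_cap(int bit) { caps &= ~(1UL << bit); }
static int  has_cap(int bit)   { return !!(caps & (1UL << bit)); }

int main(void)
{
        clear_cap(FEATURE_XSAVES);
        printf("xsaves supported: %d\n", has_cap(FEATURE_XSAVES)); /* 0 */
        return 0;
}
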
245 +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
246 +index 1afe211d7a7ca..7e0e21082c93e 100644
247 +--- a/arch/x86/kernel/vmlinux.lds.S
248 ++++ b/arch/x86/kernel/vmlinux.lds.S
249 +@@ -21,6 +21,8 @@
250 + #define LOAD_OFFSET __START_KERNEL_map
251 + #endif
252 +
253 ++#define RUNTIME_DISCARD_EXIT
254 ++
255 + #include <asm-generic/vmlinux.lds.h>
256 + #include <asm/asm-offsets.h>
257 + #include <asm/thread_info.h>
258 +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
259 +index d6b69e19f78a7..d5f068a10a5a0 100644
260 +--- a/drivers/char/ipmi/ipmi_ssif.c
261 ++++ b/drivers/char/ipmi/ipmi_ssif.c
262 +@@ -79,7 +79,8 @@
263 + /*
264 + * Timer values
265 + */
266 +-#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
267 ++#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
268 ++#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
269 + #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
270 +
271 + /* How many times to we retry sending/receiving the message. */
272 +@@ -87,7 +88,9 @@
273 + #define SSIF_RECV_RETRIES 250
274 +
275 + #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
276 ++#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
277 + #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
278 ++#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
279 + #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
280 +
281 + /*
282 +@@ -236,6 +239,9 @@ struct ssif_info {
283 + bool got_alert;
284 + bool waiting_alert;
285 +
286 ++ /* Used to inform the timeout that it should do a resend. */
287 ++ bool do_resend;
288 ++
289 + /*
290 + * If set to true, this will request events the next time the
291 + * state machine is idle.
292 +@@ -248,12 +254,6 @@ struct ssif_info {
293 + */
294 + bool req_flags;
295 +
296 +- /*
297 +- * Used to perform timer operations when run-to-completion
298 +- * mode is on. This is a countdown timer.
299 +- */
300 +- int rtc_us_timer;
301 +-
302 + /* Used for sending/receiving data. +1 for the length. */
303 + unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
304 + unsigned int data_len;
305 +@@ -515,7 +515,7 @@ static int ipmi_ssif_thread(void *data)
306 + return 0;
307 + }
308 +
309 +-static int ssif_i2c_send(struct ssif_info *ssif_info,
310 ++static void ssif_i2c_send(struct ssif_info *ssif_info,
311 + ssif_i2c_done handler,
312 + int read_write, int command,
313 + unsigned char *data, unsigned int size)
314 +@@ -527,7 +527,6 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
315 + ssif_info->i2c_data = data;
316 + ssif_info->i2c_size = size;
317 + complete(&ssif_info->wake_thread);
318 +- return 0;
319 + }
320 +
321 +
322 +@@ -536,40 +535,35 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
323 +
324 + static void start_get(struct ssif_info *ssif_info)
325 + {
326 +- int rv;
327 +-
328 +- ssif_info->rtc_us_timer = 0;
329 + ssif_info->multi_pos = 0;
330 +
331 +- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
332 +- SSIF_IPMI_RESPONSE,
333 +- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
334 +- if (rv < 0) {
335 +- /* request failed, just return the error. */
336 +- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
337 +- dev_dbg(&ssif_info->client->dev,
338 +- "Error from i2c_non_blocking_op(5)\n");
339 +-
340 +- msg_done_handler(ssif_info, -EIO, NULL, 0);
341 +- }
342 ++ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
343 ++ SSIF_IPMI_RESPONSE,
344 ++ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
345 + }
346 +
347 ++static void start_resend(struct ssif_info *ssif_info);
348 ++
349 + static void retry_timeout(struct timer_list *t)
350 + {
351 + struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
352 + unsigned long oflags, *flags;
353 +- bool waiting;
354 ++ bool waiting, resend;
355 +
356 + if (ssif_info->stopping)
357 + return;
358 +
359 + flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
360 ++ resend = ssif_info->do_resend;
361 ++ ssif_info->do_resend = false;
362 + waiting = ssif_info->waiting_alert;
363 + ssif_info->waiting_alert = false;
364 + ipmi_ssif_unlock_cond(ssif_info, flags);
365 +
366 + if (waiting)
367 + start_get(ssif_info);
368 ++ if (resend)
369 ++ start_resend(ssif_info);
370 + }
371 +
372 + static void watch_timeout(struct timer_list *t)
373 +@@ -618,14 +612,11 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
374 + start_get(ssif_info);
375 + }
376 +
377 +-static int start_resend(struct ssif_info *ssif_info);
378 +-
379 + static void msg_done_handler(struct ssif_info *ssif_info, int result,
380 + unsigned char *data, unsigned int len)
381 + {
382 + struct ipmi_smi_msg *msg;
383 + unsigned long oflags, *flags;
384 +- int rv;
385 +
386 + /*
387 + * We are single-threaded here, so no need for a lock until we
388 +@@ -639,7 +630,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
389 +
390 + flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
391 + ssif_info->waiting_alert = true;
392 +- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
393 + if (!ssif_info->stopping)
394 + mod_timer(&ssif_info->retry_timer,
395 + jiffies + SSIF_MSG_JIFFIES);
396 +@@ -671,17 +661,10 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
397 + ssif_info->multi_len = len;
398 + ssif_info->multi_pos = 1;
399 +
400 +- rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
401 +- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
402 +- ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
403 +- if (rv < 0) {
404 +- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
405 +- dev_dbg(&ssif_info->client->dev,
406 +- "Error from i2c_non_blocking_op(1)\n");
407 +-
408 +- result = -EIO;
409 +- } else
410 +- return;
411 ++ ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
412 ++ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
413 ++ ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
414 ++ return;
415 + } else if (ssif_info->multi_pos) {
416 + /* Middle of multi-part read. Start the next transaction. */
417 + int i;
418 +@@ -743,19 +726,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
419 +
420 + ssif_info->multi_pos++;
421 +
422 +- rv = ssif_i2c_send(ssif_info, msg_done_handler,
423 +- I2C_SMBUS_READ,
424 +- SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
425 +- ssif_info->recv,
426 +- I2C_SMBUS_BLOCK_DATA);
427 +- if (rv < 0) {
428 +- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
429 +- dev_dbg(&ssif_info->client->dev,
430 +- "Error from ssif_i2c_send\n");
431 +-
432 +- result = -EIO;
433 +- } else
434 +- return;
435 ++ ssif_i2c_send(ssif_info, msg_done_handler,
436 ++ I2C_SMBUS_READ,
437 ++ SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
438 ++ ssif_info->recv,
439 ++ I2C_SMBUS_BLOCK_DATA);
440 ++ return;
441 + }
442 + }
443 +
444 +@@ -936,37 +912,27 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
445 + static void msg_written_handler(struct ssif_info *ssif_info, int result,
446 + unsigned char *data, unsigned int len)
447 + {
448 +- int rv;
449 +-
450 + /* We are single-threaded here, so no need for a lock. */
451 + if (result < 0) {
452 + ssif_info->retries_left--;
453 + if (ssif_info->retries_left > 0) {
454 +- if (!start_resend(ssif_info)) {
455 +- ssif_inc_stat(ssif_info, send_retries);
456 +- return;
457 +- }
458 +- /* request failed, just return the error. */
459 +- ssif_inc_stat(ssif_info, send_errors);
460 +-
461 +- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
462 +- dev_dbg(&ssif_info->client->dev,
463 +- "%s: Out of retries\n", __func__);
464 +- msg_done_handler(ssif_info, -EIO, NULL, 0);
465 ++ /*
466 ++ * Wait the retry timeout time per the spec,
467 ++ * then redo the send.
468 ++ */
469 ++ ssif_info->do_resend = true;
470 ++ mod_timer(&ssif_info->retry_timer,
471 ++ jiffies + SSIF_REQ_RETRY_JIFFIES);
472 + return;
473 + }
474 +
475 + ssif_inc_stat(ssif_info, send_errors);
476 +
477 +- /*
478 +- * Got an error on transmit, let the done routine
479 +- * handle it.
480 +- */
481 + if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
482 + dev_dbg(&ssif_info->client->dev,
483 +- "%s: Error %d\n", __func__, result);
484 ++ "%s: Out of retries\n", __func__);
485 +
486 +- msg_done_handler(ssif_info, result, NULL, 0);
487 ++ msg_done_handler(ssif_info, -EIO, NULL, 0);
488 + return;
489 + }
490 +
491 +@@ -1000,18 +966,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
492 + ssif_info->multi_data = NULL;
493 + }
494 +
495 +- rv = ssif_i2c_send(ssif_info, msg_written_handler,
496 +- I2C_SMBUS_WRITE, cmd,
497 +- data_to_send, I2C_SMBUS_BLOCK_DATA);
498 +- if (rv < 0) {
499 +- /* request failed, just return the error. */
500 +- ssif_inc_stat(ssif_info, send_errors);
501 +-
502 +- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
503 +- dev_dbg(&ssif_info->client->dev,
504 +- "Error from i2c_non_blocking_op(3)\n");
505 +- msg_done_handler(ssif_info, -EIO, NULL, 0);
506 +- }
507 ++ ssif_i2c_send(ssif_info, msg_written_handler,
508 ++ I2C_SMBUS_WRITE, cmd,
509 ++ data_to_send, I2C_SMBUS_BLOCK_DATA);
510 + } else {
511 + /* Ready to request the result. */
512 + unsigned long oflags, *flags;
513 +@@ -1029,7 +986,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
514 + /* Wait a jiffie then request the next message */
515 + ssif_info->waiting_alert = true;
516 + ssif_info->retries_left = SSIF_RECV_RETRIES;
517 +- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
518 + if (!ssif_info->stopping)
519 + mod_timer(&ssif_info->retry_timer,
520 + jiffies + SSIF_MSG_PART_JIFFIES);
521 +@@ -1038,9 +994,8 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
522 + }
523 + }
524 +
525 +-static int start_resend(struct ssif_info *ssif_info)
526 ++static void start_resend(struct ssif_info *ssif_info)
527 + {
528 +- int rv;
529 + int command;
530 +
531 + ssif_info->got_alert = false;
532 +@@ -1062,12 +1017,8 @@ static int start_resend(struct ssif_info *ssif_info)
533 + ssif_info->data[0] = ssif_info->data_len;
534 + }
535 +
536 +- rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
537 +- command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
538 +- if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
539 +- dev_dbg(&ssif_info->client->dev,
540 +- "Error from i2c_non_blocking_op(4)\n");
541 +- return rv;
542 ++ ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
543 ++ command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
544 + }
545 +
546 + static int start_send(struct ssif_info *ssif_info,
547 +@@ -1082,7 +1033,8 @@ static int start_send(struct ssif_info *ssif_info,
548 + ssif_info->retries_left = SSIF_SEND_RETRIES;
549 + memcpy(ssif_info->data + 1, data, len);
550 + ssif_info->data_len = len;
551 +- return start_resend(ssif_info);
552 ++ start_resend(ssif_info);
553 ++ return 0;
554 + }
555 +
556 + /* Must be called with the message lock held. */
557 +@@ -1382,8 +1334,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
558 + ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
559 + if (ret) {
560 + retry_cnt--;
561 +- if (retry_cnt > 0)
562 ++ if (retry_cnt > 0) {
563 ++ msleep(SSIF_REQ_RETRY_MSEC);
564 + goto retry1;
565 ++ }
566 + return -ENODEV;
567 + }
568 +
569 +@@ -1523,8 +1477,10 @@ retry_write:
570 + 32, msg);
571 + if (ret) {
572 + retry_cnt--;
573 +- if (retry_cnt > 0)
574 ++ if (retry_cnt > 0) {
575 ++ msleep(SSIF_REQ_RETRY_MSEC);
576 + goto retry_write;
577 ++ }
578 + dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
579 + return ret;
580 + }
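
The new SSIF constants above encode the specification's 60 ms T3/T6 waits and derive millisecond and jiffy values from the microsecond figures. The standalone sketch below redoes that arithmetic under the assumption HZ = 250 (so TICK_NSEC is 4,000,000 ns); the kernel performs the same computation with its real TICK_NSEC.

/* Hedged sketch of the SSIF_REQ_RETRY_* derivations, assuming HZ = 250.
 * The values and macro shapes follow the patch; HZ here is an assumption. */
#include <stdio.h>

#define HZ                      250UL
#define TICK_NSEC               (1000000000UL / HZ)    /* 4,000,000 ns */
#define SSIF_REQ_RETRY_USEC     60000UL                /* 60 ms (T6) */
#define SSIF_REQ_RETRY_MSEC     (SSIF_REQ_RETRY_USEC / 1000)
#define SSIF_REQ_RETRY_JIFFIES  ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)

int main(void)
{
        printf("send retry interval: %lu ms = %lu jiffies at HZ=%lu\n",
               SSIF_REQ_RETRY_MSEC, SSIF_REQ_RETRY_JIFFIES, HZ); /* 60 ms, 15 jiffies */
        return 0;
}
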
581 +diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
582 +index 72ad7fff64a7a..ccb62c480bdd7 100644
583 +--- a/drivers/char/ipmi/ipmi_watchdog.c
584 ++++ b/drivers/char/ipmi/ipmi_watchdog.c
585 +@@ -498,7 +498,7 @@ static void panic_halt_ipmi_heartbeat(void)
586 + msg.cmd = IPMI_WDOG_RESET_TIMER;
587 + msg.data = NULL;
588 + msg.data_len = 0;
589 +- atomic_add(1, &panic_done_count);
590 ++ atomic_add(2, &panic_done_count);
591 + rv = ipmi_request_supply_msgs(watchdog_user,
592 + (struct ipmi_addr *) &addr,
593 + 0,
594 +@@ -508,7 +508,7 @@ static void panic_halt_ipmi_heartbeat(void)
595 + &panic_halt_heartbeat_recv_msg,
596 + 1);
597 + if (rv)
598 +- atomic_sub(1, &panic_done_count);
599 ++ atomic_sub(2, &panic_done_count);
600 + }
601 +
602 + static struct ipmi_smi_msg panic_halt_smi_msg = {
603 +@@ -532,12 +532,12 @@ static void panic_halt_ipmi_set_timeout(void)
604 + /* Wait for the messages to be free. */
605 + while (atomic_read(&panic_done_count) != 0)
606 + ipmi_poll_interface(watchdog_user);
607 +- atomic_add(1, &panic_done_count);
608 ++ atomic_add(2, &panic_done_count);
609 + rv = __ipmi_set_timeout(&panic_halt_smi_msg,
610 + &panic_halt_recv_msg,
611 + &send_heartbeat_now);
612 + if (rv) {
613 +- atomic_sub(1, &panic_done_count);
614 ++ atomic_sub(2, &panic_done_count);
615 + pr_warn("Unable to extend the watchdog timeout\n");
616 + } else {
617 + if (send_heartbeat_now)
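
In the watchdog hunk above, panic_done_count is now bumped by two per request, presumably because each request is tracked by two messages (the SMI message and the receive message) whose release paths each drop the counter by one, while the panic code polls the interface until the counter reaches zero. A hypothetical userspace reduction of that credit-counting pattern, using C11 atomics with invented names:

/* Hedged sketch: reserve two "done" credits before issuing a request and
 * release one credit per message completion, polling until the count drains.
 * Only the counting pattern mirrors the patch; everything else is invented. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int done_count;

static void msg_released(const char *what)
{
        atomic_fetch_sub(&done_count, 1);
        printf("%s released, remaining %d\n", what, atomic_load(&done_count));
}

int main(void)
{
        atomic_fetch_add(&done_count, 2);   /* SMI message + receive message */

        msg_released("smi msg");
        msg_released("recv msg");

        while (atomic_load(&done_count) != 0)
                ;   /* the kernel polls the IPMI interface here instead */

        puts("request fully accounted for");
        return 0;
}
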
618 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
619 +index 14aeaf7363210..2fc0f221fb4e2 100644
620 +--- a/drivers/gpu/drm/drm_atomic.c
621 ++++ b/drivers/gpu/drm/drm_atomic.c
622 +@@ -1006,6 +1006,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
623 + drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
624 + drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
625 + drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
626 ++ drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
627 +
628 + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
629 + if (state->writeback_job && state->writeback_job->fb)
630 +diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
631 +index eee9fcbe04344..808269b2108fb 100644
632 +--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
633 ++++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
634 +@@ -1208,7 +1208,7 @@ int intel_ring_pin(struct intel_ring *ring)
635 + if (unlikely(ret))
636 + goto err_unpin;
637 +
638 +- if (i915_vma_is_map_and_fenceable(vma))
639 ++ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
640 + addr = (void __force *)i915_vma_pin_iomap(vma);
641 + else
642 + addr = i915_gem_object_pin_map(vma->obj,
643 +@@ -1252,7 +1252,7 @@ void intel_ring_unpin(struct intel_ring *ring)
644 + intel_ring_reset(ring, ring->emit);
645 +
646 + i915_vma_unset_ggtt_write(vma);
647 +- if (i915_vma_is_map_and_fenceable(vma))
648 ++ if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
649 + i915_vma_unpin_iomap(vma);
650 + else
651 + i915_gem_object_unpin_map(vma->obj);
652 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
653 +index e3579e5ffa146..593b8d83179c9 100644
654 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
655 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
656 +@@ -135,8 +135,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
657 + OUT_RING(ring, 1);
658 +
659 + /* Enable local preemption for finegrain preemption */
660 +- OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
661 +- OUT_RING(ring, 0x02);
662 ++ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
663 ++ OUT_RING(ring, 0x1);
664 +
665 + /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
666 + OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
667 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
668 +index 533b920ed7df2..4a9feff340da7 100644
669 +--- a/drivers/iommu/amd_iommu_init.c
670 ++++ b/drivers/iommu/amd_iommu_init.c
671 +@@ -84,6 +84,10 @@
672 + #define ACPI_DEVFLAG_ATSDIS 0x10000000
673 +
674 + #define LOOP_TIMEOUT 2000000
675 ++
676 ++#define IVRS_GET_SBDF_ID(seg, bus, dev, fd) (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
677 ++ | ((dev & 0x1f) << 3) | (fn & 0x7))
678 ++
679 + /*
680 + * ACPI table definitions
681 + *
682 +@@ -2971,24 +2975,32 @@ static int __init parse_amd_iommu_options(char *str)
683 +
684 + static int __init parse_ivrs_ioapic(char *str)
685 + {
686 +- unsigned int bus, dev, fn;
687 +- int ret, id, i;
688 +- u16 devid;
689 ++ u32 seg = 0, bus, dev, fn;
690 ++ int id, i;
691 ++ u32 devid;
692 +
693 +- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
694 ++ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
695 ++ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
696 ++ goto found;
697 +
698 +- if (ret != 4) {
699 +- pr_err("Invalid command line: ivrs_ioapic%s\n", str);
700 +- return 1;
701 ++ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
702 ++ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
703 ++ pr_warn("ivrs_ioapic%s option format deprecated; use ivrs_ioapic=%d@%04x:%02x:%02x.%d instead\n",
704 ++ str, id, seg, bus, dev, fn);
705 ++ goto found;
706 + }
707 +
708 ++ pr_err("Invalid command line: ivrs_ioapic%s\n", str);
709 ++ return 1;
710 ++
711 ++found:
712 + if (early_ioapic_map_size == EARLY_MAP_SIZE) {
713 + pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
714 + str);
715 + return 1;
716 + }
717 +
718 +- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
719 ++ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
720 +
721 + cmdline_maps = true;
722 + i = early_ioapic_map_size++;
723 +@@ -3001,24 +3013,32 @@ static int __init parse_ivrs_ioapic(char *str)
724 +
725 + static int __init parse_ivrs_hpet(char *str)
726 + {
727 +- unsigned int bus, dev, fn;
728 +- int ret, id, i;
729 +- u16 devid;
730 ++ u32 seg = 0, bus, dev, fn;
731 ++ int id, i;
732 ++ u32 devid;
733 +
734 +- ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
735 ++ if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
736 ++ sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
737 ++ goto found;
738 +
739 +- if (ret != 4) {
740 +- pr_err("Invalid command line: ivrs_hpet%s\n", str);
741 +- return 1;
742 ++ if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
743 ++ sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
744 ++ pr_warn("ivrs_hpet%s option format deprecated; use ivrs_hpet=%d@%04x:%02x:%02x.%d instead\n",
745 ++ str, id, seg, bus, dev, fn);
746 ++ goto found;
747 + }
748 +
749 ++ pr_err("Invalid command line: ivrs_hpet%s\n", str);
750 ++ return 1;
751 ++
752 ++found:
753 + if (early_hpet_map_size == EARLY_MAP_SIZE) {
754 + pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
755 + str);
756 + return 1;
757 + }
758 +
759 +- devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
760 ++ devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
761 +
762 + cmdline_maps = true;
763 + i = early_hpet_map_size++;
764 +@@ -3029,19 +3049,53 @@ static int __init parse_ivrs_hpet(char *str)
765 + return 1;
766 + }
767 +
768 ++#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
769 ++
770 + static int __init parse_ivrs_acpihid(char *str)
771 + {
772 +- u32 bus, dev, fn;
773 +- char *hid, *uid, *p;
774 +- char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
775 +- int ret, i;
776 ++ u32 seg = 0, bus, dev, fn;
777 ++ char *hid, *uid, *p, *addr;
778 ++ char acpiid[ACPIID_LEN] = {0};
779 ++ int i;
780 +
781 +- ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
782 +- if (ret != 4) {
783 +- pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
784 +- return 1;
785 ++ addr = strchr(str, '@');
786 ++ if (!addr) {
787 ++ addr = strchr(str, '=');
788 ++ if (!addr)
789 ++ goto not_found;
790 ++
791 ++ ++addr;
792 ++
793 ++ if (strlen(addr) > ACPIID_LEN)
794 ++ goto not_found;
795 ++
796 ++ if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
797 ++ sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
798 ++ pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
799 ++ str, acpiid, seg, bus, dev, fn);
800 ++ goto found;
801 ++ }
802 ++ goto not_found;
803 + }
804 +
805 ++ /* We have the '@', make it the terminator to get just the acpiid */
806 ++ *addr++ = 0;
807 ++
808 ++ if (strlen(str) > ACPIID_LEN + 1)
809 ++ goto not_found;
810 ++
811 ++ if (sscanf(str, "=%s", acpiid) != 1)
812 ++ goto not_found;
813 ++
814 ++ if (sscanf(addr, "%x:%x.%x", &bus, &dev, &fn) == 3 ||
815 ++ sscanf(addr, "%x:%x:%x.%x", &seg, &bus, &dev, &fn) == 4)
816 ++ goto found;
817 ++
818 ++not_found:
819 ++ pr_err("Invalid command line: ivrs_acpihid%s\n", str);
820 ++ return 1;
821 ++
822 ++found:
823 + p = acpiid;
824 + hid = strsep(&p, ":");
825 + uid = p;
826 +@@ -3061,8 +3115,7 @@ static int __init parse_ivrs_acpihid(char *str)
827 + i = early_acpihid_map_size++;
828 + memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
829 + memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
830 +- early_acpihid_map[i].devid =
831 +- ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
832 ++ early_acpihid_map[i].devid = IVRS_GET_SBDF_ID(seg, bus, dev, fn);
833 + early_acpihid_map[i].cmd_line = true;
834 +
835 + return 1;
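
The parser rework above tries the new id@seg:bus:dev.fn form first and only then falls back to the bracketed formats, printing a deprecation warning. A standalone approximation of the ivrs_ioapic matching is sketched below; the sscanf patterns and the device-ID packing are taken from the patch, while the early-map table handling is omitted and the print statements are stand-ins for pr_warn()/pr_err().

/* Hedged sketch of the ivrs_ioapic option matching: try the new '@' form,
 * then the deprecated bracket forms, then pack the device ID. */
#include <stdio.h>

static unsigned int sbdf_id(unsigned int seg, unsigned int bus,
                            unsigned int dev, unsigned int fn)
{
        return ((seg & 0xffff) << 16) | ((bus & 0xff) << 8) |
               ((dev & 0x1f) << 3) | (fn & 0x7);
}

static int parse_ivrs_ioapic(const char *str)
{
        unsigned int seg = 0, bus, dev, fn;
        int id;

        if (sscanf(str, "=%d@%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
            sscanf(str, "=%d@%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5)
                goto found;

        if (sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn) == 4 ||
            sscanf(str, "[%d]=%x:%x:%x.%x", &id, &seg, &bus, &dev, &fn) == 5) {
                printf("deprecated format: ivrs_ioapic%s\n", str);
                goto found;
        }

        printf("invalid: ivrs_ioapic%s\n", str);
        return 1;

found:
        printf("ioapic %d -> devid 0x%08x\n", id, sbdf_id(seg, bus, dev, fn));
        return 1;
}

int main(void)
{
        parse_ivrs_ioapic("=10@0001:00:14.0");   /* new format */
        parse_ivrs_ioapic("[10]=00:14.0");       /* deprecated format */
        return 0;
}
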
836 +diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
837 +index e7cb0b8a73327..58f060006ba31 100644
838 +--- a/drivers/iommu/intel-pasid.c
839 ++++ b/drivers/iommu/intel-pasid.c
840 +@@ -166,6 +166,9 @@ int intel_pasid_alloc_table(struct device *dev)
841 + attach_out:
842 + device_attach_pasid_table(info, pasid_table);
843 +
844 ++ if (!ecap_coherent(info->iommu->ecap))
845 ++ clflush_cache_range(pasid_table->table, size);
846 ++
847 + return 0;
848 + }
849 +
850 +@@ -250,6 +253,10 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
851 +
852 + WRITE_ONCE(dir[dir_index].val,
853 + (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
854 ++ if (!ecap_coherent(info->iommu->ecap)) {
855 ++ clflush_cache_range(entries, VTD_PAGE_SIZE);
856 ++ clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
857 ++ }
858 + }
859 + spin_unlock(&pasid_lock);
860 +
861 +diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
862 +index 1e5fa09845e77..8713e80201c07 100644
863 +--- a/drivers/macintosh/windfarm_lm75_sensor.c
864 ++++ b/drivers/macintosh/windfarm_lm75_sensor.c
865 +@@ -34,8 +34,8 @@
866 + #endif
867 +
868 + struct wf_lm75_sensor {
869 +- int ds1775 : 1;
870 +- int inited : 1;
871 ++ unsigned int ds1775 : 1;
872 ++ unsigned int inited : 1;
873 + struct i2c_client *i2c;
874 + struct wf_sensor sens;
875 + };
876 +diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
877 +index 3e6059eaa1380..90823b4280259 100644
878 +--- a/drivers/macintosh/windfarm_smu_sensors.c
879 ++++ b/drivers/macintosh/windfarm_smu_sensors.c
880 +@@ -273,8 +273,8 @@ struct smu_cpu_power_sensor {
881 + struct list_head link;
882 + struct wf_sensor *volts;
883 + struct wf_sensor *amps;
884 +- int fake_volts : 1;
885 +- int quadratic : 1;
886 ++ unsigned int fake_volts : 1;
887 ++ unsigned int quadratic : 1;
888 + struct wf_sensor sens;
889 + };
890 + #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens)
891 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
892 +index be6c882dd1d54..087fb464ffc12 100644
893 +--- a/drivers/media/i2c/ov5640.c
894 ++++ b/drivers/media/i2c/ov5640.c
895 +@@ -2704,7 +2704,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
896 + /* Auto/manual gain */
897 + ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
898 + 0, 1, 1, 1);
899 +- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
900 ++ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
901 + 0, 1023, 1, 0);
902 +
903 + ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
904 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
905 +index ef8225b7445d3..9fb1da36e9eb8 100644
906 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
907 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
908 +@@ -2747,7 +2747,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
909 +
910 + static void bnxt_free_tpa_info(struct bnxt *bp)
911 + {
912 +- int i;
913 ++ int i, j;
914 +
915 + for (i = 0; i < bp->rx_nr_rings; i++) {
916 + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
917 +@@ -2755,8 +2755,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
918 + kfree(rxr->rx_tpa_idx_map);
919 + rxr->rx_tpa_idx_map = NULL;
920 + if (rxr->rx_tpa) {
921 +- kfree(rxr->rx_tpa[0].agg_arr);
922 +- rxr->rx_tpa[0].agg_arr = NULL;
923 ++ for (j = 0; j < bp->max_tpa; j++) {
924 ++ kfree(rxr->rx_tpa[j].agg_arr);
925 ++ rxr->rx_tpa[j].agg_arr = NULL;
926 ++ }
927 + }
928 + kfree(rxr->rx_tpa);
929 + rxr->rx_tpa = NULL;
930 +@@ -2765,14 +2767,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
931 +
932 + static int bnxt_alloc_tpa_info(struct bnxt *bp)
933 + {
934 +- int i, j, total_aggs = 0;
935 ++ int i, j;
936 +
937 + bp->max_tpa = MAX_TPA;
938 + if (bp->flags & BNXT_FLAG_CHIP_P5) {
939 + if (!bp->max_tpa_v2)
940 + return 0;
941 + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
942 +- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
943 + }
944 +
945 + for (i = 0; i < bp->rx_nr_rings; i++) {
946 +@@ -2786,12 +2787,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
947 +
948 + if (!(bp->flags & BNXT_FLAG_CHIP_P5))
949 + continue;
950 +- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
951 +- rxr->rx_tpa[0].agg_arr = agg;
952 +- if (!agg)
953 +- return -ENOMEM;
954 +- for (j = 1; j < bp->max_tpa; j++)
955 +- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
956 ++ for (j = 0; j < bp->max_tpa; j++) {
957 ++ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
958 ++ if (!agg)
959 ++ return -ENOMEM;
960 ++ rxr->rx_tpa[j].agg_arr = agg;
961 ++ }
962 + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
963 + GFP_KERNEL);
964 + if (!rxr->rx_tpa_idx_map)
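
The bnxt change above drops the single large agg_arr allocation that every TPA entry used to index into and instead gives each entry its own kcalloc()'d array, freeing them one by one on teardown. A hypothetical userspace reduction of that per-entry allocate/free pattern (structure and constants invented for illustration):

/* Hedged sketch: one aggregation array per table entry, freed individually.
 * MAX_TPA/MAX_FRAGS and the struct are invented stand-ins. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_TPA   8
#define MAX_FRAGS 17

struct tpa_info {
        int *agg_arr;
};

static void free_tpa(struct tpa_info *tpa)
{
        for (int j = 0; j < MAX_TPA; j++) {
                free(tpa[j].agg_arr);   /* free(NULL) is a safe no-op */
                tpa[j].agg_arr = NULL;
        }
}

static int alloc_tpa(struct tpa_info *tpa)
{
        for (int j = 0; j < MAX_TPA; j++) {
                tpa[j].agg_arr = calloc(MAX_FRAGS, sizeof(int));
                if (!tpa[j].agg_arr)
                        return -1;      /* caller tears down what was allocated */
        }
        return 0;
}

int main(void)
{
        struct tpa_info tpa[MAX_TPA] = { 0 };

        if (alloc_tpa(tpa) == 0)
                puts("per-entry aggregation arrays allocated");
        free_tpa(tpa);
        return 0;
}
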
965 +diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
966 +index a644e8e5071c3..375bbd60b38af 100644
967 +--- a/drivers/net/phy/microchip.c
968 ++++ b/drivers/net/phy/microchip.c
969 +@@ -326,6 +326,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
970 + return genphy_config_aneg(phydev);
971 + }
972 +
973 ++static void lan88xx_link_change_notify(struct phy_device *phydev)
974 ++{
975 ++ int temp;
976 ++
977 ++ /* At forced 100 F/H mode, chip may fail to set mode correctly
978 ++ * when cable is switched between long(~50+m) and short one.
979 ++ * As workaround, set to 10 before setting to 100
980 ++ * at forced 100 F/H mode.
981 ++ */
982 ++ if (!phydev->autoneg && phydev->speed == 100) {
983 ++ /* disable phy interrupt */
984 ++ temp = phy_read(phydev, LAN88XX_INT_MASK);
985 ++ temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
986 ++ phy_write(phydev, LAN88XX_INT_MASK, temp);
987 ++
988 ++ temp = phy_read(phydev, MII_BMCR);
989 ++ temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
990 ++ phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
991 ++ temp |= BMCR_SPEED100;
992 ++ phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
993 ++
994 ++ /* clear pending interrupt generated while workaround */
995 ++ temp = phy_read(phydev, LAN88XX_INT_STS);
996 ++
997 ++ /* enable phy interrupt back */
998 ++ temp = phy_read(phydev, LAN88XX_INT_MASK);
999 ++ temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1000 ++ phy_write(phydev, LAN88XX_INT_MASK, temp);
1001 ++ }
1002 ++}
1003 ++
1004 + static struct phy_driver microchip_phy_driver[] = {
1005 + {
1006 + .phy_id = 0x0007c130,
1007 +@@ -339,6 +370,7 @@ static struct phy_driver microchip_phy_driver[] = {
1008 +
1009 + .config_init = lan88xx_config_init,
1010 + .config_aneg = lan88xx_config_aneg,
1011 ++ .link_change_notify = lan88xx_link_change_notify,
1012 +
1013 + .ack_interrupt = lan88xx_phy_ack_interrupt,
1014 + .config_intr = lan88xx_phy_config_intr,
1015 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1016 +index ce3c8f476d75c..b51017966bb37 100644
1017 +--- a/drivers/net/usb/lan78xx.c
1018 ++++ b/drivers/net/usb/lan78xx.c
1019 +@@ -824,20 +824,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1020 + u32 length, u8 *data)
1021 + {
1022 + int i;
1023 +- int ret;
1024 + u32 buf;
1025 + unsigned long timeout;
1026 +
1027 +- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1028 ++ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1029 +
1030 + if (buf & OTP_PWR_DN_PWRDN_N_) {
1031 + /* clear it and wait to be cleared */
1032 +- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1033 ++ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1034 +
1035 + timeout = jiffies + HZ;
1036 + do {
1037 + usleep_range(1, 10);
1038 +- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1039 ++ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1040 + if (time_after(jiffies, timeout)) {
1041 + netdev_warn(dev->net,
1042 + "timeout on OTP_PWR_DN");
1043 +@@ -847,18 +846,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1044 + }
1045 +
1046 + for (i = 0; i < length; i++) {
1047 +- ret = lan78xx_write_reg(dev, OTP_ADDR1,
1048 ++ lan78xx_write_reg(dev, OTP_ADDR1,
1049 + ((offset + i) >> 8) & OTP_ADDR1_15_11);
1050 +- ret = lan78xx_write_reg(dev, OTP_ADDR2,
1051 ++ lan78xx_write_reg(dev, OTP_ADDR2,
1052 + ((offset + i) & OTP_ADDR2_10_3));
1053 +
1054 +- ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1055 +- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1056 ++ lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
1057 ++ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1058 +
1059 + timeout = jiffies + HZ;
1060 + do {
1061 + udelay(1);
1062 +- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1063 ++ lan78xx_read_reg(dev, OTP_STATUS, &buf);
1064 + if (time_after(jiffies, timeout)) {
1065 + netdev_warn(dev->net,
1066 + "timeout on OTP_STATUS");
1067 +@@ -866,7 +865,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
1068 + }
1069 + } while (buf & OTP_STATUS_BUSY_);
1070 +
1071 +- ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1072 ++ lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
1073 +
1074 + data[i] = (u8)(buf & 0xFF);
1075 + }
1076 +@@ -878,20 +877,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1077 + u32 length, u8 *data)
1078 + {
1079 + int i;
1080 +- int ret;
1081 + u32 buf;
1082 + unsigned long timeout;
1083 +
1084 +- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1085 ++ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1086 +
1087 + if (buf & OTP_PWR_DN_PWRDN_N_) {
1088 + /* clear it and wait to be cleared */
1089 +- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1090 ++ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
1091 +
1092 + timeout = jiffies + HZ;
1093 + do {
1094 + udelay(1);
1095 +- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1096 ++ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
1097 + if (time_after(jiffies, timeout)) {
1098 + netdev_warn(dev->net,
1099 + "timeout on OTP_PWR_DN completion");
1100 +@@ -901,21 +899,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
1101 + }
1102 +
1103 + /* set to BYTE program mode */
1104 +- ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1105 ++ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
1106 +
1107 + for (i = 0; i < length; i++) {
1108 +- ret = lan78xx_write_reg(dev, OTP_ADDR1,
1109 ++ lan78xx_write_reg(dev, OTP_ADDR1,
1110 + ((offset + i) >> 8) & OTP_ADDR1_15_11);
1111 +- ret = lan78xx_write_reg(dev, OTP_ADDR2,
1112 ++ lan78xx_write_reg(dev, OTP_ADDR2,
1113 + ((offset + i) & OTP_ADDR2_10_3));
1114 +- ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1115 +- ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1116 +- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1117 ++ lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
1118 ++ lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
1119 ++ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
1120 +
1121 + timeout = jiffies + HZ;
1122 + do {
1123 + udelay(1);
1124 +- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
1125 ++ lan78xx_read_reg(dev, OTP_STATUS, &buf);
1126 + if (time_after(jiffies, timeout)) {
1127 + netdev_warn(dev->net,
1128 + "Timeout on OTP_STATUS completion");
1129 +@@ -1040,7 +1038,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
1130 + container_of(param, struct lan78xx_priv, set_multicast);
1131 + struct lan78xx_net *dev = pdata->dev;
1132 + int i;
1133 +- int ret;
1134 +
1135 + netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1136 + pdata->rfe_ctl);
1137 +@@ -1049,14 +1046,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
1138 + DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1139 +
1140 + for (i = 1; i < NUM_OF_MAF; i++) {
1141 +- ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1142 +- ret = lan78xx_write_reg(dev, MAF_LO(i),
1143 ++ lan78xx_write_reg(dev, MAF_HI(i), 0);
1144 ++ lan78xx_write_reg(dev, MAF_LO(i),
1145 + pdata->pfilter_table[i][1]);
1146 +- ret = lan78xx_write_reg(dev, MAF_HI(i),
1147 ++ lan78xx_write_reg(dev, MAF_HI(i),
1148 + pdata->pfilter_table[i][0]);
1149 + }
1150 +
1151 +- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1152 ++ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1153 + }
1154 +
1155 + static void lan78xx_set_multicast(struct net_device *netdev)
1156 +@@ -1126,7 +1123,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1157 + u16 lcladv, u16 rmtadv)
1158 + {
1159 + u32 flow = 0, fct_flow = 0;
1160 +- int ret;
1161 + u8 cap;
1162 +
1163 + if (dev->fc_autoneg)
1164 +@@ -1149,10 +1145,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1165 + (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1166 + (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1167 +
1168 +- ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1169 ++ lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1170 +
1171 + /* threshold value should be set before enabling flow */
1172 +- ret = lan78xx_write_reg(dev, FLOW, flow);
1173 ++ lan78xx_write_reg(dev, FLOW, flow);
1174 +
1175 + return 0;
1176 + }
1177 +@@ -1681,11 +1677,10 @@ static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1178 + static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1179 + {
1180 + u32 addr_lo, addr_hi;
1181 +- int ret;
1182 + u8 addr[6];
1183 +
1184 +- ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1185 +- ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1186 ++ lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1187 ++ lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1188 +
1189 + addr[0] = addr_lo & 0xFF;
1190 + addr[1] = (addr_lo >> 8) & 0xFF;
1191 +@@ -1718,12 +1713,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1192 + (addr[2] << 16) | (addr[3] << 24);
1193 + addr_hi = addr[4] | (addr[5] << 8);
1194 +
1195 +- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1196 +- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1197 ++ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1198 ++ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1199 + }
1200 +
1201 +- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1202 +- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1203 ++ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1204 ++ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1205 +
1206 + ether_addr_copy(dev->net->dev_addr, addr);
1207 + }
1208 +@@ -1856,33 +1851,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1209 + static void lan78xx_link_status_change(struct net_device *net)
1210 + {
1211 + struct phy_device *phydev = net->phydev;
1212 +- int ret, temp;
1213 +-
1214 +- /* At forced 100 F/H mode, chip may fail to set mode correctly
1215 +- * when cable is switched between long(~50+m) and short one.
1216 +- * As workaround, set to 10 before setting to 100
1217 +- * at forced 100 F/H mode.
1218 +- */
1219 +- if (!phydev->autoneg && (phydev->speed == 100)) {
1220 +- /* disable phy interrupt */
1221 +- temp = phy_read(phydev, LAN88XX_INT_MASK);
1222 +- temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1223 +- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1224 +
1225 +- temp = phy_read(phydev, MII_BMCR);
1226 +- temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1227 +- phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1228 +- temp |= BMCR_SPEED100;
1229 +- phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1230 +-
1231 +- /* clear pending interrupt generated while workaround */
1232 +- temp = phy_read(phydev, LAN88XX_INT_STS);
1233 +-
1234 +- /* enable phy interrupt back */
1235 +- temp = phy_read(phydev, LAN88XX_INT_MASK);
1236 +- temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1237 +- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1238 +- }
1239 ++ phy_print_status(phydev);
1240 + }
1241 +
1242 + static int irq_map(struct irq_domain *d, unsigned int irq,
1243 +@@ -1935,14 +1905,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1244 + struct lan78xx_net *dev =
1245 + container_of(data, struct lan78xx_net, domain_data);
1246 + u32 buf;
1247 +- int ret;
1248 +
1249 + /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1250 + * are only two callbacks executed in non-atomic contex.
1251 + */
1252 +- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1253 ++ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1254 + if (buf != data->irqenable)
1255 +- ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1256 ++ lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1257 +
1258 + mutex_unlock(&data->irq_lock);
1259 + }
1260 +@@ -2009,7 +1978,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1261 + static int lan8835_fixup(struct phy_device *phydev)
1262 + {
1263 + int buf;
1264 +- int ret;
1265 + struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1266 +
1267 + /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1268 +@@ -2019,11 +1987,11 @@ static int lan8835_fixup(struct phy_device *phydev)
1269 + phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1270 +
1271 + /* RGMII MAC TXC Delay Enable */
1272 +- ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1273 ++ lan78xx_write_reg(dev, MAC_RGMII_ID,
1274 + MAC_RGMII_ID_TXC_DELAY_EN_);
1275 +
1276 + /* RGMII TX DLL Tune Adjust */
1277 +- ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1278 ++ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1279 +
1280 + dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1281 +
1282 +@@ -2207,28 +2175,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
1283 +
1284 + static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1285 + {
1286 +- int ret = 0;
1287 + u32 buf;
1288 + bool rxenabled;
1289 +
1290 +- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1291 ++ lan78xx_read_reg(dev, MAC_RX, &buf);
1292 +
1293 + rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1294 +
1295 + if (rxenabled) {
1296 + buf &= ~MAC_RX_RXEN_;
1297 +- ret = lan78xx_write_reg(dev, MAC_RX, buf);
1298 ++ lan78xx_write_reg(dev, MAC_RX, buf);
1299 + }
1300 +
1301 + /* add 4 to size for FCS */
1302 + buf &= ~MAC_RX_MAX_SIZE_MASK_;
1303 + buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1304 +
1305 +- ret = lan78xx_write_reg(dev, MAC_RX, buf);
1306 ++ lan78xx_write_reg(dev, MAC_RX, buf);
1307 +
1308 + if (rxenabled) {
1309 + buf |= MAC_RX_RXEN_;
1310 +- ret = lan78xx_write_reg(dev, MAC_RX, buf);
1311 ++ lan78xx_write_reg(dev, MAC_RX, buf);
1312 + }
1313 +
1314 + return 0;
1315 +@@ -2285,13 +2252,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1316 + int ll_mtu = new_mtu + netdev->hard_header_len;
1317 + int old_hard_mtu = dev->hard_mtu;
1318 + int old_rx_urb_size = dev->rx_urb_size;
1319 +- int ret;
1320 +
1321 + /* no second zero-length packet read wanted after mtu-sized packets */
1322 + if ((ll_mtu % dev->maxpacket) == 0)
1323 + return -EDOM;
1324 +
1325 +- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
1326 ++ lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
1327 +
1328 + netdev->mtu = new_mtu;
1329 +
1330 +@@ -2314,7 +2280,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1331 + struct lan78xx_net *dev = netdev_priv(netdev);
1332 + struct sockaddr *addr = p;
1333 + u32 addr_lo, addr_hi;
1334 +- int ret;
1335 +
1336 + if (netif_running(netdev))
1337 + return -EBUSY;
1338 +@@ -2331,12 +2296,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1339 + addr_hi = netdev->dev_addr[4] |
1340 + netdev->dev_addr[5] << 8;
1341 +
1342 +- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1343 +- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1344 ++ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1345 ++ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1346 +
1347 + /* Added to support MAC address changes */
1348 +- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1349 +- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1350 ++ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1351 ++ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1352 +
1353 + return 0;
1354 + }
1355 +@@ -2348,7 +2313,6 @@ static int lan78xx_set_features(struct net_device *netdev,
1356 + struct lan78xx_net *dev = netdev_priv(netdev);
1357 + struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1358 + unsigned long flags;
1359 +- int ret;
1360 +
1361 + spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1362 +
1363 +@@ -2372,7 +2336,7 @@ static int lan78xx_set_features(struct net_device *netdev,
1364 +
1365 + spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1366 +
1367 +- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1368 ++ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1369 +
1370 + return 0;
1371 + }
1372 +@@ -3828,7 +3792,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
1373 + static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1374 + {
1375 + u32 buf;
1376 +- int ret;
1377 + int mask_index;
1378 + u16 crc;
1379 + u32 temp_wucsr;
1380 +@@ -3837,26 +3800,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1381 + const u8 ipv6_multicast[3] = { 0x33, 0x33 };
1382 + const u8 arp_type[2] = { 0x08, 0x06 };
1383 +
1384 +- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
1385 ++ lan78xx_read_reg(dev, MAC_TX, &buf);
1386 + buf &= ~MAC_TX_TXEN_;
1387 +- ret = lan78xx_write_reg(dev, MAC_TX, buf);
1388 +- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1389 ++ lan78xx_write_reg(dev, MAC_TX, buf);
1390 ++ lan78xx_read_reg(dev, MAC_RX, &buf);
1391 + buf &= ~MAC_RX_RXEN_;
1392 +- ret = lan78xx_write_reg(dev, MAC_RX, buf);
1393 ++ lan78xx_write_reg(dev, MAC_RX, buf);
1394 +
1395 +- ret = lan78xx_write_reg(dev, WUCSR, 0);
1396 +- ret = lan78xx_write_reg(dev, WUCSR2, 0);
1397 +- ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
1398 ++ lan78xx_write_reg(dev, WUCSR, 0);
1399 ++ lan78xx_write_reg(dev, WUCSR2, 0);
1400 ++ lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
1401 +
1402 + temp_wucsr = 0;
1403 +
1404 + temp_pmt_ctl = 0;
1405 +- ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
1406 ++ lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
1407 + temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
1408 + temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
1409 +
1410 + for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
1411 +- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
1412 ++ lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
1413 +
1414 + mask_index = 0;
1415 + if (wol & WAKE_PHY) {
1416 +@@ -3885,30 +3848,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1417 +
1418 + /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
1419 + crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
1420 +- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
1421 ++ lan78xx_write_reg(dev, WUF_CFG(mask_index),
1422 + WUF_CFGX_EN_ |
1423 + WUF_CFGX_TYPE_MCAST_ |
1424 + (0 << WUF_CFGX_OFFSET_SHIFT_) |
1425 + (crc & WUF_CFGX_CRC16_MASK_));
1426 +
1427 +- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
1428 +- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1429 +- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1430 +- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1431 ++ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
1432 ++ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1433 ++ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1434 ++ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1435 + mask_index++;
1436 +
1437 + /* for IPv6 Multicast */
1438 + crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
1439 +- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
1440 ++ lan78xx_write_reg(dev, WUF_CFG(mask_index),
1441 + WUF_CFGX_EN_ |
1442 + WUF_CFGX_TYPE_MCAST_ |
1443 + (0 << WUF_CFGX_OFFSET_SHIFT_) |
1444 + (crc & WUF_CFGX_CRC16_MASK_));
1445 +
1446 +- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
1447 +- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1448 +- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1449 +- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1450 ++ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
1451 ++ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1452 ++ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1453 ++ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1454 + mask_index++;
1455 +
1456 + temp_pmt_ctl |= PMT_CTL_WOL_EN_;
1457 +@@ -3929,16 +3892,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1458 + * for packettype (offset 12,13) = ARP (0x0806)
1459 + */
1460 + crc = lan78xx_wakeframe_crc16(arp_type, 2);
1461 +- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
1462 ++ lan78xx_write_reg(dev, WUF_CFG(mask_index),
1463 + WUF_CFGX_EN_ |
1464 + WUF_CFGX_TYPE_ALL_ |
1465 + (0 << WUF_CFGX_OFFSET_SHIFT_) |
1466 + (crc & WUF_CFGX_CRC16_MASK_));
1467 +
1468 +- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
1469 +- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1470 +- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1471 +- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1472 ++ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
1473 ++ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
1474 ++ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
1475 ++ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
1476 + mask_index++;
1477 +
1478 + temp_pmt_ctl |= PMT_CTL_WOL_EN_;
1479 +@@ -3946,7 +3909,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1480 + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
1481 + }
1482 +
1483 +- ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
1484 ++ lan78xx_write_reg(dev, WUCSR, temp_wucsr);
1485 +
1486 + /* when multiple WOL bits are set */
1487 + if (hweight_long((unsigned long)wol) > 1) {
1488 +@@ -3954,16 +3917,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
1489 + temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
1490 + temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
1491 + }
1492 +- ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
1493 ++ lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
1494 +
1495 + /* clear WUPS */
1496 +- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1497 ++ lan78xx_read_reg(dev, PMT_CTL, &buf);
1498 + buf |= PMT_CTL_WUPS_MASK_;
1499 +- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1500 ++ lan78xx_write_reg(dev, PMT_CTL, buf);
1501 +
1502 +- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1503 ++ lan78xx_read_reg(dev, MAC_RX, &buf);
1504 + buf |= MAC_RX_RXEN_;
1505 +- ret = lan78xx_write_reg(dev, MAC_RX, buf);
1506 ++ lan78xx_write_reg(dev, MAC_RX, buf);
1507 +
1508 + return 0;
1509 + }
1510 +diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
1511 +index ad0abb1f0bae9..7305f1a06e97c 100644
1512 +--- a/drivers/nfc/fdp/i2c.c
1513 ++++ b/drivers/nfc/fdp/i2c.c
1514 +@@ -255,6 +255,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
1515 + len, sizeof(**fw_vsc_cfg),
1516 + GFP_KERNEL);
1517 +
1518 ++ if (!*fw_vsc_cfg)
1519 ++ goto alloc_err;
1520 ++
1521 + r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
1522 + *fw_vsc_cfg, len);
1523 +
1524 +@@ -268,6 +271,7 @@ vsc_read_err:
1525 + *fw_vsc_cfg = NULL;
1526 + }
1527 +
1528 ++alloc_err:
1529 + dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
1530 + *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
1531 + }
1532 +diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
1533 +index f7ae03fd36cb5..f029884eabd19 100644
1534 +--- a/drivers/s390/block/dasd_diag.c
1535 ++++ b/drivers/s390/block/dasd_diag.c
1536 +@@ -644,12 +644,17 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
1537 + blk_queue_segment_boundary(q, PAGE_SIZE - 1);
1538 + }
1539 +
1540 ++static int dasd_diag_pe_handler(struct dasd_device *device, __u8 tbvpm)
1541 ++{
1542 ++ return dasd_generic_verify_path(device, tbvpm);
1543 ++}
1544 ++
1545 + static struct dasd_discipline dasd_diag_discipline = {
1546 + .owner = THIS_MODULE,
1547 + .name = "DIAG",
1548 + .ebcname = "DIAG",
1549 + .check_device = dasd_diag_check_device,
1550 +- .verify_path = dasd_generic_verify_path,
1551 ++ .pe_handler = dasd_diag_pe_handler,
1552 + .fill_geometry = dasd_diag_fill_geometry,
1553 + .setup_blk_queue = dasd_diag_setup_blk_queue,
1554 + .start_IO = dasd_start_diag,
1555 +diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
1556 +index 1a44e321b54e1..b159575a27608 100644
1557 +--- a/drivers/s390/block/dasd_fba.c
1558 ++++ b/drivers/s390/block/dasd_fba.c
1559 +@@ -803,13 +803,18 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
1560 + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
1561 + }
1562 +
1563 ++static int dasd_fba_pe_handler(struct dasd_device *device, __u8 tbvpm)
1564 ++{
1565 ++ return dasd_generic_verify_path(device, tbvpm);
1566 ++}
1567 ++
1568 + static struct dasd_discipline dasd_fba_discipline = {
1569 + .owner = THIS_MODULE,
1570 + .name = "FBA ",
1571 + .ebcname = "FBA ",
1572 + .check_device = dasd_fba_check_characteristics,
1573 + .do_analysis = dasd_fba_do_analysis,
1574 +- .verify_path = dasd_generic_verify_path,
1575 ++ .pe_handler = dasd_fba_pe_handler,
1576 + .setup_blk_queue = dasd_fba_setup_blk_queue,
1577 + .fill_geometry = dasd_fba_fill_geometry,
1578 + .start_IO = dasd_start_IO,
1579 +diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
1580 +index e8a06d85d6f72..5d7d35ca5eb48 100644
1581 +--- a/drivers/s390/block/dasd_int.h
1582 ++++ b/drivers/s390/block/dasd_int.h
1583 +@@ -298,7 +298,6 @@ struct dasd_discipline {
1584 + * e.g. verify that new path is compatible with the current
1585 + * configuration.
1586 + */
1587 +- int (*verify_path)(struct dasd_device *, __u8);
1588 + int (*pe_handler)(struct dasd_device *, __u8);
1589 +
1590 + /*
1591 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
1592 +index 45885e80992fb..b08d963013db6 100644
1593 +--- a/drivers/scsi/hosts.c
1594 ++++ b/drivers/scsi/hosts.c
1595 +@@ -179,6 +179,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
1596 + scsi_forget_host(shost);
1597 + mutex_unlock(&shost->scan_mutex);
1598 + scsi_proc_host_rm(shost);
1599 ++ scsi_proc_hostdir_rm(shost->hostt);
1600 +
1601 + spin_lock_irqsave(shost->host_lock, flags);
1602 + if (scsi_host_set_state(shost, SHOST_DEL))
1603 +@@ -318,6 +319,7 @@ static void scsi_host_dev_release(struct device *dev)
1604 + struct Scsi_Host *shost = dev_to_shost(dev);
1605 + struct device *parent = dev->parent;
1606 +
1607 ++ /* In case scsi_remove_host() has not been called. */
1608 + scsi_proc_hostdir_rm(shost->hostt);
1609 +
1610 + /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
1611 +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
1612 +index aa62cc8ffd0af..ce0c36fa26bf7 100644
1613 +--- a/drivers/scsi/megaraid/megaraid_sas.h
1614 ++++ b/drivers/scsi/megaraid/megaraid_sas.h
1615 +@@ -1515,6 +1515,8 @@ struct megasas_ctrl_info {
1616 + #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
1617 + MEGASAS_MAX_DEV_PER_CHANNEL)
1618 +
1619 ++#define MEGASAS_MAX_SUPPORTED_LD_IDS 240
1620 ++
1621 + #define MEGASAS_MAX_SECTORS (2*1024)
1622 + #define MEGASAS_MAX_SECTORS_IEEE (2*128)
1623 + #define MEGASAS_DBG_LVL 1
1624 +diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
1625 +index 8bfb46dbbed3a..ff20f47090810 100644
1626 +--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
1627 ++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
1628 +@@ -359,7 +359,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
1629 + ld = MR_TargetIdToLdGet(i, drv_map);
1630 +
1631 + /* For non existing VDs, iterate to next VD*/
1632 +- if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
1633 ++ if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
1634 + continue;
1635 +
1636 + raid = MR_LdRaidGet(ld, drv_map);
1637 +diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
1638 +index 1f55072aa3023..1766d6d077f26 100644
1639 +--- a/fs/cifs/cifsacl.c
1640 ++++ b/fs/cifs/cifsacl.c
1641 +@@ -1056,7 +1056,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1642 + struct cifs_ntsd *pntsd = NULL;
1643 + int oplock = 0;
1644 + unsigned int xid;
1645 +- int rc, create_options = 0;
1646 ++ int rc;
1647 + struct cifs_tcon *tcon;
1648 + struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1649 + struct cifs_fid fid;
1650 +@@ -1068,13 +1068,10 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1651 + tcon = tlink_tcon(tlink);
1652 + xid = get_xid();
1653 +
1654 +- if (backup_cred(cifs_sb))
1655 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1656 +-
1657 + oparms.tcon = tcon;
1658 + oparms.cifs_sb = cifs_sb;
1659 + oparms.desired_access = READ_CONTROL;
1660 +- oparms.create_options = create_options;
1661 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
1662 + oparms.disposition = FILE_OPEN;
1663 + oparms.path = path;
1664 + oparms.fid = &fid;
1665 +@@ -1119,7 +1116,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1666 + {
1667 + int oplock = 0;
1668 + unsigned int xid;
1669 +- int rc, access_flags, create_options = 0;
1670 ++ int rc, access_flags;
1671 + struct cifs_tcon *tcon;
1672 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1673 + struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1674 +@@ -1132,9 +1129,6 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1675 + tcon = tlink_tcon(tlink);
1676 + xid = get_xid();
1677 +
1678 +- if (backup_cred(cifs_sb))
1679 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1680 +-
1681 + if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1682 + access_flags = WRITE_OWNER;
1683 + else
1684 +@@ -1143,7 +1137,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1685 + oparms.tcon = tcon;
1686 + oparms.cifs_sb = cifs_sb;
1687 + oparms.desired_access = access_flags;
1688 +- oparms.create_options = create_options;
1689 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
1690 + oparms.disposition = FILE_OPEN;
1691 + oparms.path = path;
1692 + oparms.fid = &fid;
1693 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1694 +index aa7827da7b178..871a7b044c1b8 100644
1695 +--- a/fs/cifs/cifsfs.c
1696 ++++ b/fs/cifs/cifsfs.c
1697 +@@ -275,7 +275,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
1698 + buf->f_ffree = 0; /* unlimited */
1699 +
1700 + if (server->ops->queryfs)
1701 +- rc = server->ops->queryfs(xid, tcon, buf);
1702 ++ rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
1703 +
1704 + free_xid(xid);
1705 + return rc;
1706 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1707 +index 7c0eb110e2630..253321adc2664 100644
1708 +--- a/fs/cifs/cifsglob.h
1709 ++++ b/fs/cifs/cifsglob.h
1710 +@@ -300,7 +300,8 @@ struct smb_version_operations {
1711 + const char *, struct dfs_info3_param **,
1712 + unsigned int *, const struct nls_table *, int);
1713 + /* informational QFS call */
1714 +- void (*qfs_tcon)(const unsigned int, struct cifs_tcon *);
1715 ++ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
1716 ++ struct cifs_sb_info *);
1717 + /* check if a path is accessible or not */
1718 + int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
1719 + struct cifs_sb_info *, const char *);
1720 +@@ -408,7 +409,7 @@ struct smb_version_operations {
1721 + struct cifsInodeInfo *);
1722 + /* query remote filesystem */
1723 + int (*queryfs)(const unsigned int, struct cifs_tcon *,
1724 +- struct kstatfs *);
1725 ++ struct cifs_sb_info *, struct kstatfs *);
1726 + /* send mandatory brlock to the server */
1727 + int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64,
1728 + __u64, __u32, int, int, bool);
1729 +@@ -489,6 +490,7 @@ struct smb_version_operations {
1730 + /* ioctl passthrough for query_info */
1731 + int (*ioctl_query_info)(const unsigned int xid,
1732 + struct cifs_tcon *tcon,
1733 ++ struct cifs_sb_info *cifs_sb,
1734 + __le16 *path, int is_dir,
1735 + unsigned long p);
1736 + /* make unix special files (block, char, fifo, socket) */
1737 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
1738 +index 56a4740ae93ab..a5fab9afd699f 100644
1739 +--- a/fs/cifs/cifsproto.h
1740 ++++ b/fs/cifs/cifsproto.h
1741 +@@ -600,4 +600,12 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
1742 + }
1743 + #endif
1744 +
1745 ++static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
1746 ++{
1747 ++ if (backup_cred(cifs_sb))
1748 ++ return options | CREATE_OPEN_BACKUP_INTENT;
1749 ++ else
1750 ++ return options;
1751 ++}
1752 ++
1753 + #endif /* _CIFSPROTO_H */
1754 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1755 +index 25a2a98ebda8d..6c8dd7c0b83a2 100644
1756 +--- a/fs/cifs/connect.c
1757 ++++ b/fs/cifs/connect.c
1758 +@@ -4319,7 +4319,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
1759 +
1760 + /* do not care if a following call succeed - informational */
1761 + if (!tcon->pipe && server->ops->qfs_tcon) {
1762 +- server->ops->qfs_tcon(*xid, tcon);
1763 ++ server->ops->qfs_tcon(*xid, tcon, cifs_sb);
1764 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
1765 + if (tcon->fsDevInfo.DeviceCharacteristics &
1766 + cpu_to_le32(FILE_READ_ONLY_DEVICE))
1767 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
1768 +index 9ae9a514676c3..548047a709bfc 100644
1769 +--- a/fs/cifs/dir.c
1770 ++++ b/fs/cifs/dir.c
1771 +@@ -357,13 +357,10 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
1772 + if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
1773 + create_options |= CREATE_OPTION_READONLY;
1774 +
1775 +- if (backup_cred(cifs_sb))
1776 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1777 +-
1778 + oparms.tcon = tcon;
1779 + oparms.cifs_sb = cifs_sb;
1780 + oparms.desired_access = desired_access;
1781 +- oparms.create_options = create_options;
1782 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
1783 + oparms.disposition = disposition;
1784 + oparms.path = full_path;
1785 + oparms.fid = fid;
1786 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1787 +index eb9b34442b1d3..86924831fd4ba 100644
1788 +--- a/fs/cifs/file.c
1789 ++++ b/fs/cifs/file.c
1790 +@@ -223,9 +223,6 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
1791 + if (!buf)
1792 + return -ENOMEM;
1793 +
1794 +- if (backup_cred(cifs_sb))
1795 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1796 +-
1797 + /* O_SYNC also has bit for O_DSYNC so following check picks up either */
1798 + if (f_flags & O_SYNC)
1799 + create_options |= CREATE_WRITE_THROUGH;
1800 +@@ -236,7 +233,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
1801 + oparms.tcon = tcon;
1802 + oparms.cifs_sb = cifs_sb;
1803 + oparms.desired_access = desired_access;
1804 +- oparms.create_options = create_options;
1805 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
1806 + oparms.disposition = disposition;
1807 + oparms.path = full_path;
1808 + oparms.fid = fid;
1809 +@@ -751,9 +748,6 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1810 +
1811 + desired_access = cifs_convert_flags(cfile->f_flags);
1812 +
1813 +- if (backup_cred(cifs_sb))
1814 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1815 +-
1816 + /* O_SYNC also has bit for O_DSYNC so following check picks up either */
1817 + if (cfile->f_flags & O_SYNC)
1818 + create_options |= CREATE_WRITE_THROUGH;
1819 +@@ -767,7 +761,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1820 + oparms.tcon = tcon;
1821 + oparms.cifs_sb = cifs_sb;
1822 + oparms.desired_access = desired_access;
1823 +- oparms.create_options = create_options;
1824 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
1825 + oparms.disposition = disposition;
1826 + oparms.path = full_path;
1827 + oparms.fid = &cfile->fid;
1828 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
1829 +index fd9e289f3e72a..af0980c720c78 100644
1830 +--- a/fs/cifs/inode.c
1831 ++++ b/fs/cifs/inode.c
1832 +@@ -472,9 +472,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
1833 + oparms.tcon = tcon;
1834 + oparms.cifs_sb = cifs_sb;
1835 + oparms.desired_access = GENERIC_READ;
1836 +- oparms.create_options = CREATE_NOT_DIR;
1837 +- if (backup_cred(cifs_sb))
1838 +- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
1839 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1840 + oparms.disposition = FILE_OPEN;
1841 + oparms.path = path;
1842 + oparms.fid = &fid;
1843 +@@ -1225,7 +1223,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
1844 + oparms.tcon = tcon;
1845 + oparms.cifs_sb = cifs_sb;
1846 + oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
1847 +- oparms.create_options = CREATE_NOT_DIR;
1848 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1849 + oparms.disposition = FILE_OPEN;
1850 + oparms.path = full_path;
1851 + oparms.fid = &fid;
1852 +@@ -1763,7 +1761,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
1853 + oparms.cifs_sb = cifs_sb;
1854 + /* open the file to be renamed -- we need DELETE perms */
1855 + oparms.desired_access = DELETE;
1856 +- oparms.create_options = CREATE_NOT_DIR;
1857 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1858 + oparms.disposition = FILE_OPEN;
1859 + oparms.path = from_path;
1860 + oparms.fid = &fid;
1861 +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
1862 +index 9266dddd4b1eb..bc08d856ee05f 100644
1863 +--- a/fs/cifs/ioctl.c
1864 ++++ b/fs/cifs/ioctl.c
1865 +@@ -65,7 +65,7 @@ static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
1866 +
1867 + if (tcon->ses->server->ops->ioctl_query_info)
1868 + rc = tcon->ses->server->ops->ioctl_query_info(
1869 +- xid, tcon, utf16_path,
1870 ++ xid, tcon, cifs_sb, utf16_path,
1871 + filep->private_data ? 0 : 1, p);
1872 + else
1873 + rc = -EOPNOTSUPP;
1874 +diff --git a/fs/cifs/link.c b/fs/cifs/link.c
1875 +index b4b15d611deda..02949a2f28608 100644
1876 +--- a/fs/cifs/link.c
1877 ++++ b/fs/cifs/link.c
1878 +@@ -318,7 +318,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
1879 + oparms.tcon = tcon;
1880 + oparms.cifs_sb = cifs_sb;
1881 + oparms.desired_access = GENERIC_READ;
1882 +- oparms.create_options = CREATE_NOT_DIR;
1883 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1884 + oparms.disposition = FILE_OPEN;
1885 + oparms.path = path;
1886 + oparms.fid = &fid;
1887 +@@ -356,15 +356,11 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
1888 + struct cifs_fid fid;
1889 + struct cifs_open_parms oparms;
1890 + struct cifs_io_parms io_parms;
1891 +- int create_options = CREATE_NOT_DIR;
1892 +-
1893 +- if (backup_cred(cifs_sb))
1894 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1895 +
1896 + oparms.tcon = tcon;
1897 + oparms.cifs_sb = cifs_sb;
1898 + oparms.desired_access = GENERIC_WRITE;
1899 +- oparms.create_options = create_options;
1900 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1901 + oparms.disposition = FILE_CREATE;
1902 + oparms.path = path;
1903 + oparms.fid = &fid;
1904 +@@ -405,9 +401,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
1905 + oparms.tcon = tcon;
1906 + oparms.cifs_sb = cifs_sb;
1907 + oparms.desired_access = GENERIC_READ;
1908 +- oparms.create_options = CREATE_NOT_DIR;
1909 +- if (backup_cred(cifs_sb))
1910 +- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
1911 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1912 + oparms.disposition = FILE_OPEN;
1913 + oparms.fid = &fid;
1914 + oparms.reconnect = false;
1915 +@@ -460,14 +454,10 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
1916 + struct cifs_fid fid;
1917 + struct cifs_open_parms oparms;
1918 + struct cifs_io_parms io_parms;
1919 +- int create_options = CREATE_NOT_DIR;
1920 + __le16 *utf16_path;
1921 + __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1922 + struct kvec iov[2];
1923 +
1924 +- if (backup_cred(cifs_sb))
1925 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
1926 +-
1927 + cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1928 +
1929 + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1930 +@@ -477,7 +467,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
1931 + oparms.tcon = tcon;
1932 + oparms.cifs_sb = cifs_sb;
1933 + oparms.desired_access = GENERIC_WRITE;
1934 +- oparms.create_options = create_options;
1935 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1936 + oparms.disposition = FILE_CREATE;
1937 + oparms.fid = &fid;
1938 + oparms.reconnect = false;
1939 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
1940 +index e523c05a44876..b130efaf8feb2 100644
1941 +--- a/fs/cifs/smb1ops.c
1942 ++++ b/fs/cifs/smb1ops.c
1943 +@@ -504,7 +504,8 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
1944 + }
1945 +
1946 + static void
1947 +-cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
1948 ++cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
1949 ++ struct cifs_sb_info *cifs_sb)
1950 + {
1951 + CIFSSMBQFSDeviceInfo(xid, tcon);
1952 + CIFSSMBQFSAttributeInfo(xid, tcon);
1953 +@@ -565,7 +566,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
1954 + oparms.tcon = tcon;
1955 + oparms.cifs_sb = cifs_sb;
1956 + oparms.desired_access = FILE_READ_ATTRIBUTES;
1957 +- oparms.create_options = 0;
1958 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
1959 + oparms.disposition = FILE_OPEN;
1960 + oparms.path = full_path;
1961 + oparms.fid = &fid;
1962 +@@ -793,7 +794,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
1963 + oparms.tcon = tcon;
1964 + oparms.cifs_sb = cifs_sb;
1965 + oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
1966 +- oparms.create_options = CREATE_NOT_DIR;
1967 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
1968 + oparms.disposition = FILE_OPEN;
1969 + oparms.path = full_path;
1970 + oparms.fid = &fid;
1971 +@@ -872,7 +873,7 @@ cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
1972 +
1973 + static int
1974 + cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
1975 +- struct kstatfs *buf)
1976 ++ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
1977 + {
1978 + int rc = -EOPNOTSUPP;
1979 +
1980 +@@ -970,7 +971,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
1981 + oparms.tcon = tcon;
1982 + oparms.cifs_sb = cifs_sb;
1983 + oparms.desired_access = FILE_READ_ATTRIBUTES;
1984 +- oparms.create_options = OPEN_REPARSE_POINT;
1985 ++ oparms.create_options = cifs_create_options(cifs_sb,
1986 ++ OPEN_REPARSE_POINT);
1987 + oparms.disposition = FILE_OPEN;
1988 + oparms.path = full_path;
1989 + oparms.fid = &fid;
1990 +@@ -1029,7 +1031,6 @@ cifs_make_node(unsigned int xid, struct inode *inode,
1991 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1992 + struct inode *newinode = NULL;
1993 + int rc = -EPERM;
1994 +- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
1995 + FILE_ALL_INFO *buf = NULL;
1996 + struct cifs_io_parms io_parms;
1997 + __u32 oplock = 0;
1998 +@@ -1090,13 +1091,11 @@ cifs_make_node(unsigned int xid, struct inode *inode,
1999 + goto out;
2000 + }
2001 +
2002 +- if (backup_cred(cifs_sb))
2003 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
2004 +-
2005 + oparms.tcon = tcon;
2006 + oparms.cifs_sb = cifs_sb;
2007 + oparms.desired_access = GENERIC_WRITE;
2008 +- oparms.create_options = create_options;
2009 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
2010 ++ CREATE_OPTION_SPECIAL);
2011 + oparms.disposition = FILE_CREATE;
2012 + oparms.path = full_path;
2013 + oparms.fid = &fid;
2014 +diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
2015 +index f2a6f7f28340d..c9abda93d65b4 100644
2016 +--- a/fs/cifs/smb2inode.c
2017 ++++ b/fs/cifs/smb2inode.c
2018 +@@ -98,9 +98,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
2019 + oparms.tcon = tcon;
2020 + oparms.desired_access = desired_access;
2021 + oparms.disposition = create_disposition;
2022 +- oparms.create_options = create_options;
2023 +- if (backup_cred(cifs_sb))
2024 +- oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
2025 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
2026 + oparms.fid = &fid;
2027 + oparms.reconnect = false;
2028 + oparms.mode = mode;
2029 +@@ -456,7 +454,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
2030 +
2031 + /* If it is a root and its handle is cached then use it */
2032 + if (!strlen(full_path) && !no_cached_open) {
2033 +- rc = open_shroot(xid, tcon, &fid);
2034 ++ rc = open_shroot(xid, tcon, cifs_sb, &fid);
2035 + if (rc)
2036 + goto out;
2037 +
2038 +@@ -473,9 +471,6 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
2039 + goto out;
2040 + }
2041 +
2042 +- if (backup_cred(cifs_sb))
2043 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
2044 +-
2045 + cifs_get_readable_path(tcon, full_path, &cfile);
2046 + rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
2047 + FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
2048 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2049 +index 944c575a4a705..4cb0ebe7330eb 100644
2050 +--- a/fs/cifs/smb2ops.c
2051 ++++ b/fs/cifs/smb2ops.c
2052 +@@ -644,7 +644,8 @@ smb2_cached_lease_break(struct work_struct *work)
2053 + /*
2054 + * Open the directory at the root of a share
2055 + */
2056 +-int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
2057 ++int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
2058 ++ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid)
2059 + {
2060 + struct cifs_ses *ses = tcon->ses;
2061 + struct TCP_Server_Info *server = ses->server;
2062 +@@ -696,7 +697,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
2063 + rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2064 +
2065 + oparms.tcon = tcon;
2066 +- oparms.create_options = 0;
2067 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2068 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2069 + oparms.disposition = FILE_OPEN;
2070 + oparms.fid = pfid;
2071 +@@ -812,7 +813,8 @@ oshr_free:
2072 + }
2073 +
2074 + static void
2075 +-smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
2076 ++smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
2077 ++ struct cifs_sb_info *cifs_sb)
2078 + {
2079 + int rc;
2080 + __le16 srch_path = 0; /* Null - open root of share */
2081 +@@ -821,18 +823,19 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
2082 + struct cifs_fid fid;
2083 + bool no_cached_open = tcon->nohandlecache;
2084 +
2085 +- oparms.tcon = tcon;
2086 +- oparms.desired_access = FILE_READ_ATTRIBUTES;
2087 +- oparms.disposition = FILE_OPEN;
2088 +- oparms.create_options = 0;
2089 +- oparms.fid = &fid;
2090 +- oparms.reconnect = false;
2091 ++ oparms = (struct cifs_open_parms) {
2092 ++ .tcon = tcon,
2093 ++ .desired_access = FILE_READ_ATTRIBUTES,
2094 ++ .disposition = FILE_OPEN,
2095 ++ .create_options = cifs_create_options(cifs_sb, 0),
2096 ++ .fid = &fid,
2097 ++ };
2098 +
2099 + if (no_cached_open)
2100 + rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
2101 + NULL);
2102 + else
2103 +- rc = open_shroot(xid, tcon, &fid);
2104 ++ rc = open_shroot(xid, tcon, cifs_sb, &fid);
2105 +
2106 + if (rc)
2107 + return;
2108 +@@ -854,7 +857,8 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
2109 + }
2110 +
2111 + static void
2112 +-smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
2113 ++smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
2114 ++ struct cifs_sb_info *cifs_sb)
2115 + {
2116 + int rc;
2117 + __le16 srch_path = 0; /* Null - open root of share */
2118 +@@ -865,7 +869,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
2119 + oparms.tcon = tcon;
2120 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2121 + oparms.disposition = FILE_OPEN;
2122 +- oparms.create_options = 0;
2123 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2124 + oparms.fid = &fid;
2125 + oparms.reconnect = false;
2126 +
2127 +@@ -900,10 +904,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
2128 + oparms.tcon = tcon;
2129 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2130 + oparms.disposition = FILE_OPEN;
2131 +- if (backup_cred(cifs_sb))
2132 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2133 +- else
2134 +- oparms.create_options = 0;
2135 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2136 + oparms.fid = &fid;
2137 + oparms.reconnect = false;
2138 +
2139 +@@ -1179,10 +1180,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
2140 + oparms.tcon = tcon;
2141 + oparms.desired_access = FILE_WRITE_EA;
2142 + oparms.disposition = FILE_OPEN;
2143 +- if (backup_cred(cifs_sb))
2144 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2145 +- else
2146 +- oparms.create_options = 0;
2147 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2148 + oparms.fid = &fid;
2149 + oparms.reconnect = false;
2150 +
2151 +@@ -1414,6 +1412,7 @@ req_res_key_exit:
2152 + static int
2153 + smb2_ioctl_query_info(const unsigned int xid,
2154 + struct cifs_tcon *tcon,
2155 ++ struct cifs_sb_info *cifs_sb,
2156 + __le16 *path, int is_dir,
2157 + unsigned long p)
2158 + {
2159 +@@ -1439,6 +1438,7 @@ smb2_ioctl_query_info(const unsigned int xid,
2160 + struct kvec close_iov[1];
2161 + unsigned int size[2];
2162 + void *data[2];
2163 ++ int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
2164 +
2165 + memset(rqst, 0, sizeof(rqst));
2166 + resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2167 +@@ -1474,10 +1474,7 @@ smb2_ioctl_query_info(const unsigned int xid,
2168 + memset(&oparms, 0, sizeof(oparms));
2169 + oparms.tcon = tcon;
2170 + oparms.disposition = FILE_OPEN;
2171 +- if (is_dir)
2172 +- oparms.create_options = CREATE_NOT_FILE;
2173 +- else
2174 +- oparms.create_options = CREATE_NOT_DIR;
2175 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
2176 + oparms.fid = &fid;
2177 + oparms.reconnect = false;
2178 +
2179 +@@ -2074,10 +2071,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2180 + oparms.tcon = tcon;
2181 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2182 + oparms.disposition = FILE_OPEN;
2183 +- if (backup_cred(cifs_sb))
2184 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2185 +- else
2186 +- oparms.create_options = 0;
2187 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2188 + oparms.fid = fid;
2189 + oparms.reconnect = false;
2190 +
2191 +@@ -2278,10 +2272,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2192 + oparms.tcon = tcon;
2193 + oparms.desired_access = desired_access;
2194 + oparms.disposition = FILE_OPEN;
2195 +- if (cifs_sb && backup_cred(cifs_sb))
2196 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2197 +- else
2198 +- oparms.create_options = 0;
2199 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2200 + oparms.fid = &fid;
2201 + oparms.reconnect = false;
2202 +
2203 +@@ -2337,7 +2328,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2204 +
2205 + static int
2206 + smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2207 +- struct kstatfs *buf)
2208 ++ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2209 + {
2210 + struct smb2_query_info_rsp *rsp;
2211 + struct smb2_fs_full_size_info *info = NULL;
2212 +@@ -2374,7 +2365,7 @@ qfs_exit:
2213 +
2214 + static int
2215 + smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2216 +- struct kstatfs *buf)
2217 ++ struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2218 + {
2219 + int rc;
2220 + __le16 srch_path = 0; /* Null - open root of share */
2221 +@@ -2383,12 +2374,12 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2222 + struct cifs_fid fid;
2223 +
2224 + if (!tcon->posix_extensions)
2225 +- return smb2_queryfs(xid, tcon, buf);
2226 ++ return smb2_queryfs(xid, tcon, cifs_sb, buf);
2227 +
2228 + oparms.tcon = tcon;
2229 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2230 + oparms.disposition = FILE_OPEN;
2231 +- oparms.create_options = 0;
2232 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2233 + oparms.fid = &fid;
2234 + oparms.reconnect = false;
2235 +
2236 +@@ -2657,6 +2648,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2237 + struct smb2_create_rsp *create_rsp;
2238 + struct smb2_ioctl_rsp *ioctl_rsp;
2239 + struct reparse_data_buffer *reparse_buf;
2240 ++ int create_options = is_reparse_point ? OPEN_REPARSE_POINT : 0;
2241 + u32 plen;
2242 +
2243 + cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
2244 +@@ -2683,14 +2675,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
2245 + oparms.tcon = tcon;
2246 + oparms.desired_access = FILE_READ_ATTRIBUTES;
2247 + oparms.disposition = FILE_OPEN;
2248 +-
2249 +- if (backup_cred(cifs_sb))
2250 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2251 +- else
2252 +- oparms.create_options = 0;
2253 +- if (is_reparse_point)
2254 +- oparms.create_options = OPEN_REPARSE_POINT;
2255 +-
2256 ++ oparms.create_options = cifs_create_options(cifs_sb, create_options);
2257 + oparms.fid = &fid;
2258 + oparms.reconnect = false;
2259 +
2260 +@@ -2869,11 +2854,6 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2261 + tcon = tlink_tcon(tlink);
2262 + xid = get_xid();
2263 +
2264 +- if (backup_cred(cifs_sb))
2265 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2266 +- else
2267 +- oparms.create_options = 0;
2268 +-
2269 + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2270 + if (!utf16_path) {
2271 + rc = -ENOMEM;
2272 +@@ -2884,6 +2864,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2273 + oparms.tcon = tcon;
2274 + oparms.desired_access = READ_CONTROL;
2275 + oparms.disposition = FILE_OPEN;
2276 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2277 + oparms.fid = &fid;
2278 + oparms.reconnect = false;
2279 +
2280 +@@ -2925,11 +2906,6 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2281 + tcon = tlink_tcon(tlink);
2282 + xid = get_xid();
2283 +
2284 +- if (backup_cred(cifs_sb))
2285 +- oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2286 +- else
2287 +- oparms.create_options = 0;
2288 +-
2289 + if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2290 + access_flags = WRITE_OWNER;
2291 + else
2292 +@@ -2944,6 +2920,7 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2293 +
2294 + oparms.tcon = tcon;
2295 + oparms.desired_access = access_flags;
2296 ++ oparms.create_options = cifs_create_options(cifs_sb, 0);
2297 + oparms.disposition = FILE_OPEN;
2298 + oparms.path = path;
2299 + oparms.fid = &fid;
2300 +@@ -4481,7 +4458,6 @@ smb2_make_node(unsigned int xid, struct inode *inode,
2301 + {
2302 + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2303 + int rc = -EPERM;
2304 +- int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
2305 + FILE_ALL_INFO *buf = NULL;
2306 + struct cifs_io_parms io_parms;
2307 + __u32 oplock = 0;
2308 +@@ -4517,13 +4493,11 @@ smb2_make_node(unsigned int xid, struct inode *inode,
2309 + goto out;
2310 + }
2311 +
2312 +- if (backup_cred(cifs_sb))
2313 +- create_options |= CREATE_OPEN_BACKUP_INTENT;
2314 +-
2315 + oparms.tcon = tcon;
2316 + oparms.cifs_sb = cifs_sb;
2317 + oparms.desired_access = GENERIC_WRITE;
2318 +- oparms.create_options = create_options;
2319 ++ oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
2320 ++ CREATE_OPTION_SPECIAL);
2321 + oparms.disposition = FILE_CREATE;
2322 + oparms.path = full_path;
2323 + oparms.fid = &fid;
2324 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
2325 +index 57f7075a35871..4d4c0faa3d8a3 100644
2326 +--- a/fs/cifs/smb2proto.h
2327 ++++ b/fs/cifs/smb2proto.h
2328 +@@ -67,7 +67,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
2329 + struct mid_q_entry *mid);
2330 +
2331 + extern int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
2332 +- struct cifs_fid *pfid);
2333 ++ struct cifs_sb_info *cifs_sb, struct cifs_fid *pfid);
2334 + extern void close_shroot(struct cached_fid *cfid);
2335 + extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst,
2336 + struct smb2_file_all_info *src);
2337 +diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
2338 +index 37347ba868b70..d1ef651948d7e 100644
2339 +--- a/fs/ext4/fsmap.c
2340 ++++ b/fs/ext4/fsmap.c
2341 +@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
2342 + keys[0].fmr_physical = bofs;
2343 + if (keys[1].fmr_physical >= eofs)
2344 + keys[1].fmr_physical = eofs - 1;
2345 ++ if (keys[1].fmr_physical < keys[0].fmr_physical)
2346 ++ return 0;
2347 + start_fsb = keys[0].fmr_physical;
2348 + end_fsb = keys[1].fmr_physical;
2349 +
2350 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
2351 +index 5ef13ede04457..4eb4c23b04b93 100644
2352 +--- a/fs/ext4/inline.c
2353 ++++ b/fs/ext4/inline.c
2354 +@@ -157,7 +157,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
2355 + (void *)ext4_raw_inode(&is.iloc));
2356 + EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
2357 + le32_to_cpu(is.s.here->e_value_size);
2358 +- ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
2359 + }
2360 + out:
2361 + brelse(is.iloc.bh);
2362 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2363 +index cafa06114a1d1..63b10ee986d4f 100644
2364 +--- a/fs/ext4/inode.c
2365 ++++ b/fs/ext4/inode.c
2366 +@@ -4862,8 +4862,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
2367 +
2368 + if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
2369 + *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
2370 ++ int err;
2371 ++
2372 + ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2373 +- return ext4_find_inline_data_nolock(inode);
2374 ++ err = ext4_find_inline_data_nolock(inode);
2375 ++ if (!err && ext4_has_inline_data(inode))
2376 ++ ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
2377 ++ return err;
2378 + } else
2379 + EXT4_I(inode)->i_inline_off = 0;
2380 + return 0;
2381 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2382 +index 645186927c1f8..306ad7d0003bb 100644
2383 +--- a/fs/ext4/ioctl.c
2384 ++++ b/fs/ext4/ioctl.c
2385 +@@ -179,6 +179,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
2386 + ei_bl->i_flags = 0;
2387 + inode_set_iversion(inode_bl, 1);
2388 + i_size_write(inode_bl, 0);
2389 ++ EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
2390 + inode_bl->i_mode = S_IFREG;
2391 + if (ext4_has_feature_extents(sb)) {
2392 + ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
2393 +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2394 +index ee9e931e2e925..b708b437b3e36 100644
2395 +--- a/fs/ext4/namei.c
2396 ++++ b/fs/ext4/namei.c
2397 +@@ -1502,11 +1502,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
2398 + int has_inline_data = 1;
2399 + ret = ext4_find_inline_entry(dir, fname, res_dir,
2400 + &has_inline_data);
2401 +- if (has_inline_data) {
2402 +- if (inlined)
2403 +- *inlined = 1;
2404 ++ if (inlined)
2405 ++ *inlined = has_inline_data;
2406 ++ if (has_inline_data)
2407 + goto cleanup_and_exit;
2408 +- }
2409 + }
2410 +
2411 + if ((namelen <= 2) && (name[0] == '.') &&
2412 +@@ -3630,7 +3629,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
2413 + * so the old->de may no longer valid and need to find it again
2414 + * before reset old inode info.
2415 + */
2416 +- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
2417 ++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
2418 ++ &old.inlined);
2419 + if (IS_ERR(old.bh))
2420 + retval = PTR_ERR(old.bh);
2421 + if (!old.bh)
2422 +@@ -3795,9 +3795,20 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2423 + return retval;
2424 + }
2425 +
2426 +- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
2427 +- if (IS_ERR(old.bh))
2428 +- return PTR_ERR(old.bh);
2429 ++ /*
2430 ++ * We need to protect against old.inode directory getting converted
2431 ++ * from inline directory format into a normal one.
2432 ++ */
2433 ++ if (S_ISDIR(old.inode->i_mode))
2434 ++ inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
2435 ++
2436 ++ old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
2437 ++ &old.inlined);
2438 ++ if (IS_ERR(old.bh)) {
2439 ++ retval = PTR_ERR(old.bh);
2440 ++ goto unlock_moved_dir;
2441 ++ }
2442 ++
2443 + /*
2444 + * Check for inode number is _not_ due to possible IO errors.
2445 + * We might rmdir the source, keep it as pwd of some process
2446 +@@ -3855,8 +3866,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2447 + goto end_rename;
2448 + }
2449 + retval = ext4_rename_dir_prepare(handle, &old);
2450 +- if (retval)
2451 ++ if (retval) {
2452 ++ inode_unlock(old.inode);
2453 + goto end_rename;
2454 ++ }
2455 + }
2456 + /*
2457 + * If we're renaming a file within an inline_data dir and adding or
2458 +@@ -3956,6 +3969,11 @@ release_bh:
2459 + brelse(old.dir_bh);
2460 + brelse(old.bh);
2461 + brelse(new.bh);
2462 ++
2463 ++unlock_moved_dir:
2464 ++ if (S_ISDIR(old.inode->i_mode))
2465 ++ inode_unlock(old.inode);
2466 ++
2467 + return retval;
2468 + }
2469 +
2470 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2471 +index cf794afbd52fb..254bc6b26d698 100644
2472 +--- a/fs/ext4/xattr.c
2473 ++++ b/fs/ext4/xattr.c
2474 +@@ -2819,6 +2819,9 @@ shift:
2475 + (void *)header, total_ino);
2476 + EXT4_I(inode)->i_extra_isize = new_extra_isize;
2477 +
2478 ++ if (ext4_has_inline_data(inode))
2479 ++ error = ext4_find_inline_data_nolock(inode);
2480 ++
2481 + cleanup:
2482 + if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
2483 + ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
2484 +diff --git a/fs/file.c b/fs/file.c
2485 +index 51f53a7dc2218..e56059fa1b309 100644
2486 +--- a/fs/file.c
2487 ++++ b/fs/file.c
2488 +@@ -654,6 +654,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
2489 + fdt = files_fdtable(files);
2490 + if (fd >= fdt->max_fds)
2491 + goto out_unlock;
2492 ++ fd = array_index_nospec(fd, fdt->max_fds);
2493 + file = fdt->fd[fd];
2494 + if (!file)
2495 + goto out_unlock;
2496 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
2497 +index c3bcac22c3894..a68535f36d135 100644
2498 +--- a/include/asm-generic/vmlinux.lds.h
2499 ++++ b/include/asm-generic/vmlinux.lds.h
2500 +@@ -825,7 +825,12 @@
2501 + #define TRACEDATA
2502 + #endif
2503 +
2504 ++/*
2505 ++ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
2506 ++ * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
2507 ++ */
2508 + #define NOTES \
2509 ++ /DISCARD/ : { *(.note.GNU-stack) } \
2510 + .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
2511 + __start_notes = .; \
2512 + KEEP(*(.note.*)) \
2513 +@@ -900,10 +905,17 @@
2514 + * section definitions so that such archs put those in earlier section
2515 + * definitions.
2516 + */
2517 ++#ifdef RUNTIME_DISCARD_EXIT
2518 ++#define EXIT_DISCARDS
2519 ++#else
2520 ++#define EXIT_DISCARDS \
2521 ++ EXIT_TEXT \
2522 ++ EXIT_DATA
2523 ++#endif
2524 ++
2525 + #define DISCARDS \
2526 + /DISCARD/ : { \
2527 +- EXIT_TEXT \
2528 +- EXIT_DATA \
2529 ++ EXIT_DISCARDS \
2530 + EXIT_CALL \
2531 + *(.discard) \
2532 + *(.discard.*) \
2533 +diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
2534 +index 824d7a19dd66e..2552f66a7a891 100644
2535 +--- a/include/linux/irqdomain.h
2536 ++++ b/include/linux/irqdomain.h
2537 +@@ -254,7 +254,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
2538 + }
2539 +
2540 + void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
2541 +-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
2542 ++struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
2543 + irq_hw_number_t hwirq_max, int direct_max,
2544 + const struct irq_domain_ops *ops,
2545 + void *host_data);
2546 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
2547 +index a31aa3ac4219f..d35000669db5a 100644
2548 +--- a/include/linux/pci_ids.h
2549 ++++ b/include/linux/pci_ids.h
2550 +@@ -3106,6 +3106,8 @@
2551 +
2552 + #define PCI_VENDOR_ID_3COM_2 0xa727
2553 +
2554 ++#define PCI_VENDOR_ID_SOLIDRUN 0xd063
2555 ++
2556 + #define PCI_VENDOR_ID_DIGIUM 0xd161
2557 + #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
2558 +
2559 +diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
2560 +index 82d0e41b76f22..faa108b1ba675 100644
2561 +--- a/include/net/netfilter/nf_tproxy.h
2562 ++++ b/include/net/netfilter/nf_tproxy.h
2563 +@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
2564 + return false;
2565 + }
2566 +
2567 ++static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
2568 ++{
2569 ++ local_bh_disable();
2570 ++ inet_twsk_deschedule_put(tw);
2571 ++ local_bh_enable();
2572 ++}
2573 ++
2574 + /* assign a socket to the skb -- consumes sk */
2575 + static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
2576 + {
2577 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
2578 +index 8fd65a0eb7f3e..5189bc5ebd895 100644
2579 +--- a/kernel/bpf/btf.c
2580 ++++ b/kernel/bpf/btf.c
2581 +@@ -2719,6 +2719,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
2582 + struct btf *btf = env->btf;
2583 + u16 i;
2584 +
2585 ++ env->resolve_mode = RESOLVE_TBD;
2586 + for_each_vsi_from(i, v->next_member, v->t, vsi) {
2587 + u32 var_type_id = vsi->type, type_id, type_size = 0;
2588 + const struct btf_type *var_type = btf_type_by_id(env->btf,
2589 +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
2590 +index 3d1b570a1dadd..d40ae18fe6617 100644
2591 +--- a/kernel/irq/irqdomain.c
2592 ++++ b/kernel/irq/irqdomain.c
2593 +@@ -114,23 +114,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
2594 + }
2595 + EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
2596 +
2597 +-/**
2598 +- * __irq_domain_add() - Allocate a new irq_domain data structure
2599 +- * @fwnode: firmware node for the interrupt controller
2600 +- * @size: Size of linear map; 0 for radix mapping only
2601 +- * @hwirq_max: Maximum number of interrupts supported by controller
2602 +- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
2603 +- * direct mapping
2604 +- * @ops: domain callbacks
2605 +- * @host_data: Controller private data pointer
2606 +- *
2607 +- * Allocates and initializes an irq_domain structure.
2608 +- * Returns pointer to IRQ domain, or NULL on failure.
2609 +- */
2610 +-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
2611 +- irq_hw_number_t hwirq_max, int direct_max,
2612 +- const struct irq_domain_ops *ops,
2613 +- void *host_data)
2614 ++static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
2615 ++ unsigned int size,
2616 ++ irq_hw_number_t hwirq_max,
2617 ++ int direct_max,
2618 ++ const struct irq_domain_ops *ops,
2619 ++ void *host_data)
2620 + {
2621 + struct device_node *of_node = to_of_node(fwnode);
2622 + struct irqchip_fwid *fwid;
2623 +@@ -222,12 +211,44 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
2624 + domain->revmap_direct_max_irq = direct_max;
2625 + irq_domain_check_hierarchy(domain);
2626 +
2627 ++ return domain;
2628 ++}
2629 ++
2630 ++static void __irq_domain_publish(struct irq_domain *domain)
2631 ++{
2632 + mutex_lock(&irq_domain_mutex);
2633 + debugfs_add_domain_dir(domain);
2634 + list_add(&domain->link, &irq_domain_list);
2635 + mutex_unlock(&irq_domain_mutex);
2636 +
2637 + pr_debug("Added domain %s\n", domain->name);
2638 ++}
2639 ++
2640 ++/**
2641 ++ * __irq_domain_add() - Allocate a new irq_domain data structure
2642 ++ * @fwnode: firmware node for the interrupt controller
2643 ++ * @size: Size of linear map; 0 for radix mapping only
2644 ++ * @hwirq_max: Maximum number of interrupts supported by controller
2645 ++ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
2646 ++ * direct mapping
2647 ++ * @ops: domain callbacks
2648 ++ * @host_data: Controller private data pointer
2649 ++ *
2650 ++ * Allocates and initializes an irq_domain structure.
2651 ++ * Returns pointer to IRQ domain, or NULL on failure.
2652 ++ */
2653 ++struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
2654 ++ irq_hw_number_t hwirq_max, int direct_max,
2655 ++ const struct irq_domain_ops *ops,
2656 ++ void *host_data)
2657 ++{
2658 ++ struct irq_domain *domain;
2659 ++
2660 ++ domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
2661 ++ ops, host_data);
2662 ++ if (domain)
2663 ++ __irq_domain_publish(domain);
2664 ++
2665 + return domain;
2666 + }
2667 + EXPORT_SYMBOL_GPL(__irq_domain_add);
2668 +@@ -1068,12 +1089,15 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
2669 + struct irq_domain *domain;
2670 +
2671 + if (size)
2672 +- domain = irq_domain_create_linear(fwnode, size, ops, host_data);
2673 ++ domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
2674 + else
2675 +- domain = irq_domain_create_tree(fwnode, ops, host_data);
2676 ++ domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
2677 ++
2678 + if (domain) {
2679 + domain->parent = parent;
2680 + domain->flags |= flags;
2681 ++
2682 ++ __irq_domain_publish(domain);
2683 + }
2684 +
2685 + return domain;
2686 +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
2687 +index 46c62dd1479b8..862226be22868 100644
2688 +--- a/net/caif/caif_usb.c
2689 ++++ b/net/caif/caif_usb.c
2690 +@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
2691 + struct usb_device *usbdev;
2692 + int res;
2693 +
2694 ++ if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
2695 ++ return 0;
2696 ++
2697 + /* Check whether we have a NCM device, and find its VID/PID. */
2698 + if (!(dev->dev.parent && dev->dev.parent->driver &&
2699 + strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
2700 +diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
2701 +index b2bae0b0e42a1..61cb2341f50fe 100644
2702 +--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
2703 ++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
2704 +@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
2705 + hp->source, lport ? lport : hp->dest,
2706 + skb->dev, NF_TPROXY_LOOKUP_LISTENER);
2707 + if (sk2) {
2708 +- inet_twsk_deschedule_put(inet_twsk(sk));
2709 ++ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
2710 + sk = sk2;
2711 + }
2712 + }
2713 +diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
2714 +index 5fc1f4e0c0cf0..10f1367eb4ca0 100644
2715 +--- a/net/ipv6/ila/ila_xlat.c
2716 ++++ b/net/ipv6/ila/ila_xlat.c
2717 +@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
2718 +
2719 + rcu_read_lock();
2720 +
2721 ++ ret = -ESRCH;
2722 + ila = ila_lookup_by_params(&xp, ilan);
2723 + if (ila) {
2724 + ret = ila_dump_info(ila,
2725 +diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
2726 +index 34d51cd426b0c..00761924f2766 100644
2727 +--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
2728 ++++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
2729 +@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
2730 + lport ? lport : hp->dest,
2731 + skb->dev, NF_TPROXY_LOOKUP_LISTENER);
2732 + if (sk2) {
2733 +- inet_twsk_deschedule_put(inet_twsk(sk));
2734 ++ nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
2735 + sk = sk2;
2736 + }
2737 + }
2738 +diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
2739 +index c4c57830e9636..66ab97131fd24 100644
2740 +--- a/net/nfc/netlink.c
2741 ++++ b/net/nfc/netlink.c
2742 +@@ -1454,8 +1454,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
2743 + return rc;
2744 +
2745 + error:
2746 +- kfree(cb_context);
2747 + device_unlock(&dev->dev);
2748 ++ kfree(cb_context);
2749 + return rc;
2750 + }
2751 +
2752 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
2753 +index 5d696b7fb47e1..b398d72a7bc39 100644
2754 +--- a/net/smc/af_smc.c
2755 ++++ b/net/smc/af_smc.c
2756 +@@ -1542,16 +1542,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2757 + {
2758 + struct sock *sk = sock->sk;
2759 + struct smc_sock *smc;
2760 +- int rc = -EPIPE;
2761 ++ int rc;
2762 +
2763 + smc = smc_sk(sk);
2764 + lock_sock(sk);
2765 +- if ((sk->sk_state != SMC_ACTIVE) &&
2766 +- (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2767 +- (sk->sk_state != SMC_INIT))
2768 +- goto out;
2769 +
2770 ++ /* SMC does not support connect with fastopen */
2771 + if (msg->msg_flags & MSG_FASTOPEN) {
2772 ++ /* not connected yet, fallback */
2773 + if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2774 + smc_switch_to_fallback(smc);
2775 + smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
2776 +@@ -1559,6 +1557,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2777 + rc = -EINVAL;
2778 + goto out;
2779 + }
2780 ++ } else if ((sk->sk_state != SMC_ACTIVE) &&
2781 ++ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2782 ++ (sk->sk_state != SMC_INIT)) {
2783 ++ rc = -EPIPE;
2784 ++ goto out;
2785 + }
2786 +
2787 + if (smc->use_fallback)
2788 +diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
2789 +index 4e15e81673104..67697d8ea59a5 100755
2790 +--- a/tools/testing/selftests/netfilter/nft_nat.sh
2791 ++++ b/tools/testing/selftests/netfilter/nft_nat.sh
2792 +@@ -404,6 +404,8 @@ EOF
2793 + echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
2794 + sc_s=$!
2795 +
2796 ++ sleep 1
2797 ++
2798 + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
2799 +
2800 + if [ "$result" = "SERVER-inet" ];then