From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.17 commit in: /
Date: Tue, 26 Jun 2018 16:29:51
Message-Id: 1530030252.c429ed15c8c907dbe61005f1542b6f1c16e99543.alicef@gentoo
commit:     c429ed15c8c907dbe61005f1542b6f1c16e99543
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 26 16:24:12 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jun 26 16:24:12 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c429ed15c8c907dbe61005f1542b6f1c16e99543

linux kernel 4.17.3

 0000_README             |    4 +
 1002_linux-4.17.3.patch | 2352 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2356 insertions(+)

diff --git a/0000_README b/0000_README
index a4cf389..daa330b 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.17.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.17.2
 
+Patch:  1002_linux-4.17.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.17.3
+
 Patch:  1800_iommu-amd-dma-direct-revert.patch
 From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=e16c4790de39dc861b749674c2a9319507f6f64f
 Desc:   Revert iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}(). See bug #658538.

diff --git a/1002_linux-4.17.3.patch b/1002_linux-4.17.3.patch
new file mode 100644
index 0000000..9fdc2fc
--- /dev/null
+++ b/1002_linux-4.17.3.patch
@@ -0,0 +1,2352 @@
+diff --git a/Makefile b/Makefile
+index f43cd522b175..31dc3a08295a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+diff --git a/arch/um/drivers/vector_transports.c b/arch/um/drivers/vector_transports.c
+index 9065047f844b..77e4ebc206ae 100644
+--- a/arch/um/drivers/vector_transports.c
++++ b/arch/um/drivers/vector_transports.c
+@@ -120,7 +120,8 @@ static int raw_form_header(uint8_t *header,
+ skb,
+ vheader,
+ virtio_legacy_is_little_endian(),
+- false
++ false,
++ 0
+ );
+
+ return 0;
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 08acd954f00e..74a9e06b6cfd 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
+
+ #endif /* CONFIG_X86_LOCAL_APIC */
+
++extern void apic_ack_irq(struct irq_data *data);
++
+ static inline void ack_APIC_irq(void)
+ {
+ /*
+diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
+index 22647a642e98..0af81b590a0c 100644
+--- a/arch/x86/include/asm/trace/irq_vectors.h
++++ b/arch/x86/include/asm/trace/irq_vectors.h
+@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
+ TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
+ int ret),
+
+- TP_ARGS(irq, vector, ret, reserved),
++ TP_ARGS(irq, vector, reserved, ret),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, irq )
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 7553819c74c3..3982f79d2377 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
+ * intr-remapping table entry. Hence for the io-apic
+ * EOI we use the pin number.
+ */
+- ack_APIC_irq();
++ apic_ack_irq(irq_data);
+ eoi_ioapic_pin(data->entry.vector, data);
+ }
+
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index bb6f7a2148d7..b708f597eee3 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+ if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
+ return 0;
+
++ /*
++ * Careful here. @apicd might either have move_in_progress set or
++ * be enqueued for cleanup. Assigning a new vector would either
++ * leave a stale vector on some CPU around or in case of a pending
++ * cleanup corrupt the hlist.
++ */
++ if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
++ return -EBUSY;
++
+ vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
+ if (vector > 0)
+ apic_update_vector(irqd, vector, cpu);
+@@ -800,13 +809,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
+ return 1;
+ }
+
+-void apic_ack_edge(struct irq_data *irqd)
++void apic_ack_irq(struct irq_data *irqd)
+ {
+- irq_complete_move(irqd_cfg(irqd));
+ irq_move_irq(irqd);
+ ack_APIC_irq();
+ }
+
++void apic_ack_edge(struct irq_data *irqd)
++{
++ irq_complete_move(irqd_cfg(irqd));
++ apic_ack_irq(irqd);
++}
++
+ static struct irq_chip lapic_controller = {
+ .name = "APIC",
+ .irq_ack = apic_ack_edge,
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index 589b948e6e01..316a8875bd90 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -821,6 +821,8 @@ static __init void rdt_quirks(void)
+ case INTEL_FAM6_SKYLAKE_X:
+ if (boot_cpu_data.x86_stepping <= 4)
+ set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
++ else
++ set_rdt_options("!l3cat");
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 475cb4f5f14f..c805a06e14c3 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
+
+ static u8 n_banks;
+
+-#define MAX_FLAG_OPT_SIZE 3
++#define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+ enum injection_type {
+diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
+index e4cb9f4cde8a..fc13cbbb2dce 100644
+--- a/arch/x86/platform/uv/uv_irq.c
++++ b/arch/x86/platform/uv/uv_irq.c
+@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+
+ static void uv_noop(struct irq_data *data) { }
+
+-static void uv_ack_apic(struct irq_data *data)
+-{
+- ack_APIC_irq();
+-}
+-
+ static int
+ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
+ .name = "UV-CORE",
+ .irq_mask = uv_noop,
+ .irq_unmask = uv_noop,
+- .irq_eoi = uv_ack_apic,
++ .irq_eoi = apic_ack_irq,
+ .irq_set_affinity = uv_set_irq_affinity,
+ };
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 9ce9cac16c3f..90ffd8151c57 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2473,7 +2473,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+
+ mutex_lock(&set->tag_list_lock);
+ list_del_rcu(&q->tag_set_list);
+- INIT_LIST_HEAD(&q->tag_set_list);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+@@ -2481,8 +2480,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+ blk_mq_update_tag_set_depth(set, false);
+ }
+ mutex_unlock(&set->tag_list_lock);
+-
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->tag_set_list);
+ }
+
+ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
+index 68422afc365f..bc5f05906bd1 100644
+--- a/drivers/acpi/acpica/psloop.c
++++ b/drivers/acpi/acpica/psloop.c
+@@ -515,6 +515,22 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
++ if (walk_state->opcode == AML_SCOPE_OP) {
++ /*
++ * If the scope op fails to parse, skip the body of the
++ * scope op because the parse failure indicates that the
++ * device may not exist.
++ */
++ walk_state->parser_state.aml =
++ walk_state->aml + 1;
++ walk_state->parser_state.aml =
++ acpi_ps_get_next_package_end
++ (&walk_state->parser_state);
++ walk_state->aml =
++ walk_state->parser_state.aml;
++ ACPI_ERROR((AE_INFO,
++ "Skipping Scope block"));
++ }
+
+ continue;
+ }
+@@ -557,7 +573,40 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+-
++ if ((walk_state->control_state) &&
++ ((walk_state->control_state->control.
++ opcode == AML_IF_OP)
++ || (walk_state->control_state->control.
++ opcode == AML_WHILE_OP))) {
++ /*
++ * If the if/while op fails to parse, we will skip parsing
++ * the body of the op.
++ */
++ parser_state->aml =
++ walk_state->control_state->control.
++ aml_predicate_start + 1;
++ parser_state->aml =
++ acpi_ps_get_next_package_end
++ (parser_state);
++ walk_state->aml = parser_state->aml;
++
++ ACPI_ERROR((AE_INFO,
++ "Skipping While/If block"));
++ if (*walk_state->aml == AML_ELSE_OP) {
++ ACPI_ERROR((AE_INFO,
++ "Skipping Else block"));
++ walk_state->parser_state.aml =
++ walk_state->aml + 1;
++ walk_state->parser_state.aml =
++ acpi_ps_get_next_package_end
++ (parser_state);
++ walk_state->aml =
++ parser_state->aml;
++ }
++ ACPI_FREE(acpi_ut_pop_generic_state
++ (&walk_state->control_state));
++ }
++ op = NULL;
+ continue;
+ }
+ }
+diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
+index 7d9d0151ee54..3138e7a00da8 100644
+--- a/drivers/acpi/acpica/psobject.c
++++ b/drivers/acpi/acpica/psobject.c
+@@ -12,6 +12,7 @@
+ #include "acparser.h"
+ #include "amlcode.h"
+ #include "acconvert.h"
++#include "acnamesp.h"
+
+ #define _COMPONENT ACPI_PARSER
+ ACPI_MODULE_NAME("psobject")
+@@ -549,6 +550,21 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+
+ do {
+ if (*op) {
++ /*
++ * These Opcodes need to be removed from the namespace because they
++ * get created even if these opcodes cannot be created due to
++ * errors.
++ */
++ if (((*op)->common.aml_opcode == AML_REGION_OP)
++ || ((*op)->common.aml_opcode ==
++ AML_DATA_REGION_OP)) {
++ acpi_ns_delete_children((*op)->common.
++ node);
++ acpi_ns_remove_node((*op)->common.node);
++ (*op)->common.node = NULL;
++ acpi_ps_delete_parse_tree(*op);
++ }
++
+ status2 =
+ acpi_ps_complete_this_op(walk_state, *op);
+ if (ACPI_FAILURE(status2)) {
+@@ -574,6 +590,20 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
+ #endif
+ walk_state->prev_op = NULL;
+ walk_state->prev_arg_types = walk_state->arg_types;
++
++ if (walk_state->parse_flags & ACPI_PARSE_MODULE_LEVEL) {
++ /*
++ * There was something that went wrong while executing code at the
++ * module-level. We need to skip parsing whatever caused the
++ * error and keep going. One runtime error during the table load
++ * should not cause the entire table to not be loaded. This is
++ * because there could be correct AML beyond the parts that caused
++ * the runtime error.
++ */
++ ACPI_ERROR((AE_INFO,
++ "Ignore error and continue table load"));
++ return_ACPI_STATUS(AE_OK);
++ }
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
+index 12d4a0f6b8d2..5a64ddaed8a3 100644
+--- a/drivers/acpi/acpica/uterror.c
++++ b/drivers/acpi/acpica/uterror.c
+@@ -182,20 +182,20 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+- acpi_os_printf(ACPI_MSG_BIOS_ERROR);
++ acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+- acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+- message = "Failure looking up";
++ acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
++ message = "Could not resolve";
+ break;
+
+ default:
+
+- acpi_os_printf(ACPI_MSG_ERROR);
+- message = "Failure looking up";
++ acpi_os_printf("\n" ACPI_MSG_ERROR);
++ message = "Failure resolving";
+ break;
+ }
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 346b163f6e89..9bfd2f7e4542 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4557,9 +4557,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+
+- /* Sandisk devices which are known to not handle LPM well */
+- { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+-
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index de4ddd0e8550..b3ed8f9953a8 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -35,7 +35,7 @@ struct zpodd {
+ static int eject_tray(struct ata_device *dev)
+ {
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_START_STOP_UNIT,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index b610816eb887..d680fd030316 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1467,7 +1467,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+@@ -1477,7 +1477,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+- return NULL;
++ return ERR_PTR(retval);
+ }
+ return &dir->kobj;
+ }
+@@ -1784,6 +1784,10 @@ int device_add(struct device *dev)
+
+ parent = get_device(dev->parent);
+ kobj = get_device_parent(dev, parent);
++ if (IS_ERR(kobj)) {
++ error = PTR_ERR(kobj);
++ goto parent_error;
++ }
+ if (kobj)
+ dev->kobj.parent = kobj;
+
+@@ -1882,6 +1886,7 @@ int device_add(struct device *dev)
+ kobject_del(&dev->kobj);
+ Error:
+ cleanup_glue_dir(dev, glue_dir);
++parent_error:
+ put_device(parent);
+ name_error:
+ kfree(dev->p);
+@@ -2701,6 +2706,11 @@ int device_move(struct device *dev, struct device *new_parent,
+ device_pm_lock();
+ new_parent = get_device(new_parent);
+ new_parent_kobj = get_device_parent(dev, new_parent);
++ if (IS_ERR(new_parent_kobj)) {
++ error = PTR_ERR(new_parent_kobj);
++ put_device(new_parent);
++ goto out;
++ }
+
+ pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index afbc202ca6fd..64278f472efe 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = {
+ static void nbd_dev_remove(struct nbd_device *nbd)
+ {
+ struct gendisk *disk = nbd->disk;
++ struct request_queue *q;
++
+ if (disk) {
++ q = disk->queue;
+ del_gendisk(disk);
+- blk_cleanup_queue(disk->queue);
++ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ disk->private_data = NULL;
+ put_disk(disk);
+@@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd)
+ static void nbd_size_update(struct nbd_device *nbd)
+ {
+ struct nbd_config *config = nbd->config;
++ struct block_device *bdev = bdget_disk(nbd->disk, 0);
++
+ blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ set_capacity(nbd->disk, config->bytesize >> 9);
++ if (bdev) {
++ if (bdev->bd_disk)
++ bd_set_size(bdev, config->bytesize);
++ else
++ bdev->bd_invalidated = 1;
++ bdput(bdev);
++ }
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+
+@@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+ struct nbd_config *config = nbd->config;
+ config->blksize = blocksize;
+ config->bytesize = blocksize * nr_blocks;
++ if (nbd->task_recv != NULL)
++ nbd_size_update(nbd);
+ }
+
+ static void nbd_complete_rq(struct request *req)
+@@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
+ if (ret)
+ return ret;
+
+- bd_set_size(bdev, config->bytesize);
+ if (max_part)
+ bdev->bd_invalidated = 1;
+ mutex_unlock(&nbd->config_lock);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 075d18f6ba7a..54d4c0f999ec 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -696,6 +696,8 @@ static ssize_t store_##file_name \
+ struct cpufreq_policy new_policy; \
+ \
+ memcpy(&new_policy, policy, sizeof(*policy)); \
++ new_policy.min = policy->user_policy.min; \
++ new_policy.max = policy->user_policy.max; \
+ \
+ ret = sscanf(buf, "%u", &new_policy.object); \
+ if (ret != 1) \
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index ca38229b045a..43e14bb512c8 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ * calls, so the previous load value can be used then.
+ */
+ load = j_cdbs->prev_load;
+- } else if (unlikely(time_elapsed > 2 * sampling_rate &&
++ } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
+ /*
+ * If the CPU had gone completely idle and a task has
+@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+- * Detecting this situation is easy: the governor's
+- * utilization update handler would not have run during
+- * CPU-idle periods. Hence, an unusually large
+- * 'time_elapsed' (as compared to the sampling rate)
++ * Detecting this situation is easy: an unusually large
++ * 'idle_time' (as compared to the sampling rate)
+ * indicates this scenario.
+ */
+ load = j_cdbs->prev_load;
+@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ j_cdbs->prev_load = load;
+ }
+
+- if (time_elapsed > 2 * sampling_rate) {
+- unsigned int periods = time_elapsed / sampling_rate;
++ if (unlikely((int)idle_time > 2 * sampling_rate)) {
++ unsigned int periods = idle_time / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index 6ba709b6f095..896caba5dfe5 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -226,7 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
+ opp_data->cpu_dev = get_cpu_device(0);
+ if (!opp_data->cpu_dev) {
+ pr_err("%s: Failed to get device for CPU0\n", __func__);
+- ret = ENODEV;
++ ret = -ENODEV;
+ goto free_opp_data;
+ }
+
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 582e449be9fe..a2c53ea3b5ed 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
+ kfree(ishtp_dev);
+ }
+
+-#ifdef CONFIG_PM
+-static struct device *ish_resume_device;
++static struct device __maybe_unused *ish_resume_device;
+
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS 50
+@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
+ * in that case a simple resume message is enough, others we need
+ * a reset sequence.
+ */
+-static void ish_resume_handler(struct work_struct *work)
++static void __maybe_unused ish_resume_handler(struct work_struct *work)
+ {
+ struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
+ *
+ * Return: 0 to the pm core
+ */
+-static int ish_suspend(struct device *device)
++static int __maybe_unused ish_suspend(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
+ return 0;
+ }
+
+-static DECLARE_WORK(resume_work, ish_resume_handler);
++static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+ /**
+ * ish_resume() - ISH resume callback
+ * @device: device pointer
+@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
+ *
+ * Return: 0 to the pm core
+ */
+-static int ish_resume(struct device *device)
++static int __maybe_unused ish_resume(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
+ return 0;
+ }
+
+-static const struct dev_pm_ops ish_pm_ops = {
+- .suspend = ish_suspend,
+- .resume = ish_resume,
+-};
+-#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
+-#else
+-#define ISHTP_ISH_PM_OPS NULL
+-#endif /* CONFIG_PM */
++static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+
+ static struct pci_driver ish_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ish_pci_tbl,
+ .probe = ish_probe,
+ .remove = ish_remove,
+- .driver.pm = ISHTP_ISH_PM_OPS,
++ .driver.pm = &ish_pm_ops,
+ };
+
+ module_pci_driver(ish_driver);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index ee7a37eb159a..545986cfb978 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
+ }
+ }
+
++ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
++ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
++ hdev->product == 0x0358 &&
++ WACOM_PEN_FIELD(field) &&
++ wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
++ field->logical_maximum = 43200;
++ }
++
+ switch (usage->hid) {
+ case HID_GD_X:
+ features->x_max = field->logical_maximum;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 8fb8c737fffe..b0b30a568db7 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4379,7 +4379,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+
+ static struct irq_chip amd_ir_chip = {
+ .name = "AMD-IR",
+- .irq_ack = ir_ack_apic_edge,
++ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = amd_ir_set_affinity,
+ .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
+ .irq_compose_msi_msg = ir_compose_msi_msg,
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 3062a154a9fb..967450bd421a 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+
+ static struct irq_chip intel_ir_chip = {
+ .name = "INTEL-IR",
+- .irq_ack = ir_ack_apic_edge,
++ .irq_ack = apic_ack_irq,
+ .irq_set_affinity = intel_ir_set_affinity,
+ .irq_compose_msi_msg = intel_ir_compose_msi_msg,
+ .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 496deee3ae3a..7d0f3074d41d 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
+ panic(msg);
+ }
+
+-void ir_ack_apic_edge(struct irq_data *data)
+-{
+- ack_APIC_irq();
+-}
+-
+ /**
+ * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
+ * device serving request @info
+diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
+index 039c7af7b190..0afef6e43be4 100644
+--- a/drivers/iommu/irq_remapping.h
++++ b/drivers/iommu/irq_remapping.h
+@@ -65,8 +65,6 @@ struct irq_remap_ops {
+ extern struct irq_remap_ops intel_irq_remap_ops;
+ extern struct irq_remap_ops amd_iommu_irq_ops;
+
+-extern void ir_ack_apic_edge(struct irq_data *data);
+-
+ #else /* CONFIG_IRQ_REMAP */
+
+ #define irq_remapping_enabled 0
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index b67be33bd62f..cea7b2d2e60a 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1860,6 +1860,8 @@ int rc_register_device(struct rc_dev *dev)
+ dev->device_name ?: "Unspecified device", path ?: "N/A");
+ kfree(path);
+
++ dev->registered = true;
++
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_setup_rx_device(dev);
+ if (rc)
+@@ -1879,8 +1881,6 @@ int rc_register_device(struct rc_dev *dev)
+ goto out_lirc;
+ }
+
+- dev->registered = true;
+-
+ dev_dbg(&dev->dev, "Registered rc%u (driver: %s)\n", dev->minor,
+ dev->driver_name ? dev->driver_name : "unknown");
+
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 102594ec3e97..a36b4fb949fa 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -1607,14 +1607,12 @@ static int uvc_ctrl_get_flags(struct uvc_device *dev,
+ ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum,
+ info->selector, data, 1);
+ if (!ret)
+- info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
+- | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF
+- | (data[0] & UVC_CONTROL_CAP_GET ?
+- UVC_CTRL_FLAG_GET_CUR : 0)
+- | (data[0] & UVC_CONTROL_CAP_SET ?
+- UVC_CTRL_FLAG_SET_CUR : 0)
+- | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
+- UVC_CTRL_FLAG_AUTO_UPDATE : 0);
++ info->flags |= (data[0] & UVC_CONTROL_CAP_GET ?
++ UVC_CTRL_FLAG_GET_CUR : 0)
++ | (data[0] & UVC_CONTROL_CAP_SET ?
++ UVC_CTRL_FLAG_SET_CUR : 0)
++ | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ?
++ UVC_CTRL_FLAG_AUTO_UPDATE : 0);
+
+ kfree(data);
+ return ret;
+@@ -1689,6 +1687,9 @@ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev,
+
+ info->size = le16_to_cpup((__le16 *)data);
+
++ info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
++ | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF;
++
+ ret = uvc_ctrl_get_flags(dev, ctrl, info);
+ if (ret < 0) {
+ uvc_trace(UVC_TRACE_CONTROL,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 58c705f24f96..b594bae1adbd 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
+ slave->dev->name);
+ rcu_assign_pointer(bond->primary_slave, slave);
+ strcpy(bond->params.primary, slave->dev->name);
++ bond->force_primary = true;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index a50e08bb4748..750007513f9d 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -267,14 +267,13 @@ static int aq_pci_probe(struct pci_dev *pdev,
+ numvecs = min(numvecs, num_online_cpus());
+ /*enable interrupts */
+ #if !AQ_CFG_FORCE_LEGACY_INT
+- numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+- PCI_IRQ_MSIX | PCI_IRQ_MSI |
+- PCI_IRQ_LEGACY);
++ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
++ PCI_IRQ_MSIX | PCI_IRQ_MSI |
++ PCI_IRQ_LEGACY);
+
+- if (numvecs < 0) {
+- err = numvecs;
++ if (err < 0)
+ goto err_hwinit;
+- }
++ numvecs = err;
+ #endif
+ self->irqvecs = numvecs;
+
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index da07ccdf84bf..eb8dccd24abf 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -126,8 +126,10 @@ static int netvsc_open(struct net_device *net)
+ }
+
+ rdev = nvdev->extension;
+- if (!rdev->link_state)
++ if (!rdev->link_state) {
+ netif_carrier_on(net);
++ netif_tx_wake_all_queues(net);
++ }
+
+ if (vf_netdev) {
+ /* Setting synthetic device up transparently sets
+diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
+index cd09c3af2117..6e8e42361fd5 100644
+--- a/drivers/net/phy/dp83848.c
++++ b/drivers/net/phy/dp83848.c
+@@ -74,6 +74,25 @@ static int dp83848_config_intr(struct phy_device *phydev)
+ return phy_write(phydev, DP83848_MICR, control);
+ }
+
++static int dp83848_config_init(struct phy_device *phydev)
++{
++ int err;
++ int val;
++
++ err = genphy_config_init(phydev);
++ if (err < 0)
++ return err;
++
++ /* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
++ * we check initial value of BMCR Auto negotiation enable bit
++ */
++ val = phy_read(phydev, MII_BMCR);
++ if (!(val & BMCR_ANENABLE))
++ phydev->autoneg = AUTONEG_DISABLE;
++
++ return 0;
++}
++
+ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ { TI_DP83848C_PHY_ID, 0xfffffff0 },
+ { NS_DP83848C_PHY_ID, 0xfffffff0 },
+@@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+-#define DP83848_PHY_DRIVER(_id, _name) \
++#define DP83848_PHY_DRIVER(_id, _name, _config_init) \
+ { \
+ .phy_id = _id, \
+ .phy_id_mask = 0xfffffff0, \
+@@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ .flags = PHY_HAS_INTERRUPT, \
+ \
+ .soft_reset = genphy_soft_reset, \
+- .config_init = genphy_config_init, \
++ .config_init = _config_init, \
+ .suspend = genphy_suspend, \
+ .resume = genphy_resume, \
+ \
+@@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ }
+
+ static struct phy_driver dp83848_driver[] = {
+- DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
+- DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
++ DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
++ genphy_config_init),
++ DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
++ genphy_config_init),
++ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
++ dp83848_config_init),
++ DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
++ genphy_config_init),
+ };
+ module_phy_driver(dp83848_driver);
+
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 9b6cb780affe..f0f7cd977667 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
+ int total;
+
+ if (q->flags & IFF_VNET_HDR) {
++ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+ struct virtio_net_hdr vnet_hdr;
++
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (iov_iter_count(iter) < vnet_hdr_len)
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+- tap_is_little_endian(q), true))
++ tap_is_little_endian(q), true,
++ vlan_hlen))
+ BUG();
+
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 23e9eb66197f..409eb8b74740 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2078,7 +2078,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &gso,
+- tun_is_little_endian(tun), true)) {
++ tun_is_little_endian(tun), true,
++ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 90d07ed224d5..b0e8b9613054 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ * accordingly. Otherwise, we should check here.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+- delayed_ndp_size = ctx->max_ndp_size;
++ delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ else
+ delayed_ndp_size = 0;
+
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 032e1ac10a30..8c7207535179 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1358,7 +1358,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ hdr = skb_vnet_hdr(skb);
+
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+- virtio_is_little_endian(vi->vdev), false))
++ virtio_is_little_endian(vi->vdev), false,
++ 0))
+ BUG();
+
+ if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+index 1fec8e3a6b35..6afcfd1f0eec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+@@ -8,6 +8,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -30,6 +31,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *image)
+ {
+- int sec_idx, idx;
++ int sec_idx, idx, ret;
+ u32 offset = 0;
+
+ /*
+@@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ */
+ if (sec_idx >= image->num_sec - 1) {
+ IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+- iwl_free_fw_paging(fwrt);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err;
+ }
+
+ /* copy the CSS block to the dram */
+ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+ sec_idx);
+
++ if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
++ IWL_ERR(fwrt, "CSS block is larger than paging size\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+ image->sec[sec_idx].data,
+- fwrt->fw_paging_db[0].fw_paging_size);
++ image->sec[sec_idx].len);
+ dma_sync_single_for_device(fwrt->trans->dev,
+ fwrt->fw_paging_db[0].fw_paging_phys,
+ fwrt->fw_paging_db[0].fw_paging_size,
+@@ -221,6 +229,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
+ struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+
++ if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
++ IWL_ERR(fwrt,
++ "Paging: paging size is larger than remaining data in block %d\n",
++ idx);
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(block->fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ block->fw_paging_size);
+@@ -231,19 +247,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+
+ IWL_DEBUG_FW(fwrt,
+ "Paging: copied %d paging bytes to block %d\n",
+- fwrt->fw_paging_db[idx].fw_paging_size,
+- idx);
++ block->fw_paging_size, idx);
+
+- offset += fwrt->fw_paging_db[idx].fw_paging_size;
++ offset += block->fw_paging_size;
++
++ if (offset > image->sec[sec_idx].len) {
++ IWL_ERR(fwrt,
++ "Paging: offset goes over section size\n");
++ ret = -EINVAL;
++ goto err;
++ }
+ }
+
+ /* copy the last paging block */
+ if (fwrt->num_of_pages_in_last_blk > 0) {
+ struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+
++ if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
++ IWL_ERR(fwrt,
++ "Paging: last block is larger than paging size\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(block->fw_paging_block),
+ image->sec[sec_idx].data + offset,
+- FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
++ image->sec[sec_idx].len - offset);
+ dma_sync_single_for_device(fwrt->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+@@ -255,6 +284,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ }
+
+ return 0;
+
++err:
++ iwl_free_fw_paging(fwrt);
++ return ret;
+ }
+
+ static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 17a0190bd88f..5dbb0f0c02ef 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2679,8 +2679,15 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+
+ dev_info(dev->ctrl.device, "restart after slot reset\n");
+ pci_restore_state(pdev);
+- nvme_reset_ctrl(&dev->ctrl);
+- return PCI_ERS_RESULT_RECOVERED;
++ nvme_reset_ctrl_sync(&dev->ctrl);
++
++ switch (dev->ctrl.state) {
++ case NVME_CTRL_LIVE:
++ case NVME_CTRL_ADMIN_ONLY:
++ return PCI_ERS_RESULT_RECOVERED;
++ default:
++ return PCI_ERS_RESULT_DISCONNECT;
++ }
+ }
+
+ static void nvme_error_resume(struct pci_dev *pdev)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index f0be5f35ab28..9beefa6ed1ce 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2345,6 +2345,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
++
++ /* Make sure all padding within the structure is initialized. */
++ memset(&node->msg, 0, sizeof node->msg);
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
+index 74f2e6e6202a..8851d441e5fd 100644
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ if (IS_ERR(mdev->clk))
+ return PTR_ERR(mdev->clk);
+
++ err = clk_prepare_enable(mdev->clk);
++ if (err)
++ return err;
++
+ clkrate = clk_get_rate(mdev->clk);
+ if (clkrate < 10000000)
+ dev_warn(&pdev->dev,
+@@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(mdev->regs))
+- return PTR_ERR(mdev->regs);
+-
+- err = clk_prepare_enable(mdev->clk);
+- if (err)
+- return err;
++ if (IS_ERR(mdev->regs)) {
++ err = PTR_ERR(mdev->regs);
++ goto out_disable_clk;
++ }
+
+ /* Software reset 1-Wire module */
+ writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+@@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
+
+ err = w1_add_master_device(&mdev->bus_master);
+ if (err)
+- clk_disable_unprepare(mdev->clk);
++ goto out_disable_clk;
+
++ return 0;
++
++out_disable_clk:
++ clk_disable_unprepare(mdev->clk);
+ return err;
+ }
+
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index a41b48f82a70..4de191563261 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ s = strchr(p, del);
+ if (!s)
+ goto einval;
+- *s++ = '\0';
+- e->offset = simple_strtoul(p, &p, 10);
++ *s = '\0';
++ if (p != s) {
++ int r = kstrtoint(p, 10, &e->offset);
++ if (r != 0 || e->offset < 0)
++ goto einval;
++ }
++ p = s;
+ if (*p++)
+ goto einval;
+ pr_debug("register: offset: %#x\n", e->offset);
+@@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ if (e->mask &&
+ string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
+ goto einval;
+- if (e->size + e->offset > BINPRM_BUF_SIZE)
++ if (e->size > BINPRM_BUF_SIZE ||
++ BINPRM_BUF_SIZE - e->size < e->offset)
+ goto einval;
+ pr_debug("register: magic/mask length: %i\n", e->size);
+ if (USE_DEBUG) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 0b86cf10cf2a..775a0f2d0b45 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1018,8 +1018,10 @@ static noinline int cow_file_range(struct inode *inode,
+ ram_size, /* ram_bytes */
+ BTRFS_COMPRESS_NONE, /* compress_type */
+ BTRFS_ORDERED_REGULAR /* type */);
+- if (IS_ERR(em))
++ if (IS_ERR(em)) {
++ ret = PTR_ERR(em);
+ goto out_reserve;
++ }
+ free_extent_map(em);
+
+ ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 632e26d6f7ce..28fed3e8960b 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2654,8 +2654,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
+ }
+
+ /* Check for compatibility reject unknown flags */
+- if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
+- return -EOPNOTSUPP;
++ if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+@@ -3826,11 +3828,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ src->i_sb != inode->i_sb)
+ return -EXDEV;
+
+- /* don't make the dst file partly checksummed */
+- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+- return -EINVAL;
+-
+ if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
+ return -EISDIR;
+
+@@ -3840,6 +3837,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ inode_lock(src);
+ }
+
++ /* don't make the dst file partly checksummed */
++ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
++ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ /* determine range to clone */
+ ret = -EINVAL;
+ if (off + len > src->i_size || off + len < off)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 52b39a0924e9..ad8a69ba7f13 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2799,7 +2799,7 @@ static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
+ have_csum = scrub_find_csum(sctx, logical, csum);
+ if (have_csum == 0)
+ ++sctx->stat.no_csum;
+- if (sctx->is_dev_replace && !have_csum) {
++ if (0 && sctx->is_dev_replace && !have_csum) {
+ ret = copy_nocow_pages(sctx, logical, l,
+ mirror_num,
+ physical_for_dev_replace);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 0628092b0b1b..f82152a0cb38 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -323,6 +323,7 @@ enum {
+ Opt_ssd, Opt_nossd,
+ Opt_ssd_spread, Opt_nossd_spread,
+ Opt_subvol,
++ Opt_subvol_empty,
+ Opt_subvolid,
+ Opt_thread_pool,
+ Opt_treelog, Opt_notreelog,
+@@ -388,6 +389,7 @@ static const match_table_t tokens = {
+ {Opt_ssd_spread, "ssd_spread"},
+ {Opt_nossd_spread, "nossd_spread"},
+ {Opt_subvol, "subvol=%s"},
++ {Opt_subvol_empty, "subvol="},
+ {Opt_subvolid, "subvolid=%s"},
+ {Opt_thread_pool, "thread_pool=%u"},
+ {Opt_treelog, "treelog"},
+@@ -461,6 +463,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
+ btrfs_set_opt(info->mount_opt, DEGRADED);
+ break;
+ case Opt_subvol:
++ case Opt_subvol_empty:
+ case Opt_subvolid:
+ case Opt_subvolrootid:
+ case Opt_device:
+diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
+index 4f3884835267..dd95a6fa24bf 100644
+--- a/fs/cifs/cifsacl.h
++++ b/fs/cifs/cifsacl.h
+@@ -98,4 +98,18 @@ struct cifs_ace {
+ struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+
++/*
++ * Minimum security identifier can be one for system defined Users
++ * and Groups such as NULL SID and World or Built-in accounts such
++ * as Administrator and Guest and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */
++
++/*
++ * Minimum security descriptor can be one without any SACL and DACL and can
++ * consist of revision, type, and two sids of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
+ #endif /* _CIFSACL_H */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 9c6d95ffca97..4ee32488ff74 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1277,10 +1277,11 @@ smb2_is_session_expired(char *buf)
+ {
+ struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
+
+- if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
++ if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++ shdr->Status != STATUS_USER_SESSION_DELETED)
+ return false;
+
+- cifs_dbg(FYI, "Session expired\n");
++ cifs_dbg(FYI, "Session expired or deleted\n");
+ return true;
+ }
+
+@@ -1593,8 +1594,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ oparms.create_options = 0;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return ERR_PTR(-ENOMEM);
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return ERR_PTR(rc);
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = READ_CONTROL;
+@@ -1652,8 +1656,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ access_flags = WRITE_DAC;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return -ENOMEM;
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return rc;
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = access_flags;
+@@ -1713,15 +1720,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Must check if file sparse since fallocate -z (zero range) assumes
+ * non-sparse allocation
+ */
+- if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+- return -EOPNOTSUPP;
++ if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * need to make sure we are not asked to extend the file since the SMB3
+@@ -1730,8 +1743,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ * which for a non sparse file would zero the newly extended range
+ */
+ if (keep_size == false)
+- if (i_size_read(inode) < offset + len)
+- return -EOPNOTSUPP;
++ if (i_size_read(inode) < offset + len) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1764,8 +1780,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+
+ /* Need to make file sparse, if not already, before freeing range. */
+ /* Consider adding equivalent for compressed since it could also work */
+- if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+- return -EOPNOTSUPP;
++ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1796,8 +1815,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Files are non-sparse by default so falloc may be a no-op
+@@ -1806,14 +1827,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ */
+ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+ if (keep_size == true)
+- return 0;
++ rc = 0;
+ /* check if extending file */
+ else if (i_size_read(inode) >= off + len)
+ /* not extending file and already not sparse */
+- return 0;
++ rc = 0;
+ /* BB: in future add else clause to extend file */
+ else
+- return -EOPNOTSUPP;
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
+ }
+
+ if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+@@ -1825,8 +1848,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ * ie potentially making a few extra pages at the beginning
+ * or end of the file non-sparse via set_sparse is harmless.
+ */
+- if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
+- return -EOPNOTSUPP;
++ if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+ }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0f48741a0130..32d7fd830aae 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1276,6 +1276,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ sess_data->ses = ses;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ sess_data->nls_cp = (struct nls_table *) nls_cp;
++ sess_data->previous_session = ses->Suid;
+
+ #ifdef CONFIG_CIFS_SMB311
+ /*
+@@ -2377,8 +2378,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 0, SMB2_O_INFO_SECURITY, additional_info,
+- SMB2_MAX_BUFFER_SIZE,
+- sizeof(struct smb2_file_all_info), data, plen);
++ SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+ }
+
+ int
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c32802c956d5..bf7fa1507e81 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
+ int i;
+
+- /* Count number blocks in a subtree under 'partial' */
+- count = 1;
+- for (i = 0; partial + i != chain + depth - 1; i++)
+- count *= epb;
++ /*
++ * Count number blocks in a subtree under 'partial'. At each
++ * level we count number of complete empty subtrees beyond
++ * current offset and then descend into the subtree only
++ * partially beyond current offset.
++ */
++ count = 0;
++ for (i = partial - chain + 1; i < depth; i++)
++ count = count * epb + (epb - offsets[i] - 1);
++ count++;
+ /* Fill in size of a hole we found */
+ map->m_pblk = 0;
+ map->m_len = min_t(unsigned int, map->m_len, count);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 70cf4c7b268a..44b4fcdc3755 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -144,6 +144,12 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+ goto out;
+
+ if (!is.s.not_found) {
++ if (is.s.here->e_value_inum) {
++ EXT4_ERROR_INODE(inode, "inline data xattr refers "
++ "to an external xattr inode");
++ error = -EFSCORRUPTED;
++ goto out;
++ }
+ EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ (void *)ext4_raw_inode(&is.iloc));
+ EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
1528 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1529 +index 1e50c5efae67..c73cb9346aee 100644
1530 +--- a/fs/ext4/inode.c
1531 ++++ b/fs/ext4/inode.c
1532 +@@ -4298,28 +4298,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1533 + EXT4_BLOCK_SIZE_BITS(sb);
1534 + stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
1535 +
1536 +- /* If there are no blocks to remove, return now */
1537 +- if (first_block >= stop_block)
1538 +- goto out_stop;
1539 ++ /* If there are blocks to remove, do it */
1540 ++ if (stop_block > first_block) {
1541 +
1542 +- down_write(&EXT4_I(inode)->i_data_sem);
1543 +- ext4_discard_preallocations(inode);
1544 ++ down_write(&EXT4_I(inode)->i_data_sem);
1545 ++ ext4_discard_preallocations(inode);
1546 +
1547 +- ret = ext4_es_remove_extent(inode, first_block,
1548 +- stop_block - first_block);
1549 +- if (ret) {
1550 +- up_write(&EXT4_I(inode)->i_data_sem);
1551 +- goto out_stop;
1552 +- }
1553 ++ ret = ext4_es_remove_extent(inode, first_block,
1554 ++ stop_block - first_block);
1555 ++ if (ret) {
1556 ++ up_write(&EXT4_I(inode)->i_data_sem);
1557 ++ goto out_stop;
1558 ++ }
1559 +
1560 +- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1561 +- ret = ext4_ext_remove_space(inode, first_block,
1562 +- stop_block - 1);
1563 +- else
1564 +- ret = ext4_ind_remove_space(handle, inode, first_block,
1565 +- stop_block);
1566 ++ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1567 ++ ret = ext4_ext_remove_space(inode, first_block,
1568 ++ stop_block - 1);
1569 ++ else
1570 ++ ret = ext4_ind_remove_space(handle, inode, first_block,
1571 ++ stop_block);
1572 +
1573 +- up_write(&EXT4_I(inode)->i_data_sem);
1574 ++ up_write(&EXT4_I(inode)->i_data_sem);
1575 ++ }
1576 + if (IS_SYNC(inode))
1577 + ext4_handle_sync(handle);
1578 +
1579 +@@ -4701,19 +4701,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
1580 + }
1581 + }
1582 +
1583 +-static inline void ext4_iget_extra_inode(struct inode *inode,
1584 ++static inline int ext4_iget_extra_inode(struct inode *inode,
1585 + struct ext4_inode *raw_inode,
1586 + struct ext4_inode_info *ei)
1587 + {
1588 + __le32 *magic = (void *)raw_inode +
1589 + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
1590 ++
1591 + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
1592 + EXT4_INODE_SIZE(inode->i_sb) &&
1593 + *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
1594 + ext4_set_inode_state(inode, EXT4_STATE_XATTR);
1595 +- ext4_find_inline_data_nolock(inode);
1596 ++ return ext4_find_inline_data_nolock(inode);
1597 + } else
1598 + EXT4_I(inode)->i_inline_off = 0;
1599 ++ return 0;
1600 + }
1601 +
1602 + int ext4_get_projid(struct inode *inode, kprojid_t *projid)
1603 +@@ -4893,7 +4895,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1604 + ei->i_extra_isize = sizeof(struct ext4_inode) -
1605 + EXT4_GOOD_OLD_INODE_SIZE;
1606 + } else {
1607 +- ext4_iget_extra_inode(inode, raw_inode, ei);
1608 ++ ret = ext4_iget_extra_inode(inode, raw_inode, ei);
1609 ++ if (ret)
1610 ++ goto bad_inode;
1611 + }
1612 + }
1613 +
1614 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1615 +index b6bec270a8e4..d792b7689d92 100644
1616 +--- a/fs/ext4/resize.c
1617 ++++ b/fs/ext4/resize.c
1618 +@@ -1933,7 +1933,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
1619 + return 0;
1620 +
1621 + n_group = ext4_get_group_number(sb, n_blocks_count - 1);
1622 +- if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1623 ++ if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
1624 + ext4_warning(sb, "resize would cause inodes_count overflow");
1625 + return -EINVAL;
1626 + }
1627 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1628 +index 499cb4b1fbd2..fc4ced59c565 100644
1629 +--- a/fs/ext4/xattr.c
1630 ++++ b/fs/ext4/xattr.c
1631 +@@ -1688,7 +1688,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1632 +
1633 + /* No failures allowed past this point. */
1634 +
1635 +- if (!s->not_found && here->e_value_offs) {
1636 ++ if (!s->not_found && here->e_value_size && here->e_value_offs) {
1637 + /* Remove the old value. */
1638 + void *first_val = s->base + min_offs;
1639 + size_t offs = le16_to_cpu(here->e_value_offs);
1640 +diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
1641 +index 79c61da8b1bc..c65a51d87cac 100644
1642 +--- a/fs/orangefs/inode.c
1643 ++++ b/fs/orangefs/inode.c
1644 +@@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
1645 + else
1646 + stat->result_mask = STATX_BASIC_STATS &
1647 + ~STATX_SIZE;
1648 ++
1649 ++ stat->attributes_mask = STATX_ATTR_IMMUTABLE |
1650 ++ STATX_ATTR_APPEND;
1651 ++ if (inode->i_flags & S_IMMUTABLE)
1652 ++ stat->attributes |= STATX_ATTR_IMMUTABLE;
1653 ++ if (inode->i_flags & S_APPEND)
1654 ++ stat->attributes |= STATX_ATTR_APPEND;
1655 + }
1656 + return ret;
1657 + }
1658 +diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
1659 +index 1b5707c44c3f..e026bee02a66 100644
1660 +--- a/fs/orangefs/namei.c
1661 ++++ b/fs/orangefs/namei.c
1662 +@@ -326,6 +326,13 @@ static int orangefs_symlink(struct inode *dir,
1663 + ret = PTR_ERR(inode);
1664 + goto out;
1665 + }
1666 ++ /*
1667 ++ * This is necessary because orangefs_inode_getattr will not
1668 ++ * re-read symlink size as it is impossible for it to change.
1669 ++ * Invalidating the cache does not help. orangefs_new_inode
1670 ++ * does not set the correct size (it does not know symname).
1671 ++ */
1672 ++ inode->i_size = strlen(symname);
1673 +
1674 + gossip_debug(GOSSIP_NAME_DEBUG,
1675 + "Assigned symlink inode new number of %pU\n",
1676 +diff --git a/include/linux/irq.h b/include/linux/irq.h
1677 +index 65916a305f3d..4e66378f290b 100644
1678 +--- a/include/linux/irq.h
1679 ++++ b/include/linux/irq.h
1680 +@@ -551,7 +551,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
1681 + #endif
1682 +
1683 + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
1684 +-void irq_move_irq(struct irq_data *data);
1685 ++void __irq_move_irq(struct irq_data *data);
1686 ++static inline void irq_move_irq(struct irq_data *data)
1687 ++{
1688 ++ if (unlikely(irqd_is_setaffinity_pending(data)))
1689 ++ __irq_move_irq(data);
1690 ++}
1691 + void irq_move_masked_irq(struct irq_data *data);
1692 + void irq_force_complete_move(struct irq_desc *desc);
1693 + #else
1694 +diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1695 +index f144216febc6..9397628a1967 100644
1696 +--- a/include/linux/virtio_net.h
1697 ++++ b/include/linux/virtio_net.h
1698 +@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1699 + static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1700 + struct virtio_net_hdr *hdr,
1701 + bool little_endian,
1702 +- bool has_data_valid)
1703 ++ bool has_data_valid,
1704 ++ int vlan_hlen)
1705 + {
1706 + memset(hdr, 0, sizeof(*hdr)); /* no info leak */
1707 +
1708 +@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1709 +
1710 + if (skb->ip_summed == CHECKSUM_PARTIAL) {
1711 + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1712 +- if (skb_vlan_tag_present(skb))
1713 +- hdr->csum_start = __cpu_to_virtio16(little_endian,
1714 +- skb_checksum_start_offset(skb) + VLAN_HLEN);
1715 +- else
1716 +- hdr->csum_start = __cpu_to_virtio16(little_endian,
1717 +- skb_checksum_start_offset(skb));
1718 ++ hdr->csum_start = __cpu_to_virtio16(little_endian,
1719 ++ skb_checksum_start_offset(skb) + vlan_hlen);
1720 + hdr->csum_offset = __cpu_to_virtio16(little_endian,
1721 + skb->csum_offset);
1722 + } else if (has_data_valid &&
1723 +diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
1724 +index c4f5caaf3778..f6a3543e5247 100644
1725 +--- a/include/net/transp_v6.h
1726 ++++ b/include/net/transp_v6.h
1727 +@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
1728 + struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
1729 + struct sockcm_cookie *sockc);
1730 +
1731 +-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1732 +- __u16 srcp, __u16 destp, int bucket);
1733 ++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1734 ++ __u16 srcp, __u16 destp, int rqueue, int bucket);
1735 ++static inline void
1736 ++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
1737 ++ __u16 destp, int bucket)
1738 ++{
1739 ++ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
1740 ++ bucket);
1741 ++}
1742 +
1743 + #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
1744 +
1745 +diff --git a/include/net/udp.h b/include/net/udp.h
1746 +index 0676b272f6ac..1db85dcb06f6 100644
1747 +--- a/include/net/udp.h
1748 ++++ b/include/net/udp.h
1749 +@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
1750 + return htons((((u64) hash * (max - min)) >> 32) + min);
1751 + }
1752 +
1753 ++static inline int udp_rqueue_get(struct sock *sk)
1754 ++{
1755 ++ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
1756 ++}
1757 ++
1758 + /* net/ipv4/udp.c */
1759 + void udp_destruct_sock(struct sock *sk);
1760 + void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
1761 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
1762 +index e3336d904f64..facfecfc543c 100644
1763 +--- a/kernel/irq/manage.c
1764 ++++ b/kernel/irq/manage.c
1765 +@@ -204,6 +204,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
1766 + return ret;
1767 + }
1768 +
1769 ++#ifdef CONFIG_GENERIC_PENDING_IRQ
1770 ++static inline int irq_set_affinity_pending(struct irq_data *data,
1771 ++ const struct cpumask *dest)
1772 ++{
1773 ++ struct irq_desc *desc = irq_data_to_desc(data);
1774 ++
1775 ++ irqd_set_move_pending(data);
1776 ++ irq_copy_pending(desc, dest);
1777 ++ return 0;
1778 ++}
1779 ++#else
1780 ++static inline int irq_set_affinity_pending(struct irq_data *data,
1781 ++ const struct cpumask *dest)
1782 ++{
1783 ++ return -EBUSY;
1784 ++}
1785 ++#endif
1786 ++
1787 ++static int irq_try_set_affinity(struct irq_data *data,
1788 ++ const struct cpumask *dest, bool force)
1789 ++{
1790 ++ int ret = irq_do_set_affinity(data, dest, force);
1791 ++
1792 ++ /*
1793 ++ * In case that the underlying vector management is busy and the
1794 ++ * architecture supports the generic pending mechanism then utilize
1795 ++ * this to avoid returning an error to user space.
1796 ++ */
1797 ++ if (ret == -EBUSY && !force)
1798 ++ ret = irq_set_affinity_pending(data, dest);
1799 ++ return ret;
1800 ++}
1801 ++
1802 + int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
1803 + bool force)
1804 + {
1805 +@@ -214,8 +247,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
1806 + if (!chip || !chip->irq_set_affinity)
1807 + return -EINVAL;
1808 +
1809 +- if (irq_can_move_pcntxt(data)) {
1810 +- ret = irq_do_set_affinity(data, mask, force);
1811 ++ if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
1812 ++ ret = irq_try_set_affinity(data, mask, force);
1813 + } else {
1814 + irqd_set_move_pending(data);
1815 + irq_copy_pending(desc, mask);
1816 +diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
1817 +index 86ae0eb80b53..def48589ea48 100644
1818 +--- a/kernel/irq/migration.c
1819 ++++ b/kernel/irq/migration.c
1820 +@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
1821 + void irq_move_masked_irq(struct irq_data *idata)
1822 + {
1823 + struct irq_desc *desc = irq_data_to_desc(idata);
1824 +- struct irq_chip *chip = desc->irq_data.chip;
1825 ++ struct irq_data *data = &desc->irq_data;
1826 ++ struct irq_chip *chip = data->chip;
1827 +
1828 +- if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
1829 ++ if (likely(!irqd_is_setaffinity_pending(data)))
1830 + return;
1831 +
1832 +- irqd_clr_move_pending(&desc->irq_data);
1833 ++ irqd_clr_move_pending(data);
1834 +
1835 + /*
1836 + * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
1837 + */
1838 +- if (irqd_is_per_cpu(&desc->irq_data)) {
1839 ++ if (irqd_is_per_cpu(data)) {
1840 + WARN_ON(1);
1841 + return;
1842 + }
1843 +@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
1844 + * For correct operation this depends on the caller
1845 + * masking the irqs.
1846 + */
1847 +- if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
1848 +- irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
1849 +-
1850 ++ if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
1851 ++ int ret;
1852 ++
1853 ++ ret = irq_do_set_affinity(data, desc->pending_mask, false);
1854 ++ /*
1855 ++ * If the there is a cleanup pending in the underlying
1856 ++ * vector management, reschedule the move for the next
1857 ++ * interrupt. Leave desc->pending_mask intact.
1858 ++ */
1859 ++ if (ret == -EBUSY) {
1860 ++ irqd_set_move_pending(data);
1861 ++ return;
1862 ++ }
1863 ++ }
1864 + cpumask_clear(desc->pending_mask);
1865 + }
1866 +
1867 +-void irq_move_irq(struct irq_data *idata)
1868 ++void __irq_move_irq(struct irq_data *idata)
1869 + {
1870 + bool masked;
1871 +
1872 +@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
1873 + */
1874 + idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
1875 +
1876 +- if (likely(!irqd_is_setaffinity_pending(idata)))
1877 +- return;
1878 +-
1879 + if (unlikely(irqd_irq_disabled(idata)))
1880 + return;
1881 +
1882 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1883 +index 7441bd93b732..8fe3ebd6ac00 100644
1884 +--- a/mm/backing-dev.c
1885 ++++ b/mm/backing-dev.c
1886 +@@ -412,6 +412,7 @@ static void wb_exit(struct bdi_writeback *wb)
1887 + * protected.
1888 + */
1889 + static DEFINE_SPINLOCK(cgwb_lock);
1890 ++static struct workqueue_struct *cgwb_release_wq;
1891 +
1892 + /**
1893 + * wb_congested_get_create - get or create a wb_congested
1894 +@@ -522,7 +523,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
1895 + {
1896 + struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
1897 + refcnt);
1898 +- schedule_work(&wb->release_work);
1899 ++ queue_work(cgwb_release_wq, &wb->release_work);
1900 + }
1901 +
1902 + static void cgwb_kill(struct bdi_writeback *wb)
1903 +@@ -784,6 +785,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
1904 + spin_unlock_irq(&cgwb_lock);
1905 + }
1906 +
1907 ++static int __init cgwb_init(void)
1908 ++{
1909 ++ /*
1910 ++ * There can be many concurrent release work items overwhelming
1911 ++ * system_wq. Put them in a separate wq and limit concurrency.
1912 ++ * There's no point in executing many of these in parallel.
1913 ++ */
1914 ++ cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
1915 ++ if (!cgwb_release_wq)
1916 ++ return -ENOMEM;
1917 ++
1918 ++ return 0;
1919 ++}
1920 ++subsys_initcall(cgwb_init);
1921 ++
1922 + #else /* CONFIG_CGROUP_WRITEBACK */
1923 +
1924 + static int cgwb_bdi_init(struct backing_dev_info *bdi)
1925 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1926 +index 22320ea27489..d2d0eb9536a3 100644
1927 +--- a/mm/page_alloc.c
1928 ++++ b/mm/page_alloc.c
1929 +@@ -4162,7 +4162,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1930 + * orientated.
1931 + */
1932 + if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
1933 +- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
1934 + ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
1935 + ac->high_zoneidx, ac->nodemask);
1936 + }
1937 +diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
1938 +index 7d20e1f3de28..56197f0d9608 100644
1939 +--- a/net/dsa/tag_trailer.c
1940 ++++ b/net/dsa/tag_trailer.c
1941 +@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
1942 + if (!skb->dev)
1943 + return NULL;
1944 +
1945 +- pskb_trim_rcsum(skb, skb->len - 4);
1946 ++ if (pskb_trim_rcsum(skb, skb->len - 4))
1947 ++ return NULL;
1948 +
1949 + return skb;
1950 + }
1951 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1952 +index f70586b50838..ef8cd0f7db89 100644
1953 +--- a/net/ipv4/tcp_ipv4.c
1954 ++++ b/net/ipv4/tcp_ipv4.c
1955 +@@ -1689,6 +1689,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
1956 + reqsk_put(req);
1957 + goto discard_it;
1958 + }
1959 ++ if (tcp_checksum_complete(skb)) {
1960 ++ reqsk_put(req);
1961 ++ goto csum_error;
1962 ++ }
1963 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
1964 + inet_csk_reqsk_queue_drop_and_put(sk, req);
1965 + goto lookup;
1966 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1967 +index b61a770884fa..5f7bc5c6366a 100644
1968 +--- a/net/ipv4/udp.c
1969 ++++ b/net/ipv4/udp.c
1970 +@@ -2718,7 +2718,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1971 + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
1972 + bucket, src, srcp, dest, destp, sp->sk_state,
1973 + sk_wmem_alloc_get(sp),
1974 +- sk_rmem_alloc_get(sp),
1975 ++ udp_rqueue_get(sp),
1976 + 0, 0L, 0,
1977 + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
1978 + 0, sock_i_ino(sp),
1979 +diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
1980 +index d0390d844ac8..d9ad986c7b2c 100644
1981 +--- a/net/ipv4/udp_diag.c
1982 ++++ b/net/ipv4/udp_diag.c
1983 +@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
1984 + static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
1985 + void *info)
1986 + {
1987 +- r->idiag_rqueue = sk_rmem_alloc_get(sk);
1988 ++ r->idiag_rqueue = udp_rqueue_get(sk);
1989 + r->idiag_wqueue = sk_wmem_alloc_get(sk);
1990 + }
1991 +
1992 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1993 +index a02ad100f0d7..2ee08b6a86a4 100644
1994 +--- a/net/ipv6/datagram.c
1995 ++++ b/net/ipv6/datagram.c
1996 +@@ -1019,8 +1019,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
1997 + }
1998 + EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
1999 +
2000 +-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
2001 +- __u16 srcp, __u16 destp, int bucket)
2002 ++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
2003 ++ __u16 srcp, __u16 destp, int rqueue, int bucket)
2004 + {
2005 + const struct in6_addr *dest, *src;
2006 +
2007 +@@ -1036,7 +1036,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
2008 + dest->s6_addr32[2], dest->s6_addr32[3], destp,
2009 + sp->sk_state,
2010 + sk_wmem_alloc_get(sp),
2011 +- sk_rmem_alloc_get(sp),
2012 ++ rqueue,
2013 + 0, 0L, 0,
2014 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
2015 + 0,
2016 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2017 +index 4530a82aaa2e..b94345e657f7 100644
2018 +--- a/net/ipv6/route.c
2019 ++++ b/net/ipv6/route.c
2020 +@@ -2149,9 +2149,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2021 + const struct in6_addr *daddr, *saddr;
2022 + struct rt6_info *rt6 = (struct rt6_info *)dst;
2023 +
2024 +- if (rt6->rt6i_flags & RTF_LOCAL)
2025 +- return;
2026 +-
2027 + if (dst_metric_locked(dst, RTAX_MTU))
2028 + return;
2029 +
2030 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2031 +index 6d664d83cd16..5d4eb9d2c3a7 100644
2032 +--- a/net/ipv6/tcp_ipv6.c
2033 ++++ b/net/ipv6/tcp_ipv6.c
2034 +@@ -1475,6 +1475,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
2035 + reqsk_put(req);
2036 + goto discard_it;
2037 + }
2038 ++ if (tcp_checksum_complete(skb)) {
2039 ++ reqsk_put(req);
2040 ++ goto csum_error;
2041 ++ }
2042 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
2043 + inet_csk_reqsk_queue_drop_and_put(sk, req);
2044 + goto lookup;
2045 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2046 +index ea0730028e5d..977bd5a07cab 100644
2047 +--- a/net/ipv6/udp.c
2048 ++++ b/net/ipv6/udp.c
2049 +@@ -1475,7 +1475,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
2050 + struct inet_sock *inet = inet_sk(v);
2051 + __u16 srcp = ntohs(inet->inet_sport);
2052 + __u16 destp = ntohs(inet->inet_dport);
2053 +- ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
2054 ++ __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
2055 ++ udp_rqueue_get(v), bucket);
2056 + }
2057 + return 0;
2058 + }
2059 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2060 +index 60c2a252bdf5..38d132d007ba 100644
2061 +--- a/net/packet/af_packet.c
2062 ++++ b/net/packet/af_packet.c
2063 +@@ -2037,7 +2037,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2064 + return -EINVAL;
2065 + *len -= sizeof(vnet_hdr);
2066 +
2067 +- if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
2068 ++ if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2069 + return -EINVAL;
2070 +
2071 + return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2072 +@@ -2304,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2073 + if (do_vnet) {
2074 + if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2075 + sizeof(struct virtio_net_hdr),
2076 +- vio_le(), true)) {
2077 ++ vio_le(), true, 0)) {
2078 + spin_lock(&sk->sk_receive_queue.lock);
2079 + goto drop_n_account;
2080 + }
2081 +diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
2082 +index 9618b4a83cee..98c4afe7c15b 100644
2083 +--- a/net/sched/act_simple.c
2084 ++++ b/net/sched/act_simple.c
2085 +@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a)
2086 + kfree(d->tcfd_defdata);
2087 + }
2088 +
2089 +-static int alloc_defdata(struct tcf_defact *d, char *defdata)
2090 ++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
2091 + {
2092 + d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
2093 + if (unlikely(!d->tcfd_defdata))
2094 + return -ENOMEM;
2095 +- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2096 ++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2097 + return 0;
2098 + }
2099 +
2100 +-static void reset_policy(struct tcf_defact *d, char *defdata,
2101 ++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
2102 + struct tc_defact *p)
2103 + {
2104 + spin_lock_bh(&d->tcf_lock);
2105 + d->tcf_action = p->action;
2106 + memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
2107 +- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2108 ++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
2109 + spin_unlock_bh(&d->tcf_lock);
2110 + }
2111 +
2112 +@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2113 + struct tcf_defact *d;
2114 + bool exists = false;
2115 + int ret = 0, err;
2116 +- char *defdata;
2117 +
2118 + if (nla == NULL)
2119 + return -EINVAL;
2120 +@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2121 + return -EINVAL;
2122 + }
2123 +
2124 +- defdata = nla_data(tb[TCA_DEF_DATA]);
2125 +-
2126 + if (!exists) {
2127 + ret = tcf_idr_create(tn, parm->index, est, a,
2128 + &act_simp_ops, bind, false);
2129 +@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2130 + return ret;
2131 +
2132 + d = to_defact(*a);
2133 +- ret = alloc_defdata(d, defdata);
2134 ++ ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
2135 + if (ret < 0) {
2136 + tcf_idr_release(*a, bind);
2137 + return ret;
2138 +@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
2139 + if (!ovr)
2140 + return -EEXIST;
2141 +
2142 +- reset_policy(d, defdata, parm);
2143 ++ reset_policy(d, tb[TCA_DEF_DATA], parm);
2144 + }
2145 +
2146 + if (ret == ACT_P_CREATED)
2147 +diff --git a/net/socket.c b/net/socket.c
2148 +index f10f1d947c78..d1b02f161429 100644
2149 +--- a/net/socket.c
2150 ++++ b/net/socket.c
2151 +@@ -537,7 +537,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
2152 + if (!err && (iattr->ia_valid & ATTR_UID)) {
2153 + struct socket *sock = SOCKET_I(d_inode(dentry));
2154 +
2155 +- sock->sk->sk_uid = iattr->ia_uid;
2156 ++ if (sock->sk)
2157 ++ sock->sk->sk_uid = iattr->ia_uid;
2158 ++ else
2159 ++ err = -ENOENT;
2160 + }
2161 +
2162 + return err;
2163 +@@ -586,12 +589,16 @@ EXPORT_SYMBOL(sock_alloc);
2164 + * an inode not a file.
2165 + */
2166 +
2167 +-void sock_release(struct socket *sock)
2168 ++static void __sock_release(struct socket *sock, struct inode *inode)
2169 + {
2170 + if (sock->ops) {
2171 + struct module *owner = sock->ops->owner;
2172 +
2173 ++ if (inode)
2174 ++ inode_lock(inode);
2175 + sock->ops->release(sock);
2176 ++ if (inode)
2177 ++ inode_unlock(inode);
2178 + sock->ops = NULL;
2179 + module_put(owner);
2180 + }
2181 +@@ -605,6 +612,11 @@ void sock_release(struct socket *sock)
2182 + }
2183 + sock->file = NULL;
2184 + }
2185 ++
2186 ++void sock_release(struct socket *sock)
2187 ++{
2188 ++ __sock_release(sock, NULL);
2189 ++}
2190 + EXPORT_SYMBOL(sock_release);
2191 +
2192 + void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
2193 +@@ -1146,7 +1158,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
2194 +
2195 + static int sock_close(struct inode *inode, struct file *filp)
2196 + {
2197 +- sock_release(SOCKET_I(inode));
2198 ++ __sock_release(SOCKET_I(inode), inode);
2199 + return 0;
2200 + }
2201 +
2202 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
2203 +index e1c93ce74e0f..5fe29121b9a8 100644
2204 +--- a/net/tls/tls_sw.c
2205 ++++ b/net/tls/tls_sw.c
2206 +@@ -191,18 +191,12 @@ static void tls_free_both_sg(struct sock *sk)
2207 + }
2208 +
2209 + static int tls_do_encryption(struct tls_context *tls_ctx,
2210 +- struct tls_sw_context *ctx, size_t data_len,
2211 +- gfp_t flags)
2212 ++ struct tls_sw_context *ctx,
2213 ++ struct aead_request *aead_req,
2214 ++ size_t data_len)
2215 + {
2216 +- unsigned int req_size = sizeof(struct aead_request) +
2217 +- crypto_aead_reqsize(ctx->aead_send);
2218 +- struct aead_request *aead_req;
2219 + int rc;
2220 +
2221 +- aead_req = kzalloc(req_size, flags);
2222 +- if (!aead_req)
2223 +- return -ENOMEM;
2224 +-
2225 + ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
2226 + ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
2227 +
2228 +@@ -219,7 +213,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
2229 + ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
2230 + ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
2231 +
2232 +- kfree(aead_req);
2233 + return rc;
2234 + }
2235 +
2236 +@@ -228,8 +221,14 @@ static int tls_push_record(struct sock *sk, int flags,
2237 + {
2238 + struct tls_context *tls_ctx = tls_get_ctx(sk);
2239 + struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
2240 ++ struct aead_request *req;
2241 + int rc;
2242 +
2243 ++ req = kzalloc(sizeof(struct aead_request) +
2244 ++ crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
2245 ++ if (!req)
2246 ++ return -ENOMEM;
2247 ++
2248 + sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
2249 + sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
2250 +
2251 +@@ -245,15 +244,14 @@ static int tls_push_record(struct sock *sk, int flags,
2252 + tls_ctx->pending_open_record_frags = 0;
2253 + set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
2254 +
2255 +- rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
2256 +- sk->sk_allocation);
2257 ++ rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
2258 + if (rc < 0) {
2259 + /* If we are called from write_space and
2260 + * we fail, we need to set this SOCK_NOSPACE
2261 + * to trigger another write_space in the future.
2262 + */
2263 + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2264 +- return rc;
2265 ++ goto out_req;
2266 + }
2267 +
2268 + free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
2269 +@@ -268,6 +266,8 @@ static int tls_push_record(struct sock *sk, int flags,
2270 + tls_err_abort(sk, EBADMSG);
2271 +
2272 + tls_advance_record_sn(sk, &tls_ctx->tx);
2273 ++out_req:
2274 ++ kfree(req);
2275 + return rc;
2276 + }
2277 +
2278 +@@ -755,7 +755,7 @@ int tls_sw_recvmsg(struct sock *sk,
2279 + struct sk_buff *skb;
2280 + ssize_t copied = 0;
2281 + bool cmsg = false;
2282 +- int err = 0;
2283 ++ int target, err = 0;
2284 + long timeo;
2285 +
2286 + flags |= nonblock;
2287 +@@ -765,6 +765,7 @@ int tls_sw_recvmsg(struct sock *sk,
2288 +
2289 + lock_sock(sk);
2290 +
2291 ++ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2292 + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2293 + do {
2294 + bool zc = false;
2295 +@@ -857,6 +858,9 @@ int tls_sw_recvmsg(struct sock *sk,
2296 + goto recv_end;
2297 + }
2298 + }
2299 ++ /* If we have a new message from strparser, continue now. */
2300 ++ if (copied >= target && !ctx->recv_pkt)
2301 ++ break;
2302 + } while (len);
2303 +
2304 + recv_end:
2305 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
2306 +index d1eb14842340..a12e594d4e3b 100644
2307 +--- a/sound/pci/hda/hda_controller.c
2308 ++++ b/sound/pci/hda/hda_controller.c
2309 +@@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
2310 + return err;
2311 + strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
2312 + apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
2313 +- if (apcm == NULL)
2314 ++ if (apcm == NULL) {
2315 ++ snd_device_free(chip->card, pcm);
2316 + return -ENOMEM;
2317 ++ }
2318 + apcm->chip = chip;
2319 + apcm->pcm = pcm;
2320 + apcm->codec = codec;
2321 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2322 +index 5b4dbcec6de8..ba9a7e552183 100644
2323 +--- a/sound/pci/hda/patch_conexant.c
2324 ++++ b/sound/pci/hda/patch_conexant.c
2325 +@@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2326 + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
2327 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
2328 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
2329 ++ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
2330 ++ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
2331 + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
2332 + SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
2333 + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
2334 + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
2335 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2336 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2337 ++ SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2338 + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
2339 + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
2340 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
2341 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2342 +index 01a6643fc7d4..06c2c80a045b 100644
2343 +--- a/sound/pci/hda/patch_realtek.c
2344 ++++ b/sound/pci/hda/patch_realtek.c
2345 +@@ -6580,7 +6580,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2346 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2347 + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2348 + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
2349 +- SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
2350 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
2351 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
2352 + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
2353 +@@ -6752,6 +6751,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2354 + {0x1b, 0x01111010},
2355 + {0x1e, 0x01451130},
2356 + {0x21, 0x02211020}),
2357 ++ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
2358 ++ {0x12, 0x90a60140},
2359 ++ {0x14, 0x90170110},
2360 ++ {0x19, 0x02a11030},
2361 ++ {0x21, 0x02211020}),
2362 + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2363 + {0x12, 0x90a60140},
2364 + {0x14, 0x90170110},
2365 +diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
2366 +index 754e632a27bd..02b7ad1946db 100644
2367 +--- a/sound/usb/quirks-table.h
2368 ++++ b/sound/usb/quirks-table.h
2369 +@@ -3277,6 +3277,10 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2370 + }
2371 + },
2372 +
2373 ++/* disabled due to regression for other devices;
2374 ++ * see https://bugzilla.kernel.org/show_bug.cgi?id=199905
2375 ++ */
2376 ++#if 0
2377 + {
2378 + /*
2379 + * Nura's first gen headphones use Cambridge Silicon Radio's vendor
2380 +@@ -3324,6 +3328,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
2381 + }
2382 + }
2383 + },
2384 ++#endif /* disabled */
2385 +
2386 + {
2387 + /*