From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 26 Jun 2018 16:32:22
Message-Id: 1530030406.020afff5dabcb5a040d3a9b9647e8c756b97b1b1.alicef@gentoo
commit: 020afff5dabcb5a040d3a9b9647e8c756b97b1b1
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 26 16:26:46 2018 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jun 26 16:26:46 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=020afff5

linux kernel 4.14.52

0000_README | 4 +
1051_linux-4.14.52.patch | 1921 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1925 insertions(+)

diff --git a/0000_README b/0000_README
index f22887e..87d7638 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch: 1050_linux-4.14.51.patch
From: http://www.kernel.org
Desc: Linux 4.14.51

+Patch: 1051_linux-4.14.52.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.52
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1051_linux-4.14.52.patch b/1051_linux-4.14.52.patch
new file mode 100644
index 0000000..a5b3f53
--- /dev/null
+++ b/1051_linux-4.14.52.patch
@@ -0,0 +1,1921 @@
+diff --git a/Makefile b/Makefile
+index a33376204c17..e2e4009bbfed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index 18dd8f22e353..665d0f6cd62f 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -773,6 +773,8 @@ static __init void rdt_quirks(void)
+ case INTEL_FAM6_SKYLAKE_X:
+ if (boot_cpu_data.x86_stepping <= 4)
+ set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
++ else
++ set_rdt_options("!l3cat");
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 231ad23b24a9..8fec687b3e44 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
+
+ static u8 n_banks;
+
+-#define MAX_FLAG_OPT_SIZE 3
++#define MAX_FLAG_OPT_SIZE 4
+ #define NBCFG 0x44
+
+ enum injection_type {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 74c35513ada5..49979c095f31 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2252,7 +2252,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+
+ mutex_lock(&set->tag_list_lock);
+ list_del_rcu(&q->tag_set_list);
+- INIT_LIST_HEAD(&q->tag_set_list);
+ if (list_is_singular(&set->tag_list)) {
+ /* just transitioned to unshared */
+ set->flags &= ~BLK_MQ_F_TAG_SHARED;
+@@ -2260,8 +2259,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+ blk_mq_update_tag_set_depth(set, false);
+ }
+ mutex_unlock(&set->tag_list_lock);
+-
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->tag_set_list);
+ }
+
+ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 71008dbabe98..cad2530a5b52 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4543,9 +4543,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NOLPM, },
+
+- /* Sandisk devices which are known to not handle LPM well */
+- { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+-
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index de4ddd0e8550..b3ed8f9953a8 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -35,7 +35,7 @@ struct zpodd {
+ static int eject_tray(struct ata_device *dev)
+ {
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_START_STOP_UNIT,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
+ 0, 0, 0,
+ 0x02, /* LoEj */
+ 0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index c8501cdb95f4..a359934ffd85 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1461,7 +1461,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+@@ -1471,7 +1471,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+- return NULL;
++ return ERR_PTR(retval);
+ }
+ return &dir->kobj;
+ }
+@@ -1778,6 +1778,10 @@ int device_add(struct device *dev)
+
+ parent = get_device(dev->parent);
+ kobj = get_device_parent(dev, parent);
++ if (IS_ERR(kobj)) {
++ error = PTR_ERR(kobj);
++ goto parent_error;
++ }
+ if (kobj)
+ dev->kobj.parent = kobj;
+
+@@ -1876,6 +1880,7 @@ int device_add(struct device *dev)
+ kobject_del(&dev->kobj);
+ Error:
+ cleanup_glue_dir(dev, glue_dir);
++parent_error:
+ put_device(parent);
+ name_error:
+ kfree(dev->p);
+@@ -2695,6 +2700,11 @@ int device_move(struct device *dev, struct device *new_parent,
+ device_pm_lock();
+ new_parent = get_device(new_parent);
+ new_parent_kobj = get_device_parent(dev, new_parent);
++ if (IS_ERR(new_parent_kobj)) {
++ error = PTR_ERR(new_parent_kobj);
++ put_device(new_parent);
++ goto out;
++ }
+
+ pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 86258b00a1d4..6fb64e73bc96 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = {
+ static void nbd_dev_remove(struct nbd_device *nbd)
+ {
+ struct gendisk *disk = nbd->disk;
++ struct request_queue *q;
++
+ if (disk) {
++ q = disk->queue;
+ del_gendisk(disk);
+- blk_cleanup_queue(disk->queue);
++ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&nbd->tag_set);
+ disk->private_data = NULL;
+ put_disk(disk);
+@@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd)
+ static void nbd_size_update(struct nbd_device *nbd)
+ {
+ struct nbd_config *config = nbd->config;
++ struct block_device *bdev = bdget_disk(nbd->disk, 0);
++
+ blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ set_capacity(nbd->disk, config->bytesize >> 9);
++ if (bdev) {
++ if (bdev->bd_disk)
++ bd_set_size(bdev, config->bytesize);
++ else
++ bdev->bd_invalidated = 1;
++ bdput(bdev);
++ }
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+
+@@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+ struct nbd_config *config = nbd->config;
+ config->blksize = blocksize;
+ config->bytesize = blocksize * nr_blocks;
++ if (nbd->task_recv != NULL)
++ nbd_size_update(nbd);
+ }
+
+ static void nbd_complete_rq(struct request *req)
+@@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
+ if (ret)
+ return ret;
+
+- bd_set_size(bdev, config->bytesize);
+ if (max_part)
+ bdev->bd_invalidated = 1;
+ mutex_unlock(&nbd->config_lock);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 789fc3a8289f..93754300cb57 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -693,6 +693,8 @@ static ssize_t store_##file_name \
+ struct cpufreq_policy new_policy; \
+ \
+ memcpy(&new_policy, policy, sizeof(*policy)); \
++ new_policy.min = policy->user_policy.min; \
++ new_policy.max = policy->user_policy.max; \
+ \
+ ret = sscanf(buf, "%u", &new_policy.object); \
+ if (ret != 1) \
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index ca38229b045a..43e14bb512c8 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ * calls, so the previous load value can be used then.
+ */
+ load = j_cdbs->prev_load;
+- } else if (unlikely(time_elapsed > 2 * sampling_rate &&
++ } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
+ /*
+ * If the CPU had gone completely idle and a task has
+@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+- * Detecting this situation is easy: the governor's
+- * utilization update handler would not have run during
+- * CPU-idle periods. Hence, an unusually large
+- * 'time_elapsed' (as compared to the sampling rate)
++ * Detecting this situation is easy: an unusually large
++ * 'idle_time' (as compared to the sampling rate)
+ * indicates this scenario.
+ */
+ load = j_cdbs->prev_load;
+@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ j_cdbs->prev_load = load;
+ }
+
+- if (time_elapsed > 2 * sampling_rate) {
+- unsigned int periods = time_elapsed / sampling_rate;
++ if (unlikely((int)idle_time > 2 * sampling_rate)) {
++ unsigned int periods = idle_time / sampling_rate;
+
+ if (periods < idle_periods)
+ idle_periods = periods;
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 20d824f74f99..90d7be08fea0 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -204,8 +204,7 @@ static void ish_remove(struct pci_dev *pdev)
+ kfree(ishtp_dev);
+ }
+
+-#ifdef CONFIG_PM
+-static struct device *ish_resume_device;
++static struct device __maybe_unused *ish_resume_device;
+
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS 50
+@@ -219,7 +218,7 @@ static struct device *ish_resume_device;
+ * in that case a simple resume message is enough, others we need
+ * a reset sequence.
+ */
+-static void ish_resume_handler(struct work_struct *work)
++static void __maybe_unused ish_resume_handler(struct work_struct *work)
+ {
+ struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -261,7 +260,7 @@ static void ish_resume_handler(struct work_struct *work)
+ *
+ * Return: 0 to the pm core
+ */
+-static int ish_suspend(struct device *device)
++static int __maybe_unused ish_suspend(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -287,7 +286,7 @@ static int ish_suspend(struct device *device)
+ return 0;
+ }
+
+-static DECLARE_WORK(resume_work, ish_resume_handler);
++static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+ /**
+ * ish_resume() - ISH resume callback
+ * @device: device pointer
+@@ -296,7 +295,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
+ *
+ * Return: 0 to the pm core
+ */
+-static int ish_resume(struct device *device)
++static int __maybe_unused ish_resume(struct device *device)
+ {
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -310,21 +309,14 @@ static int ish_resume(struct device *device)
+ return 0;
+ }
+
+-static const struct dev_pm_ops ish_pm_ops = {
+- .suspend = ish_suspend,
+- .resume = ish_resume,
+-};
+-#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
+-#else
+-#define ISHTP_ISH_PM_OPS NULL
+-#endif /* CONFIG_PM */
++static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+
+ static struct pci_driver ish_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ish_pci_tbl,
+ .probe = ish_probe,
+ .remove = ish_remove,
+- .driver.pm = ISHTP_ISH_PM_OPS,
++ .driver.pm = &ish_pm_ops,
+ };
+
+ module_pci_driver(ish_driver);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 69afd7968d9c..18d5b99d13f1 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -284,6 +284,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
+ }
+ }
+
++ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
++ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
++ hdev->product == 0x0358 &&
++ WACOM_PEN_FIELD(field) &&
++ wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
++ field->logical_maximum = 43200;
++ }
++
+ switch (usage->hid) {
+ case HID_GD_X:
+ features->x_max = field->logical_maximum;
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 5931aa2fe997..61084ba69a99 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
+ slave->dev->name);
+ rcu_assign_pointer(bond->primary_slave, slave);
+ strcpy(bond->params.primary, slave->dev->name);
++ bond->force_primary = true;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3a7241c8713c..6890478a0851 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -123,8 +123,10 @@ static int netvsc_open(struct net_device *net)
+ }
+
+ rdev = nvdev->extension;
+- if (!rdev->link_state)
++ if (!rdev->link_state) {
+ netif_carrier_on(net);
++ netif_tx_wake_all_queues(net);
++ }
+
+ if (vf_netdev) {
+ /* Setting synthetic device up transparently sets
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index bfd4ded0a53f..773a3fea8f0e 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -777,13 +777,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
+ int total;
+
+ if (q->flags & IFF_VNET_HDR) {
++ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+ struct virtio_net_hdr vnet_hdr;
++
+ vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ if (iov_iter_count(iter) < vnet_hdr_len)
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+- tap_is_little_endian(q), true))
++ tap_is_little_endian(q), true,
++ vlan_hlen))
+ BUG();
+
+ if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 3d9ad11e4f28..cb17ffadfc30 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1648,7 +1648,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ return -EINVAL;
+
+ if (virtio_net_hdr_from_skb(skb, &gso,
+- tun_is_little_endian(tun), true)) {
++ tun_is_little_endian(tun), true,
++ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 9e1b74590682..f5316ab68a0a 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ * accordingly. Otherwise, we should check here.
+ */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+- delayed_ndp_size = ctx->max_ndp_size;
++ delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ else
+ delayed_ndp_size = 0;
+
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ /* If requested, put NDP at end of frame. */
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 9e93e7a5df7e..910c46b47769 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1237,7 +1237,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ hdr = skb_vnet_hdr(skb);
+
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+- virtio_is_little_endian(vi->vdev), false))
++ virtio_is_little_endian(vi->vdev), false,
++ 0))
+ BUG();
+
+ if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+index 1610722b8099..747eef82cefd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+@@ -8,6 +8,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -30,6 +31,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -174,7 +176,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ const struct fw_img *image)
+ {
+- int sec_idx, idx;
++ int sec_idx, idx, ret;
+ u32 offset = 0;
+
+ /*
+@@ -201,17 +203,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ */
+ if (sec_idx >= image->num_sec - 1) {
+ IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+- iwl_free_fw_paging(fwrt);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto err;
+ }
+
+ /* copy the CSS block to the dram */
+ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+ sec_idx);
+
++ if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
++ IWL_ERR(fwrt, "CSS block is larger than paging size\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+ image->sec[sec_idx].data,
+- fwrt->fw_paging_db[0].fw_paging_size);
++ image->sec[sec_idx].len);
+ dma_sync_single_for_device(fwrt->trans->dev,
+ fwrt->fw_paging_db[0].fw_paging_phys,
+ fwrt->fw_paging_db[0].fw_paging_size,
+@@ -232,6 +240,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
+ struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+
++ if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
++ IWL_ERR(fwrt,
++ "Paging: paging size is larger than remaining data in block %d\n",
++ idx);
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(block->fw_paging_block),
+ image->sec[sec_idx].data + offset,
+ block->fw_paging_size);
+@@ -242,19 +258,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+
+ IWL_DEBUG_FW(fwrt,
+ "Paging: copied %d paging bytes to block %d\n",
+- fwrt->fw_paging_db[idx].fw_paging_size,
+- idx);
++ block->fw_paging_size, idx);
++
++ offset += block->fw_paging_size;
+
+- offset += fwrt->fw_paging_db[idx].fw_paging_size;
++ if (offset > image->sec[sec_idx].len) {
++ IWL_ERR(fwrt,
++ "Paging: offset goes over section size\n");
++ ret = -EINVAL;
++ goto err;
++ }
+ }
+
+ /* copy the last paging block */
+ if (fwrt->num_of_pages_in_last_blk > 0) {
+ struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+
++ if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
++ IWL_ERR(fwrt,
++ "Paging: last block is larger than paging size\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
+ memcpy(page_address(block->fw_paging_block),
+ image->sec[sec_idx].data + offset,
+- FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
++ image->sec[sec_idx].len - offset);
+ dma_sync_single_for_device(fwrt->trans->dev,
+ block->fw_paging_phys,
+ block->fw_paging_size,
+@@ -266,6 +295,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ }
+
+ return 0;
+
++err:
++ iwl_free_fw_paging(fwrt);
++ return ret;
+ }
+
+ static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 50e48afd88ff..244e5256c526 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2382,6 +2382,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
++
++ /* Make sure all padding within the structure is initialized. */
++ memset(&node->msg, 0, sizeof node->msg);
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
+index 74f2e6e6202a..8851d441e5fd 100644
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ if (IS_ERR(mdev->clk))
+ return PTR_ERR(mdev->clk);
+
++ err = clk_prepare_enable(mdev->clk);
++ if (err)
++ return err;
++
+ clkrate = clk_get_rate(mdev->clk);
+ if (clkrate < 10000000)
+ dev_warn(&pdev->dev,
+@@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(mdev->regs))
+- return PTR_ERR(mdev->regs);
+-
+- err = clk_prepare_enable(mdev->clk);
+- if (err)
+- return err;
++ if (IS_ERR(mdev->regs)) {
++ err = PTR_ERR(mdev->regs);
++ goto out_disable_clk;
++ }
+
+ /* Software reset 1-Wire module */
+ writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+@@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
+
+ err = w1_add_master_device(&mdev->bus_master);
+ if (err)
+- clk_disable_unprepare(mdev->clk);
++ goto out_disable_clk;
+
++ return 0;
++
++out_disable_clk:
++ clk_disable_unprepare(mdev->clk);
+ return err;
+ }
+
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index a7c5a9861bef..8311e8ed76de 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ s = strchr(p, del);
+ if (!s)
+ goto einval;
+- *s++ = '\0';
+- e->offset = simple_strtoul(p, &p, 10);
++ *s = '\0';
++ if (p != s) {
++ int r = kstrtoint(p, 10, &e->offset);
++ if (r != 0 || e->offset < 0)
++ goto einval;
++ }
++ p = s;
+ if (*p++)
+ goto einval;
+ pr_debug("register: offset: %#x\n", e->offset);
+@@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ if (e->mask &&
+ string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
+ goto einval;
+- if (e->size + e->offset > BINPRM_BUF_SIZE)
++ if (e->size > BINPRM_BUF_SIZE ||
++ BINPRM_BUF_SIZE - e->size < e->offset)
+ goto einval;
+ pr_debug("register: magic/mask length: %i\n", e->size);
+ if (USE_DEBUG) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8ecbac3b862e..3a07900971c3 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1027,8 +1027,10 @@ static noinline int cow_file_range(struct inode *inode,
+ ram_size, /* ram_bytes */
+ BTRFS_COMPRESS_NONE, /* compress_type */
+ BTRFS_ORDERED_REGULAR /* type */);
+- if (IS_ERR(em))
++ if (IS_ERR(em)) {
++ ret = PTR_ERR(em);
+ goto out_reserve;
++ }
+ free_extent_map(em);
+
+ ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2763f3184ac5..7303ba108112 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2682,8 +2682,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
+ }
+
+ /* Check for compatibility reject unknown flags */
+- if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
+- return -EOPNOTSUPP;
++ if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
++ ret = -EOPNOTSUPP;
++ goto out;
++ }
+
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+@@ -3861,11 +3863,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ src->i_sb != inode->i_sb)
+ return -EXDEV;
+
+- /* don't make the dst file partly checksummed */
+- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+- return -EINVAL;
+-
+ if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
+ return -EISDIR;
+
+@@ -3875,6 +3872,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ inode_lock(src);
+ }
+
++ /* don't make the dst file partly checksummed */
++ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
++ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ /* determine range to clone */
+ ret = -EINVAL;
+ if (off + len > src->i_size || off + len < off)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 24613b4e224c..936d58ca2b49 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2775,7 +2775,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
+ have_csum = scrub_find_csum(sctx, logical, csum);
+ if (have_csum == 0)
+ ++sctx->stat.no_csum;
+- if (sctx->is_dev_replace && !have_csum) {
++ if (0 && sctx->is_dev_replace && !have_csum) {
+ ret = copy_nocow_pages(sctx, logical, l,
+ mirror_num,
+ physical_for_dev_replace);
+diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
+index 4f3884835267..dd95a6fa24bf 100644
+--- a/fs/cifs/cifsacl.h
++++ b/fs/cifs/cifsacl.h
+@@ -98,4 +98,18 @@ struct cifs_ace {
+ struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+
++/*
++ * Minimum security identifier can be one for system defined Users
++ * and Groups such as NULL SID and World or Built-in accounts such
++ * as Administrator and Guest and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN (1 + 1 + 6 + 4) /* in bytes */
++
++/*
++ * Minimum security descriptor can be one without any SACL and DACL and can
++ * consist of revision, type, and two sids of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
+ #endif /* _CIFSACL_H */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 839327f75e3d..36bc9a7eb8ea 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1256,10 +1256,11 @@ smb2_is_session_expired(char *buf)
+ {
+ struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
+
+- if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
++ if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++ shdr->Status != STATUS_USER_SESSION_DELETED)
+ return false;
+
+- cifs_dbg(FYI, "Session expired\n");
++ cifs_dbg(FYI, "Session expired or deleted\n");
+ return true;
+ }
+
+@@ -1571,8 +1572,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ oparms.create_options = 0;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return ERR_PTR(-ENOMEM);
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return ERR_PTR(rc);
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = READ_CONTROL;
+@@ -1630,8 +1634,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ access_flags = WRITE_DAC;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+- if (!utf16_path)
+- return -ENOMEM;
++ if (!utf16_path) {
++ rc = -ENOMEM;
++ free_xid(xid);
++ return rc;
++ }
+
+ oparms.tcon = tcon;
+ oparms.desired_access = access_flags;
+@@ -1691,15 +1698,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Must check if file sparse since fallocate -z (zero range) assumes
+ * non-sparse allocation
+ */
+- if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+- return -EOPNOTSUPP;
++ if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * need to make sure we are not asked to extend the file since the SMB3
+@@ -1708,8 +1721,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ * which for a non sparse file would zero the newly extended range
+ */
+ if (keep_size == false)
+- if (i_size_read(inode) < offset + len)
+- return -EOPNOTSUPP;
++ if (i_size_read(inode) < offset + len) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1743,8 +1759,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+
+ /* Need to make file sparse, if not already, before freeing range. */
+ /* Consider adding equivalent for compressed since it could also work */
+- if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+- return -EOPNOTSUPP;
++ if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+
+@@ -1776,8 +1795,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+
+ /* if file not oplocked can't be sure whether asking to extend size */
+ if (!CIFS_CACHE_READ(cifsi))
+- if (keep_size == false)
+- return -EOPNOTSUPP;
++ if (keep_size == false) {
++ free_xid(xid);
++ return rc;
++ }
+
+ /*
+ * Files are non-sparse by default so falloc may be a no-op
+@@ -1786,14 +1807,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ */
+ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+ if (keep_size == true)
+- return 0;
++ rc = 0;
+ /* check if extending file */
+ else if (i_size_read(inode) >= off + len)
+ /* not extending file and already not sparse */
+- return 0;
++ rc = 0;
+ /* BB: in future add else clause to extend file */
+ else
+- return -EOPNOTSUPP;
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
+ }
+
+ if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+@@ -1805,8 +1828,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ * ie potentially making a few extra pages at the beginning
+ * or end of the file non-sparse via set_sparse is harmless.
+ */
+- if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
+- return -EOPNOTSUPP;
++ if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+ }
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 49779d952cd5..5247b40e57f6 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1182,6 +1182,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ sess_data->ses = ses;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ sess_data->nls_cp = (struct nls_table *) nls_cp;
++ sess_data->previous_session = ses->Suid;
+
+ while (sess_data->func)
+ sess_data->func(sess_data);
+@@ -2278,8 +2279,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 0, SMB2_O_INFO_SECURITY, additional_info,
+- SMB2_MAX_BUFFER_SIZE,
+- sizeof(struct smb2_file_all_info), data, plen);
++ SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+ }
+
+ int
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c32802c956d5..bf7fa1507e81 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
+ int i;
+
+- /* Count number blocks in a subtree under 'partial' */
+- count = 1;
+- for (i = 0; partial + i != chain + depth - 1; i++)
+- count *= epb;
++ /*
++ * Count number blocks in a subtree under 'partial'. At each
++ * level we count number of complete empty subtrees beyond
++ * current offset and then descend into the subtree only
++ * partially beyond current offset.
++ */
++ count = 0;
++ for (i = partial - chain + 1; i < depth; i++)
++ count = count * epb + (epb - offsets[i] - 1);
++ count++;
+ /* Fill in size of a hole we found */
+ map->m_pblk = 0;
+ map->m_len = min_t(unsigned int, map->m_len, count);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index fd9501977f1c..8f5dc243effd 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -150,6 +150,12 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+ goto out;
+
+ if (!is.s.not_found) {
++ if (is.s.here->e_value_inum) {
++ EXT4_ERROR_INODE(inode, "inline data xattr refers "
++ "to an external xattr inode");
++ error = -EFSCORRUPTED;
++ goto out;
++ }
+ EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ (void *)ext4_raw_inode(&is.iloc));
+ EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 09014c3c4207..bd6453e78992 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4246,28 +4246,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ EXT4_BLOCK_SIZE_BITS(sb);
+ stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+- /* If there are no blocks to remove, return now */
+- if (first_block >= stop_block)
+- goto out_stop;
++ /* If there are blocks to remove, do it */
++ if (stop_block > first_block) {
+
+- down_write(&EXT4_I(inode)->i_data_sem);
+- ext4_discard_preallocations(inode);
++ down_write(&EXT4_I(inode)->i_data_sem);
++ ext4_discard_preallocations(inode);
+
+- ret = ext4_es_remove_extent(inode, first_block,
+- stop_block - first_block);
+- if (ret) {
+- up_write(&EXT4_I(inode)->i_data_sem);
+- goto out_stop;
+- }
++ ret = ext4_es_remove_extent(inode, first_block,
++ stop_block - first_block);
++ if (ret) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto out_stop;
++ }
+
+- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- ret = ext4_ext_remove_space(inode, first_block,
+- stop_block - 1);
+- else
+- ret = ext4_ind_remove_space(handle, inode, first_block,
+- stop_block);
++ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ ret = ext4_ext_remove_space(inode, first_block,
++ stop_block - 1);
++ else
++ ret = ext4_ind_remove_space(handle, inode, first_block,
++ stop_block);
+
+- up_write(&EXT4_I(inode)->i_data_sem);
++ up_write(&EXT4_I(inode)->i_data_sem);
++ }
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+@@ -4634,19 +4634,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
+ }
+ }
+
+-static inline void ext4_iget_extra_inode(struct inode *inode,
++static inline int ext4_iget_extra_inode(struct inode *inode,
+ struct ext4_inode *raw_inode,
+ struct ext4_inode_info *ei)
+ {
+ __le32 *magic = (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
+
+ if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
+ EXT4_INODE_SIZE(inode->i_sb) &&
+ *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+- ext4_find_inline_data_nolock(inode);
++ return ext4_find_inline_data_nolock(inode);
+ } else
+ EXT4_I(inode)->i_inline_off = 0;
++ return 0;
+ }
+
+ int ext4_get_projid(struct inode *inode, kprojid_t *projid)
+@@ -4826,7 +4828,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ ei->i_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+ } else {
+- ext4_iget_extra_inode(inode, raw_inode, ei);
++ ret = ext4_iget_extra_inode(inode, raw_inode, ei);
++ if (ret)
++ goto bad_inode;
+ }
+ }
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 1dac59c24792..823c0b82dfeb 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1905,7 +1905,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ return 0;
+
+ n_group = ext4_get_group_number(sb, n_blocks_count - 1);
+- if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
++ if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ ext4_warning(sb, "resize would cause inodes_count overflow");
+ return -EINVAL;
+ }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 1718354e6322..ed1cf24a7831 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1687,7 +1687,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+
+ /* No failures allowed past this point. */
+
+- if (!s->not_found && here->e_value_offs) {
++ if (!s->not_found && here->e_value_size && here->e_value_offs) {
+ /* Remove the old value. */
+ void *first_val = s->base + min_offs;
+ size_t offs = le16_to_cpu(here->e_value_offs);
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index dcfcf7fd7438..a73144b3cb8c 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -465,7 +465,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
+ extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+ extern void nfs_release_seqid(struct nfs_seqid *seqid);
+ extern void nfs_free_seqid(struct nfs_seqid *seqid);
+-extern int nfs4_setup_sequence(const struct nfs_client *client,
++extern int nfs4_setup_sequence(struct nfs_client *client,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index ae8f43d270d6..8ff98bbe479b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -96,6 +96,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_open_context *ctx, struct nfs4_label *ilabel,
+ struct nfs4_label *olabel);
+ #ifdef CONFIG_NFS_V4_1
++static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
++ struct rpc_cred *cred,
++ struct nfs4_slot *slot,
++ bool is_privileged);
+ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
+ struct rpc_cred *);
+ static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
+@@ -641,13 +645,14 @@ static int nfs40_sequence_done(struct rpc_task *task,
+
+ #if defined(CONFIG_NFS_V4_1)
+
+-static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
++static void nfs41_release_slot(struct nfs4_slot *slot)
+ {
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+- struct nfs4_slot *slot = res->sr_slot;
+ bool send_new_highest_used_slotid = false;
+
++ if (!slot)
++ return;
+ tbl = slot->table;
+ session = tbl->session;
+
+@@ -673,13 +678,18 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
+ send_new_highest_used_slotid = false;
+ out_unlock:
+ spin_unlock(&tbl->slot_tbl_lock);
+- res->sr_slot = NULL;
+ if (send_new_highest_used_slotid)
+ nfs41_notify_server(session->clp);
+ if (waitqueue_active(&tbl->slot_waitq))
+ wake_up_all(&tbl->slot_waitq);
+ }
+
++static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
++{
++ nfs41_release_slot(res->sr_slot);
++ res->sr_slot = NULL;
++}
++
+ static int nfs41_sequence_process(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+ {
+@@ -707,13 +717,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ /* Check the SEQUENCE operation status */
+ switch (res->sr_status) {
+ case 0:
+- /* If previous op on slot was interrupted and we reused
+- * the seq# and got a reply from the cache, then retry
+- */
+- if (task->tk_status == -EREMOTEIO && interrupted) {
+- ++slot->seq_nr;
+- goto retry_nowait;
+- }
+ /* Update the slot's sequence and clientid lease timer */
+ slot->seq_done = 1;
+ clp = session->clp;
+@@ -747,16 +750,16 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ * The slot id we used was probably retired. Try again
+ * using a different slot id.
+ */
++ if (slot->seq_nr < slot->table->target_highest_slotid)
++ goto session_recover;
+ goto retry_nowait;
+ case -NFS4ERR_SEQ_MISORDERED:
+ /*
+ * Was the last operation on this sequence interrupted?
+ * If so, retry after bumping the sequence number.
+ */
+- if (interrupted) {
+- ++slot->seq_nr;
+- goto retry_nowait;
+- }
++ if (interrupted)
++ goto retry_new_seq;
+ /*
+ * Could this slot have been previously retired?
+ * If so, then the server may be expecting seq_nr = 1!
+@@ -765,10 +768,11 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ slot->seq_nr = 1;
+ goto retry_nowait;
+ }
+- break;
++ goto session_recover;
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+- ++slot->seq_nr;
+- goto retry_nowait;
++ if (interrupted)
++ goto retry_new_seq;
++ goto session_recover;
+ default:
+ /* Just update the slot sequence no. */
+ slot->seq_done = 1;
+@@ -778,6 +782,11 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
+ out_noaction:
+ return ret;
++session_recover:
++ nfs4_schedule_session_recovery(session, res->sr_status);
++ goto retry_nowait;
++retry_new_seq:
++ ++slot->seq_nr;
+ retry_nowait:
+ if (rpc_restart_call_prepare(task)) {
+ nfs41_sequence_free_slot(res);
+@@ -854,6 +863,17 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
1228 +@@ -854,6 +863,17 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
1229 + .rpc_call_done = nfs41_call_sync_done,
1230 + };
1231 +
1232 ++static void
1233 ++nfs4_sequence_process_interrupted(struct nfs_client *client,
1234 ++ struct nfs4_slot *slot, struct rpc_cred *cred)
1235 ++{
1236 ++ struct rpc_task *task;
1237 ++
1238 ++ task = _nfs41_proc_sequence(client, cred, slot, true);
1239 ++ if (!IS_ERR(task))
1240 ++ rpc_put_task_async(task);
1241 ++}
1242 ++
1243 + #else /* !CONFIG_NFS_V4_1 */
1244 +
1245 + static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1246 +@@ -874,9 +894,34 @@ int nfs4_sequence_done(struct rpc_task *task,
1247 + }
1248 + EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1249 +
1250 ++static void
1251 ++nfs4_sequence_process_interrupted(struct nfs_client *client,
1252 ++ struct nfs4_slot *slot, struct rpc_cred *cred)
1253 ++{
1254 ++ WARN_ON_ONCE(1);
1255 ++ slot->interrupted = 0;
1256 ++}
1257 ++
1258 + #endif /* !CONFIG_NFS_V4_1 */
1259 +
1260 +-int nfs4_setup_sequence(const struct nfs_client *client,
1261 ++static
1262 ++void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1263 ++ struct nfs4_sequence_res *res,
1264 ++ struct nfs4_slot *slot)
1265 ++{
1266 ++ if (!slot)
1267 ++ return;
1268 ++ slot->privileged = args->sa_privileged ? 1 : 0;
1269 ++ args->sa_slot = slot;
1270 ++
1271 ++ res->sr_slot = slot;
1272 ++ res->sr_timestamp = jiffies;
1273 ++ res->sr_status_flags = 0;
1274 ++ res->sr_status = 1;
1275 ++
1276 ++}
1277 ++
1278 ++int nfs4_setup_sequence(struct nfs_client *client,
1279 + struct nfs4_sequence_args *args,
1280 + struct nfs4_sequence_res *res,
1281 + struct rpc_task *task)
1282 +@@ -894,29 +939,28 @@ int nfs4_setup_sequence(const struct nfs_client *client,
1283 + task->tk_timeout = 0;
1284 + }
1285 +
1286 +- spin_lock(&tbl->slot_tbl_lock);
1287 +- /* The state manager will wait until the slot table is empty */
1288 +- if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1289 +- goto out_sleep;
1290 ++ for (;;) {
1291 ++ spin_lock(&tbl->slot_tbl_lock);
1292 ++ /* The state manager will wait until the slot table is empty */
1293 ++ if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1294 ++ goto out_sleep;
1295 ++
1296 ++ slot = nfs4_alloc_slot(tbl);
1297 ++ if (IS_ERR(slot)) {
1298 ++ /* Try again in 1/4 second */
1299 ++ if (slot == ERR_PTR(-ENOMEM))
1300 ++ task->tk_timeout = HZ >> 2;
1301 ++ goto out_sleep;
1302 ++ }
1303 ++ spin_unlock(&tbl->slot_tbl_lock);
1304 +
1305 +- slot = nfs4_alloc_slot(tbl);
1306 +- if (IS_ERR(slot)) {
1307 +- /* Try again in 1/4 second */
1308 +- if (slot == ERR_PTR(-ENOMEM))
1309 +- task->tk_timeout = HZ >> 2;
1310 +- goto out_sleep;
1311 ++ if (likely(!slot->interrupted))
1312 ++ break;
1313 ++ nfs4_sequence_process_interrupted(client,
1314 ++ slot, task->tk_msg.rpc_cred);
1315 + }
1316 +- spin_unlock(&tbl->slot_tbl_lock);
1317 +-
1318 +- slot->privileged = args->sa_privileged ? 1 : 0;
1319 +- args->sa_slot = slot;
1320 +
1321 +- res->sr_slot = slot;
1322 +- if (session) {
1323 +- res->sr_timestamp = jiffies;
1324 +- res->sr_status_flags = 0;
1325 +- res->sr_status = 1;
1326 +- }
1327 ++ nfs4_sequence_attach_slot(args, res, slot);
1328 +
1329 + trace_nfs4_setup_sequence(session, args);
1330 + out_start:
1331 +@@ -8151,6 +8195,7 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
1332 +
1333 + static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
1334 + struct rpc_cred *cred,
1335 ++ struct nfs4_slot *slot,
1336 + bool is_privileged)
1337 + {
1338 + struct nfs4_sequence_data *calldata;
1339 +@@ -8164,15 +8209,18 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
1340 + .callback_ops = &nfs41_sequence_ops,
1341 + .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
1342 + };
1343 ++ struct rpc_task *ret;
1344 +
1345 ++ ret = ERR_PTR(-EIO);
1346 + if (!atomic_inc_not_zero(&clp->cl_count))
1347 +- return ERR_PTR(-EIO);
1348 ++ goto out_err;
1349 ++
1350 ++ ret = ERR_PTR(-ENOMEM);
1351 + calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
1352 +- if (calldata == NULL) {
1353 +- nfs_put_client(clp);
1354 +- return ERR_PTR(-ENOMEM);
1355 +- }
1356 ++ if (calldata == NULL)
1357 ++ goto out_put_clp;
1358 + nfs4_init_sequence(&calldata->args, &calldata->res, 0);
1359 ++ nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
1360 + if (is_privileged)
1361 + nfs4_set_sequence_privileged(&calldata->args);
1362 + msg.rpc_argp = &calldata->args;
1363 +@@ -8180,7 +8228,15 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
1364 + calldata->clp = clp;
1365 + task_setup_data.callback_data = calldata;
1366 +
1367 +- return rpc_run_task(&task_setup_data);
1368 ++ ret = rpc_run_task(&task_setup_data);
1369 ++ if (IS_ERR(ret))
1370 ++ goto out_err;
1371 ++ return ret;
1372 ++out_put_clp:
1373 ++ nfs_put_client(clp);
1374 ++out_err:
1375 ++ nfs41_release_slot(slot);
1376 ++ return ret;
1377 + }
1378 +
1379 + static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
1380 +@@ -8190,7 +8246,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
1381 +
1382 + if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
1383 + return -EAGAIN;
1384 +- task = _nfs41_proc_sequence(clp, cred, false);
1385 ++ task = _nfs41_proc_sequence(clp, cred, NULL, false);
1386 + if (IS_ERR(task))
1387 + ret = PTR_ERR(task);
1388 + else
1389 +@@ -8204,7 +8260,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
1390 + struct rpc_task *task;
1391 + int ret;
1392 +
1393 +- task = _nfs41_proc_sequence(clp, cred, true);
1394 ++ task = _nfs41_proc_sequence(clp, cred, NULL, true);
1395 + if (IS_ERR(task)) {
1396 + ret = PTR_ERR(task);
1397 + goto out;
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index 28825a5b6d09..902b72dac41a 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
+ else
+ stat->result_mask = STATX_BASIC_STATS &
+ ~STATX_SIZE;
++
++ stat->attributes_mask = STATX_ATTR_IMMUTABLE |
++ STATX_ATTR_APPEND;
++ if (inode->i_flags & S_IMMUTABLE)
++ stat->attributes |= STATX_ATTR_IMMUTABLE;
++ if (inode->i_flags & S_APPEND)
++ stat->attributes |= STATX_ATTR_APPEND;
+ }
+ return ret;
+ }
+diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
+index f8f3c73d2664..05b3abbdbc4b 100644
+--- a/fs/orangefs/namei.c
++++ b/fs/orangefs/namei.c
+@@ -314,6 +314,13 @@ static int orangefs_symlink(struct inode *dir,
+ ret = PTR_ERR(inode);
+ goto out;
+ }
++ /*
++ * This is necessary because orangefs_inode_getattr will not
++ * re-read symlink size as it is impossible for it to change.
++ * Invalidating the cache does not help. orangefs_new_inode
++ * does not set the correct size (it does not know symname).
++ */
++ inode->i_size = strlen(symname);
+
+ gossip_debug(GOSSIP_NAME_DEBUG,
+ "Assigned symlink inode new number of %pU\n",
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index f144216febc6..9397628a1967 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool little_endian,
+- bool has_data_valid)
++ bool has_data_valid,
++ int vlan_hlen)
+ {
+ memset(hdr, 0, sizeof(*hdr)); /* no info leak */
+
+@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+- if (skb_vlan_tag_present(skb))
+- hdr->csum_start = __cpu_to_virtio16(little_endian,
+- skb_checksum_start_offset(skb) + VLAN_HLEN);
+- else
+- hdr->csum_start = __cpu_to_virtio16(little_endian,
+- skb_checksum_start_offset(skb));
++ hdr->csum_start = __cpu_to_virtio16(little_endian,
++ skb_checksum_start_offset(skb) + vlan_hlen);
+ hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ skb->csum_offset);
+ } else if (has_data_valid &&
+diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
+index c4f5caaf3778..f6a3543e5247 100644
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+ struct sockcm_cookie *sockc);
+
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+- __u16 srcp, __u16 destp, int bucket);
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++ __u16 srcp, __u16 destp, int rqueue, int bucket);
++static inline void
++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
++ __u16 destp, int bucket)
++{
++ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
++ bucket);
++}
+
+ #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 6c759c8594e2..18391015233e 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+ return htons((((u64) hash * (max - min)) >> 32) + min);
+ }
+
++static inline int udp_rqueue_get(struct sock *sk)
++{
++ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
++}
++
+ /* net/ipv4/udp.c */
+ void udp_destruct_sock(struct sock *sk);
+ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
1501 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1502 +index dee049a0ec5b..6774e0369ebe 100644
1503 +--- a/mm/backing-dev.c
1504 ++++ b/mm/backing-dev.c
1505 +@@ -409,6 +409,7 @@ static void wb_exit(struct bdi_writeback *wb)
1506 + * protected.
1507 + */
1508 + static DEFINE_SPINLOCK(cgwb_lock);
1509 ++static struct workqueue_struct *cgwb_release_wq;
1510 +
1511 + /**
1512 + * wb_congested_get_create - get or create a wb_congested
1513 +@@ -519,7 +520,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
1514 + {
1515 + struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
1516 + refcnt);
1517 +- schedule_work(&wb->release_work);
1518 ++ queue_work(cgwb_release_wq, &wb->release_work);
1519 + }
1520 +
1521 + static void cgwb_kill(struct bdi_writeback *wb)
1522 +@@ -783,6 +784,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
1523 + spin_unlock_irq(&cgwb_lock);
1524 + }
1525 +
1526 ++static int __init cgwb_init(void)
1527 ++{
1528 ++ /*
1529 ++ * There can be many concurrent release work items overwhelming
1530 ++ * system_wq. Put them in a separate wq and limit concurrency.
1531 ++ * There's no point in executing many of these in parallel.
1532 ++ */
1533 ++ cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
1534 ++ if (!cgwb_release_wq)
1535 ++ return -ENOMEM;
1536 ++
1537 ++ return 0;
1538 ++}
1539 ++subsys_initcall(cgwb_init);
1540 ++
1541 + #else /* CONFIG_CGROUP_WRITEBACK */
1542 +
1543 + static int cgwb_bdi_init(struct backing_dev_info *bdi)
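The backing-dev.c hunk moves cgwb release work off system_wq onto a private workqueue created with max_active = 1, serialising the release path rather than flooding the shared pool with work items that gain nothing from running in parallel. A rough kernel-module-style sketch of that pattern, using the real workqueue API but invented names, for illustration only:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *release_wq;

static void release_fn(struct work_struct *work)
{
	/* teardown that must not swamp system_wq goes here */
}

static DECLARE_WORK(release_work, release_fn);

static int __init release_wq_init(void)
{
	/* max_active = 1 limits concurrency, as in the cgwb_init()
	 * hunk above; 0 flags keeps default workqueue behaviour */
	release_wq = alloc_workqueue("example_release", 0, 1);
	if (!release_wq)
		return -ENOMEM;
	queue_work(release_wq, &release_work);
	return 0;
}
subsys_initcall(release_wq_init);
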
1544 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1545 +index 1d7693c35424..59ccf455fcbd 100644
1546 +--- a/mm/page_alloc.c
1547 ++++ b/mm/page_alloc.c
1548 +@@ -3981,7 +3981,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
1549 + * orientated.
1550 + */
1551 + if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
1552 +- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
1553 + ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
1554 + ac->high_zoneidx, ac->nodemask);
1555 + }
1556 +diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
1557 +index fcc9aa72877d..374d586b4a2c 100644
1558 +--- a/net/dsa/tag_trailer.c
1559 ++++ b/net/dsa/tag_trailer.c
1560 +@@ -79,7 +79,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
1561 + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
1562 + return NULL;
1563 +
1564 +- pskb_trim_rcsum(skb, skb->len - 4);
1565 ++ if (pskb_trim_rcsum(skb, skb->len - 4))
1566 ++ return NULL;
1567 +
1568 + skb->dev = ds->ports[source_port].netdev;
1569 +
1570 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1571 +index cab4b935e474..a95ccdceb797 100644
1572 +--- a/net/ipv4/tcp_ipv4.c
1573 ++++ b/net/ipv4/tcp_ipv4.c
1574 +@@ -1675,6 +1675,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
1575 + reqsk_put(req);
1576 + goto discard_it;
1577 + }
1578 ++ if (tcp_checksum_complete(skb)) {
1579 ++ reqsk_put(req);
1580 ++ goto csum_error;
1581 ++ }
1582 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
1583 + inet_csk_reqsk_queue_drop_and_put(sk, req);
1584 + goto lookup;
1585 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1586 +index b0ad62bd38f7..5752bf7593dc 100644
1587 +--- a/net/ipv4/udp.c
1588 ++++ b/net/ipv4/udp.c
1589 +@@ -2720,7 +2720,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1590 + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
1591 + bucket, src, srcp, dest, destp, sp->sk_state,
1592 + sk_wmem_alloc_get(sp),
1593 +- sk_rmem_alloc_get(sp),
1594 ++ udp_rqueue_get(sp),
1595 + 0, 0L, 0,
1596 + from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
1597 + 0, sock_i_ino(sp),
1598 +diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
1599 +index d0390d844ac8..d9ad986c7b2c 100644
1600 +--- a/net/ipv4/udp_diag.c
1601 ++++ b/net/ipv4/udp_diag.c
1602 +@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
1603 + static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
1604 + void *info)
1605 + {
1606 +- r->idiag_rqueue = sk_rmem_alloc_get(sk);
1607 ++ r->idiag_rqueue = udp_rqueue_get(sk);
1608 + r->idiag_wqueue = sk_wmem_alloc_get(sk);
1609 + }
1610 +
1611 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1612 +index 287112da3c06..453dc3726199 100644
1613 +--- a/net/ipv6/datagram.c
1614 ++++ b/net/ipv6/datagram.c
1615 +@@ -1026,8 +1026,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
1616 + }
1617 + EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
1618 +
1619 +-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1620 +- __u16 srcp, __u16 destp, int bucket)
1621 ++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1622 ++ __u16 srcp, __u16 destp, int rqueue, int bucket)
1623 + {
1624 + const struct in6_addr *dest, *src;
1625 +
1626 +@@ -1043,7 +1043,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
1627 + dest->s6_addr32[2], dest->s6_addr32[3], destp,
1628 + sp->sk_state,
1629 + sk_wmem_alloc_get(sp),
1630 +- sk_rmem_alloc_get(sp),
1631 ++ rqueue,
1632 + 0, 0L, 0,
1633 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1634 + 0,
1635 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1636 +index 375b20d5bbd7..60efd326014b 100644
1637 +--- a/net/ipv6/route.c
1638 ++++ b/net/ipv6/route.c
1639 +@@ -1476,9 +1476,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1640 + const struct in6_addr *daddr, *saddr;
1641 + struct rt6_info *rt6 = (struct rt6_info *)dst;
1642 +
1643 +- if (rt6->rt6i_flags & RTF_LOCAL)
1644 +- return;
1645 +-
1646 + if (dst_metric_locked(dst, RTAX_MTU))
1647 + return;
1648 +
1649 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1650 +index 237cc6187c5a..35e8aef9ceed 100644
1651 +--- a/net/ipv6/tcp_ipv6.c
1652 ++++ b/net/ipv6/tcp_ipv6.c
1653 +@@ -1453,6 +1453,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
1654 + reqsk_put(req);
1655 + goto discard_it;
1656 + }
1657 ++ if (tcp_checksum_complete(skb)) {
1658 ++ reqsk_put(req);
1659 ++ goto csum_error;
1660 ++ }
1661 + if (unlikely(sk->sk_state != TCP_LISTEN)) {
1662 + inet_csk_reqsk_queue_drop_and_put(sk, req);
1663 + goto lookup;
1664 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1665 +index 0146dcdc5c40..330d5ea8451b 100644
1666 +--- a/net/ipv6/udp.c
1667 ++++ b/net/ipv6/udp.c
1668 +@@ -1503,7 +1503,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
1669 + struct inet_sock *inet = inet_sk(v);
1670 + __u16 srcp = ntohs(inet->inet_sport);
1671 + __u16 destp = ntohs(inet->inet_dport);
1672 +- ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
1673 ++ __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1674 ++ udp_rqueue_get(v), bucket);
1675 + }
1676 + return 0;
1677 + }
1678 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1679 +index 7806e166669a..4fe2e34522d6 100644
1680 +--- a/net/packet/af_packet.c
1681 ++++ b/net/packet/af_packet.c
1682 +@@ -2046,7 +2046,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1683 + return -EINVAL;
1684 + *len -= sizeof(vnet_hdr);
1685 +
1686 +- if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
1687 ++ if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
1688 + return -EINVAL;
1689 +
1690 + return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
1691 +@@ -2313,7 +2313,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1692 + if (do_vnet) {
1693 + if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
1694 + sizeof(struct virtio_net_hdr),
1695 +- vio_le(), true)) {
1696 ++ vio_le(), true, 0)) {
1697 + spin_lock(&sk->sk_receive_queue.lock);
1698 + goto drop_n_account;
1699 + }
1700 +diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
1701 +index b5f80e675783..f3ed63aa4111 100644
1702 +--- a/net/sched/act_simple.c
1703 ++++ b/net/sched/act_simple.c
1704 +@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a, int bind)
1705 + kfree(d->tcfd_defdata);
1706 + }
1707 +
1708 +-static int alloc_defdata(struct tcf_defact *d, char *defdata)
1709 ++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
1710 + {
1711 + d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
1712 + if (unlikely(!d->tcfd_defdata))
1713 + return -ENOMEM;
1714 +- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
1715 ++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
1716 + return 0;
1717 + }
1718 +
1719 +-static void reset_policy(struct tcf_defact *d, char *defdata,
1720 ++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
1721 + struct tc_defact *p)
1722 + {
1723 + spin_lock_bh(&d->tcf_lock);
1724 + d->tcf_action = p->action;
1725 + memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
1726 +- strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
1727 ++ nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
1728 + spin_unlock_bh(&d->tcf_lock);
1729 + }
1730 +
1731 +@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1732 + struct tcf_defact *d;
1733 + bool exists = false;
1734 + int ret = 0, err;
1735 +- char *defdata;
1736 +
1737 + if (nla == NULL)
1738 + return -EINVAL;
1739 +@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1740 + return -EINVAL;
1741 + }
1742 +
1743 +- defdata = nla_data(tb[TCA_DEF_DATA]);
1744 +-
1745 + if (!exists) {
1746 + ret = tcf_idr_create(tn, parm->index, est, a,
1747 + &act_simp_ops, bind, false);
1748 +@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1749 + return ret;
1750 +
1751 + d = to_defact(*a);
1752 +- ret = alloc_defdata(d, defdata);
1753 ++ ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
1754 + if (ret < 0) {
1755 + tcf_idr_release(*a, bind);
1756 + return ret;
1757 +@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
1758 + if (!ovr)
1759 + return -EEXIST;
1760 +
1761 +- reset_policy(d, defdata, parm);
1762 ++ reset_policy(d, tb[TCA_DEF_DATA], parm);
1763 + }
1764 +
1765 + if (ret == ACT_P_CREATED)
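The act_simple hunks stop treating nla_data() as a plain char * and instead hand the whole attribute to nla_strlcpy(), which honours the attribute's own length and always NUL-terminates, so an unterminated netlink payload cannot run the copy past its buffer. A small userspace sketch of why a length-bounded copy from a (pointer, length) attribute matters; the struct and helper below are invented stand-ins, not the netlink API:

#include <stdio.h>
#include <string.h>

#define MAX_DATA 32

/* Invented stand-in for a netlink attribute: payload plus its length. */
struct attr {
	const char *data;
	size_t len;
};

/* In the spirit of nla_strlcpy(): copy at most the attribute's payload,
 * and always NUL-terminate the destination. */
static void attr_strlcpy(char *dst, const struct attr *a, size_t dstsize)
{
	size_t n = a->len < dstsize - 1 ? a->len : dstsize - 1;

	memcpy(dst, a->data, n);
	dst[n] = '\0';
}

int main(void)
{
	char payload[4] = { 'a', 'b', 'c', 'd' };	/* NOT NUL-terminated */
	struct attr a = { payload, sizeof(payload) };
	char buf[MAX_DATA];

	/* strcpy(buf, payload) would read past the 4 bytes; this cannot: */
	attr_strlcpy(buf, &a, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
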
1766 +diff --git a/net/socket.c b/net/socket.c
1767 +index 43d2f17f5eea..8b2bef6cfe42 100644
1768 +--- a/net/socket.c
1769 ++++ b/net/socket.c
1770 +@@ -538,7 +538,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
1771 + if (!err && (iattr->ia_valid & ATTR_UID)) {
1772 + struct socket *sock = SOCKET_I(d_inode(dentry));
1773 +
1774 +- sock->sk->sk_uid = iattr->ia_uid;
1775 ++ if (sock->sk)
1776 ++ sock->sk->sk_uid = iattr->ia_uid;
1777 ++ else
1778 ++ err = -ENOENT;
1779 + }
1780 +
1781 + return err;
1782 +@@ -588,12 +591,16 @@ EXPORT_SYMBOL(sock_alloc);
1783 + * an inode not a file.
1784 + */
1785 +
1786 +-void sock_release(struct socket *sock)
1787 ++static void __sock_release(struct socket *sock, struct inode *inode)
1788 + {
1789 + if (sock->ops) {
1790 + struct module *owner = sock->ops->owner;
1791 +
1792 ++ if (inode)
1793 ++ inode_lock(inode);
1794 + sock->ops->release(sock);
1795 ++ if (inode)
1796 ++ inode_unlock(inode);
1797 + sock->ops = NULL;
1798 + module_put(owner);
1799 + }
1800 +@@ -608,6 +615,11 @@ void sock_release(struct socket *sock)
1801 + }
1802 + sock->file = NULL;
1803 + }
1804 ++
1805 ++void sock_release(struct socket *sock)
1806 ++{
1807 ++ __sock_release(sock, NULL);
1808 ++}
1809 + EXPORT_SYMBOL(sock_release);
1810 +
1811 + void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
1812 +@@ -1122,7 +1134,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
1813 +
1814 + static int sock_close(struct inode *inode, struct file *filp)
1815 + {
1816 +- sock_release(SOCKET_I(inode));
1817 ++ __sock_release(SOCKET_I(inode), inode);
1818 + return 0;
1819 + }
1820 +
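The socket.c hunks serialise ->release() against concurrent sockfs operations by holding the inode lock, but only on the sock_close() path where an inode actually exists; the public sock_release() becomes a thin wrapper passing NULL. A compact userspace analogy of the optional-lock-context shape, using a pthread mutex (types and names invented; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct conn {
	int open;
};

/* Core teardown; the lock is optional because not every caller has one. */
static void __conn_release(struct conn *c, pthread_mutex_t *lock)
{
	if (lock)
		pthread_mutex_lock(lock);
	c->open = 0;			/* the part that must not race */
	if (lock)
		pthread_mutex_unlock(lock);
}

/* Legacy entry point keeps its signature: no lock context. */
static void conn_release(struct conn *c)
{
	__conn_release(c, NULL);
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	struct conn a = { 1 }, b = { 1 };

	conn_release(&a);		/* like sock_release() */
	__conn_release(&b, &m);		/* like sock_close(), which has the inode */
	printf("%d %d\n", a.open, b.open);
	return 0;
}
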
1821 +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
1822 +index 83f886d7c1f8..3c86614462f6 100644
1823 +--- a/net/tls/tls_sw.c
1824 ++++ b/net/tls/tls_sw.c
1825 +@@ -211,18 +211,12 @@ static void tls_free_both_sg(struct sock *sk)
1826 + }
1827 +
1828 + static int tls_do_encryption(struct tls_context *tls_ctx,
1829 +- struct tls_sw_context *ctx, size_t data_len,
1830 +- gfp_t flags)
1831 ++ struct tls_sw_context *ctx,
1832 ++ struct aead_request *aead_req,
1833 ++ size_t data_len)
1834 + {
1835 +- unsigned int req_size = sizeof(struct aead_request) +
1836 +- crypto_aead_reqsize(ctx->aead_send);
1837 +- struct aead_request *aead_req;
1838 + int rc;
1839 +
1840 +- aead_req = kzalloc(req_size, flags);
1841 +- if (!aead_req)
1842 +- return -ENOMEM;
1843 +-
1844 + ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
1845 + ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;
1846 +
1847 +@@ -235,7 +229,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
1848 + ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
1849 + ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
1850 +
1851 +- kfree(aead_req);
1852 + return rc;
1853 + }
1854 +
1855 +@@ -244,8 +237,14 @@ static int tls_push_record(struct sock *sk, int flags,
1856 + {
1857 + struct tls_context *tls_ctx = tls_get_ctx(sk);
1858 + struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
1859 ++ struct aead_request *req;
1860 + int rc;
1861 +
1862 ++ req = kzalloc(sizeof(struct aead_request) +
1863 ++ crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
1864 ++ if (!req)
1865 ++ return -ENOMEM;
1866 ++
1867 + sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
1868 + sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
1869 +
1870 +@@ -261,15 +260,14 @@ static int tls_push_record(struct sock *sk, int flags,
1871 + tls_ctx->pending_open_record_frags = 0;
1872 + set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
1873 +
1874 +- rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
1875 +- sk->sk_allocation);
1876 ++ rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
1877 + if (rc < 0) {
1878 + /* If we are called from write_space and
1879 + * we fail, we need to set this SOCK_NOSPACE
1880 + * to trigger another write_space in the future.
1881 + */
1882 + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1883 +- return rc;
1884 ++ goto out_req;
1885 + }
1886 +
1887 + free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
1888 +@@ -284,6 +282,8 @@ static int tls_push_record(struct sock *sk, int flags,
1889 + tls_err_abort(sk);
1890 +
1891 + tls_advance_record_sn(sk, tls_ctx);
1892 ++out_req:
1893 ++ kfree(req);
1894 + return rc;
1895 + }
1896 +
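In the tls_sw.c hunks the aead_request allocation is hoisted from tls_do_encryption() up into tls_push_record(), whose single out_req label now owns the kfree(); the helper neither allocates nor frees, which removes the leak-prone early return on encryption failure. A generic C sketch of the hoist-allocation, single-cleanup-label shape (names invented):

#include <stdlib.h>
#include <string.h>

struct req { char buf[64]; };

/* The helper now borrows the request: it neither allocates nor frees,
 * so it cannot leak on an error path. */
static int do_encrypt(struct req *r, const void *data, size_t len)
{
	if (len > sizeof(r->buf))
		return -1;
	memcpy(r->buf, data, len);	/* stand-in for the real crypto call */
	return 0;
}

static int push_record(const void *data, size_t len)
{
	struct req *r = calloc(1, sizeof(*r));
	int rc;

	if (!r)
		return -1;

	rc = do_encrypt(r, data, len);
	if (rc)
		goto out_req;

	/* further bookkeeping on success would go here */
out_req:
	free(r);			/* one owner, one free, every path */
	return rc;
}

int main(void)
{
	return push_record("hello", 5) ? 1 : 0;
}
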
1897 +diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
1898 +index d1eb14842340..a12e594d4e3b 100644
1899 +--- a/sound/pci/hda/hda_controller.c
1900 ++++ b/sound/pci/hda/hda_controller.c
1901 +@@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
1902 + return err;
1903 + strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
1904 + apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
1905 +- if (apcm == NULL)
1906 ++ if (apcm == NULL) {
1907 ++ snd_device_free(chip->card, pcm);
1908 + return -ENOMEM;
1909 ++ }
1910 + apcm->chip = chip;
1911 + apcm->pcm = pcm;
1912 + apcm->codec = codec;
1913 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1914 +index 5b4dbcec6de8..ba9a7e552183 100644
1915 +--- a/sound/pci/hda/patch_conexant.c
1916 ++++ b/sound/pci/hda/patch_conexant.c
1917 +@@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
1918 + SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
1919 + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
1920 + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
1921 ++ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
1922 ++ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
1923 + SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
1924 + SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
1925 + SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
1926 + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
1927 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1928 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1929 ++ SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
1930 + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
1931 + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
1932 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
1933 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1934 +index 6ae061183eff..2a8aa2bc5c30 100644
1935 +--- a/sound/pci/hda/patch_realtek.c
1936 ++++ b/sound/pci/hda/patch_realtek.c
1937 +@@ -6439,7 +6439,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1938 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
1939 + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
1940 + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
1941 +- SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
1942 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
1943 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
1944 + SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
1945 +@@ -6610,6 +6609,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
1946 + {0x12, 0x90a60140},
1947 + {0x14, 0x90170110},
1948 + {0x21, 0x02211020}),
1949 ++ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
1950 ++ {0x12, 0x90a60140},
1951 ++ {0x14, 0x90170110},
1952 ++ {0x19, 0x02a11030},
1953 ++ {0x21, 0x02211020}),
1954 + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
1955 + {0x12, 0x90a60140},
1956 + {0x14, 0x90170150},