Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 10 Sep 2019 11:12:56
Message-Id: 1568113954.c0678b2b22ff8412e43d12fbd0c2b879023c728c.mpagano@gentoo
1 commit: c0678b2b22ff8412e43d12fbd0c2b879023c728c
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Sep 10 11:12:34 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Sep 10 11:12:34 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c0678b2b
7
8 Linux patch 4.19.72
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1071_linux-4.19.72.patch | 2096 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2100 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 807b793..5a202ee 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -323,6 +323,10 @@ Patch: 1070_linux-4.19.70.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.70
23
24 +Patch: 1071_linux-4.19.71.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.71
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1071_linux-4.19.72.patch b/1071_linux-4.19.72.patch
33 new file mode 100644
34 index 0000000..3d76e8b
35 --- /dev/null
36 +++ b/1071_linux-4.19.72.patch
37 @@ -0,0 +1,2096 @@
38 +diff --git a/Makefile b/Makefile
39 +index f6c9d5757470..ef80b1dfb753 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 71
47 ++SUBLEVEL = 72
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
52 +index f8debf7aeb4c..76e1edf5bf12 100644
53 +--- a/arch/x86/boot/compressed/pgtable_64.c
54 ++++ b/arch/x86/boot/compressed/pgtable_64.c
55 +@@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
56 +
57 + /* Find the first usable memory region under bios_start. */
58 + for (i = boot_params->e820_entries - 1; i >= 0; i--) {
59 ++ unsigned long new = bios_start;
60 ++
61 + entry = &boot_params->e820_table[i];
62 +
63 + /* Skip all entries above bios_start. */
64 +@@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
65 +
66 + /* Adjust bios_start to the end of the entry if needed. */
67 + if (bios_start > entry->addr + entry->size)
68 +- bios_start = entry->addr + entry->size;
69 ++ new = entry->addr + entry->size;
70 +
71 + /* Keep bios_start page-aligned. */
72 +- bios_start = round_down(bios_start, PAGE_SIZE);
73 ++ new = round_down(new, PAGE_SIZE);
74 +
75 + /* Skip the entry if it's too small. */
76 +- if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
77 ++ if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
78 + continue;
79 +
80 ++ /* Protect against underflow. */
81 ++ if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
82 ++ break;
83 ++
84 ++ bios_start = new;
85 + break;
86 + }
87 +
88 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
89 +index d3983fdf1012..8fa49cf1211d 100644
90 +--- a/arch/x86/include/asm/bootparam_utils.h
91 ++++ b/arch/x86/include/asm/bootparam_utils.h
92 +@@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
93 + BOOT_PARAM_PRESERVE(eddbuf_entries),
94 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
95 + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
96 ++ BOOT_PARAM_PRESERVE(secure_boot),
97 + BOOT_PARAM_PRESERVE(hdr),
98 + BOOT_PARAM_PRESERVE(e820_table),
99 + BOOT_PARAM_PRESERVE(eddbuf),
100 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
101 +index 90be3a1506d3..b316bd61a6ac 100644
102 +--- a/arch/x86/kernel/apic/apic.c
103 ++++ b/arch/x86/kernel/apic/apic.c
104 +@@ -1140,10 +1140,6 @@ void clear_local_APIC(void)
105 + apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
106 + v = apic_read(APIC_LVT1);
107 + apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
108 +- if (!x2apic_enabled()) {
109 +- v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
110 +- apic_write(APIC_LDR, v);
111 +- }
112 + if (maxlvt >= 4) {
113 + v = apic_read(APIC_LVTPC);
114 + apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
115 +diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
116 +index ec9e03a6b778..9e70f7c7e565 100644
117 +--- a/drivers/bluetooth/btqca.c
118 ++++ b/drivers/bluetooth/btqca.c
119 +@@ -363,6 +363,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
120 + return err;
121 + }
122 +
123 ++ /* Give the controller some time to get ready to receive the NVM */
124 ++ msleep(10);
125 ++
126 + /* Download NVM configuration */
127 + config.type = TLV_TYPE_NVM;
128 + if (soc_type == QCA_WCN3990)
129 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
130 +index fd83046d8376..f6389479fccb 100644
131 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
132 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
133 +@@ -220,6 +220,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
134 + struct mtk_drm_private *private = drm->dev_private;
135 + struct platform_device *pdev;
136 + struct device_node *np;
137 ++ struct device *dma_dev;
138 + int ret;
139 +
140 + if (!iommu_present(&platform_bus_type))
141 +@@ -282,7 +283,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
142 + goto err_component_unbind;
143 + }
144 +
145 +- private->dma_dev = &pdev->dev;
146 ++ dma_dev = &pdev->dev;
147 ++ private->dma_dev = dma_dev;
148 ++
149 ++ /*
150 ++ * Configure the DMA segment size to make sure we get contiguous IOVA
151 ++ * when importing PRIME buffers.
152 ++ */
153 ++ if (!dma_dev->dma_parms) {
154 ++ private->dma_parms_allocated = true;
155 ++ dma_dev->dma_parms =
156 ++ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
157 ++ GFP_KERNEL);
158 ++ }
159 ++ if (!dma_dev->dma_parms) {
160 ++ ret = -ENOMEM;
161 ++ goto err_component_unbind;
162 ++ }
163 ++
164 ++ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
165 ++ if (ret) {
166 ++ dev_err(dma_dev, "Failed to set DMA segment size\n");
167 ++ goto err_unset_dma_parms;
168 ++ }
169 +
170 + /*
171 + * We don't use the drm_irq_install() helpers provided by the DRM
172 +@@ -292,13 +315,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
173 + drm->irq_enabled = true;
174 + ret = drm_vblank_init(drm, MAX_CRTC);
175 + if (ret < 0)
176 +- goto err_component_unbind;
177 ++ goto err_unset_dma_parms;
178 +
179 + drm_kms_helper_poll_init(drm);
180 + drm_mode_config_reset(drm);
181 +
182 + return 0;
183 +
184 ++err_unset_dma_parms:
185 ++ if (private->dma_parms_allocated)
186 ++ dma_dev->dma_parms = NULL;
187 + err_component_unbind:
188 + component_unbind_all(drm->dev, drm);
189 + err_config_cleanup:
190 +@@ -309,9 +335,14 @@ err_config_cleanup:
191 +
192 + static void mtk_drm_kms_deinit(struct drm_device *drm)
193 + {
194 ++ struct mtk_drm_private *private = drm->dev_private;
195 ++
196 + drm_kms_helper_poll_fini(drm);
197 + drm_atomic_helper_shutdown(drm);
198 +
199 ++ if (private->dma_parms_allocated)
200 ++ private->dma_dev->dma_parms = NULL;
201 ++
202 + component_unbind_all(drm->dev, drm);
203 + drm_mode_config_cleanup(drm);
204 + }
205 +@@ -327,6 +358,18 @@ static const struct file_operations mtk_drm_fops = {
206 + .compat_ioctl = drm_compat_ioctl,
207 + };
208 +
209 ++/*
210 ++ * We need to override this because the device used to import the memory is
211 ++ * not dev->dev, as drm_gem_prime_import() expects.
212 ++ */
213 ++struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
214 ++ struct dma_buf *dma_buf)
215 ++{
216 ++ struct mtk_drm_private *private = dev->dev_private;
217 ++
218 ++ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
219 ++}
220 ++
221 + static struct drm_driver mtk_drm_driver = {
222 + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
223 + DRIVER_ATOMIC,
224 +@@ -338,7 +381,7 @@ static struct drm_driver mtk_drm_driver = {
225 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
226 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
227 + .gem_prime_export = drm_gem_prime_export,
228 +- .gem_prime_import = drm_gem_prime_import,
229 ++ .gem_prime_import = mtk_drm_gem_prime_import,
230 + .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
231 + .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
232 + .gem_prime_mmap = mtk_drm_gem_mmap_buf,
233 +diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
234 +index ecc00ca3221d..8fa60d46f860 100644
235 +--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
236 ++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
237 +@@ -59,6 +59,8 @@ struct mtk_drm_private {
238 + } commit;
239 +
240 + struct drm_atomic_state *suspend_state;
241 ++
242 ++ bool dma_parms_allocated;
243 + };
244 +
245 + extern struct platform_driver mtk_ddp_driver;
246 +diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
247 +index 271f31461da4..6f65f5257236 100644
248 +--- a/drivers/hid/hid-cp2112.c
249 ++++ b/drivers/hid/hid-cp2112.c
250 +@@ -1160,8 +1160,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
251 +
252 + INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
253 +
254 +- cp2112_gpio_direction_input(gc, d->hwirq);
255 +-
256 + if (!dev->gpio_poll) {
257 + dev->gpio_poll = true;
258 + schedule_delayed_work(&dev->gpio_poll_worker, 0);
259 +@@ -1209,6 +1207,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
260 + return PTR_ERR(dev->desc[pin]);
261 + }
262 +
263 ++ ret = cp2112_gpio_direction_input(&dev->gc, pin);
264 ++ if (ret < 0) {
265 ++ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
266 ++ goto err_desc;
267 ++ }
268 ++
269 + ret = gpiochip_lock_as_irq(&dev->gc, pin);
270 + if (ret) {
271 + dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
272 +diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
273 +index 7eaff4dcbfd7..5bc811b7e6cf 100644
274 +--- a/drivers/infiniband/hw/hfi1/fault.c
275 ++++ b/drivers/infiniband/hw/hfi1/fault.c
276 +@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
277 + if (!data)
278 + return -ENOMEM;
279 + copy = min(len, datalen - 1);
280 +- if (copy_from_user(data, buf, copy))
281 +- return -EFAULT;
282 ++ if (copy_from_user(data, buf, copy)) {
283 ++ ret = -EFAULT;
284 ++ goto free_data;
285 ++ }
286 +
287 + ret = debugfs_file_get(file->f_path.dentry);
288 + if (unlikely(ret))
289 +- return ret;
290 ++ goto free_data;
291 + ptr = data;
292 + token = ptr;
293 + for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
294 +@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
295 + ret = len;
296 +
297 + debugfs_file_put(file->f_path.dentry);
298 ++free_data:
299 + kfree(data);
300 + return ret;
301 + }
302 +@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
303 + return -ENOMEM;
304 + ret = debugfs_file_get(file->f_path.dentry);
305 + if (unlikely(ret))
306 +- return ret;
307 ++ goto free_data;
308 + bit = find_first_bit(fault->opcodes, bitsize);
309 + while (bit < bitsize) {
310 + zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
311 +@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
312 + data[size - 1] = '\n';
313 + data[size] = '\0';
314 + ret = simple_read_from_buffer(buf, len, pos, data, size);
315 ++free_data:
316 + kfree(data);
317 + return ret;
318 + }
319 +diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
320 +index e5466d786bb1..5aaa2a6c431b 100644
321 +--- a/drivers/infiniband/hw/mlx4/mad.c
322 ++++ b/drivers/infiniband/hw/mlx4/mad.c
323 +@@ -1668,8 +1668,6 @@ tx_err:
324 + tx_buf_size, DMA_TO_DEVICE);
325 + kfree(tun_qp->tx_ring[i].buf.addr);
326 + }
327 +- kfree(tun_qp->tx_ring);
328 +- tun_qp->tx_ring = NULL;
329 + i = MLX4_NUM_TUNNEL_BUFS;
330 + err:
331 + while (i > 0) {
332 +@@ -1678,6 +1676,8 @@ err:
333 + rx_buf_size, DMA_FROM_DEVICE);
334 + kfree(tun_qp->ring[i].addr);
335 + }
336 ++ kfree(tun_qp->tx_ring);
337 ++ tun_qp->tx_ring = NULL;
338 + kfree(tun_qp->ring);
339 + tun_qp->ring = NULL;
340 + return -ENOMEM;
341 +diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
342 +index a8b9be3e28db..7d0a5ccf5775 100644
343 +--- a/drivers/input/serio/hyperv-keyboard.c
344 ++++ b/drivers/input/serio/hyperv-keyboard.c
345 +@@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
346 +
347 + static void hv_kbd_on_channel_callback(void *context)
348 + {
349 ++ struct vmpacket_descriptor *desc;
350 + struct hv_device *hv_dev = context;
351 +- void *buffer;
352 +- int bufferlen = 0x100; /* Start with sensible size */
353 + u32 bytes_recvd;
354 + u64 req_id;
355 +- int error;
356 +
357 +- buffer = kmalloc(bufferlen, GFP_ATOMIC);
358 +- if (!buffer)
359 +- return;
360 +-
361 +- while (1) {
362 +- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
363 +- &bytes_recvd, &req_id);
364 +- switch (error) {
365 +- case 0:
366 +- if (bytes_recvd == 0) {
367 +- kfree(buffer);
368 +- return;
369 +- }
370 +-
371 +- hv_kbd_handle_received_packet(hv_dev, buffer,
372 +- bytes_recvd, req_id);
373 +- break;
374 ++ foreach_vmbus_pkt(desc, hv_dev->channel) {
375 ++ bytes_recvd = desc->len8 * 8;
376 ++ req_id = desc->trans_id;
377 +
378 +- case -ENOBUFS:
379 +- kfree(buffer);
380 +- /* Handle large packet */
381 +- bufferlen = bytes_recvd;
382 +- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
383 +- if (!buffer)
384 +- return;
385 +- break;
386 +- }
387 ++ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
388 ++ req_id);
389 + }
390 + }
391 +
392 +diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
393 +index 6aeb1045c302..1ab40c97403b 100644
394 +--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
395 ++++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
396 +@@ -10,7 +10,7 @@
397 +
398 + #include "cavium_ptp.h"
399 +
400 +-#define DRV_NAME "Cavium PTP Driver"
401 ++#define DRV_NAME "cavium_ptp"
402 +
403 + #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
404 + #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
405 +diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
406 +index 8f746e1348d4..3deb3c07681f 100644
407 +--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
408 ++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
409 +@@ -238,8 +238,10 @@ int octeon_setup_iq(struct octeon_device *oct,
410 + }
411 +
412 + oct->num_iqs++;
413 +- if (oct->fn_list.enable_io_queues(oct))
414 ++ if (oct->fn_list.enable_io_queues(oct)) {
415 ++ octeon_delete_instr_queue(oct, iq_no);
416 + return 1;
417 ++ }
418 +
419 + return 0;
420 + }
421 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
422 +index 0f72f9c4ec74..b429b726b987 100644
423 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
424 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
425 +@@ -3276,8 +3276,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
426 + return -ENOMEM;
427 +
428 + err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
429 +- if (err)
430 ++ if (err) {
431 ++ kvfree(t);
432 + return err;
433 ++ }
434 +
435 + bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
436 + kvfree(t);
437 +diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
438 +index f70cb4d3c684..40ad1e503255 100644
439 +--- a/drivers/net/ethernet/ibm/ibmveth.c
440 ++++ b/drivers/net/ethernet/ibm/ibmveth.c
441 +@@ -1618,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
442 + struct net_device *netdev;
443 + struct ibmveth_adapter *adapter;
444 + unsigned char *mac_addr_p;
445 +- unsigned int *mcastFilterSize_p;
446 ++ __be32 *mcastFilterSize_p;
447 + long ret;
448 + unsigned long ret_attr;
449 +
450 +@@ -1640,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
451 + return -EINVAL;
452 + }
453 +
454 +- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
455 +- VETH_MCAST_FILTER_SIZE, NULL);
456 ++ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
457 ++ VETH_MCAST_FILTER_SIZE,
458 ++ NULL);
459 + if (!mcastFilterSize_p) {
460 + dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
461 + "attribute\n");
462 +@@ -1658,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
463 +
464 + adapter->vdev = dev;
465 + adapter->netdev = netdev;
466 +- adapter->mcastFilterSize = *mcastFilterSize_p;
467 ++ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
468 + adapter->pool_config = 0;
469 +
470 + netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
471 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
472 +index 0ae43d27cdcf..255de7d68cd3 100644
473 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
474 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
475 +@@ -1586,6 +1586,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
476 + lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
477 + (u64)tx_buff->indir_dma,
478 + (u64)num_entries);
479 ++ dma_unmap_single(dev, tx_buff->indir_dma,
480 ++ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
481 + } else {
482 + tx_buff->num_entries = num_entries;
483 + lpar_rc = send_subcrq(adapter, handle_array[queue_num],
484 +@@ -2747,7 +2749,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
485 + union sub_crq *next;
486 + int index;
487 + int i, j;
488 +- u8 *first;
489 +
490 + restart_loop:
491 + while (pending_scrq(adapter, scrq)) {
492 +@@ -2777,14 +2778,6 @@ restart_loop:
493 +
494 + txbuff->data_dma[j] = 0;
495 + }
496 +- /* if sub_crq was sent indirectly */
497 +- first = &txbuff->indir_arr[0].generic.first;
498 +- if (*first == IBMVNIC_CRQ_CMD) {
499 +- dma_unmap_single(dev, txbuff->indir_dma,
500 +- sizeof(txbuff->indir_arr),
501 +- DMA_TO_DEVICE);
502 +- *first = 0;
503 +- }
504 +
505 + if (txbuff->last_frag) {
506 + dev_kfree_skb_any(txbuff->skb);
507 +diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
508 +index b2d2ec8c11e2..6789eed78ff7 100644
509 +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
510 ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
511 +@@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
512 + * setup (if available). */
513 + status = myri10ge_request_irq(mgp);
514 + if (status != 0)
515 +- goto abort_with_firmware;
516 ++ goto abort_with_slices;
517 + myri10ge_free_irq(mgp);
518 +
519 + /* Save configuration space to be restored if the
520 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
521 +index 5f092bbd0514..5462d2e8a1b7 100644
522 +--- a/drivers/net/ethernet/renesas/ravb_main.c
523 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
524 +@@ -1,7 +1,7 @@
525 + // SPDX-License-Identifier: GPL-2.0
526 + /* Renesas Ethernet AVB device driver
527 + *
528 +- * Copyright (C) 2014-2015 Renesas Electronics Corporation
529 ++ * Copyright (C) 2014-2019 Renesas Electronics Corporation
530 + * Copyright (C) 2015 Renesas Solutions Corp.
531 + * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@××××××××××××××.com>
532 + *
533 +@@ -514,7 +514,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
534 + kfree(ts_skb);
535 + if (tag == tfa_tag) {
536 + skb_tstamp_tx(skb, &shhwtstamps);
537 ++ dev_consume_skb_any(skb);
538 + break;
539 ++ } else {
540 ++ dev_kfree_skb_any(skb);
541 + }
542 + }
543 + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
544 +@@ -1556,7 +1559,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
545 + DMA_TO_DEVICE);
546 + goto unmap;
547 + }
548 +- ts_skb->skb = skb;
549 ++ ts_skb->skb = skb_get(skb);
550 + ts_skb->tag = priv->ts_skb_tag++;
551 + priv->ts_skb_tag &= 0x3ff;
552 + list_add_tail(&ts_skb->list, &priv->ts_skb_list);
553 +@@ -1685,6 +1688,7 @@ static int ravb_close(struct net_device *ndev)
554 + /* Clear the timestamp list */
555 + list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
556 + list_del(&ts_skb->list);
557 ++ kfree_skb(ts_skb->skb);
558 + kfree(ts_skb);
559 + }
560 +
561 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
562 +index 3b174eae77c1..f45df6df6932 100644
563 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
564 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
565 +@@ -1203,10 +1203,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
566 + int ret;
567 + struct device *dev = &bsp_priv->pdev->dev;
568 +
569 +- if (!ldo) {
570 +- dev_err(dev, "no regulator found\n");
571 +- return -1;
572 +- }
573 ++ if (!ldo)
574 ++ return 0;
575 +
576 + if (enable) {
577 + ret = regulator_enable(ldo);
578 +diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
579 +index cce9c9ed46aa..9146068979d2 100644
580 +--- a/drivers/net/ethernet/toshiba/tc35815.c
581 ++++ b/drivers/net/ethernet/toshiba/tc35815.c
582 +@@ -1497,7 +1497,7 @@ tc35815_rx(struct net_device *dev, int limit)
583 + pci_unmap_single(lp->pci_dev,
584 + lp->rx_skbs[cur_bd].skb_dma,
585 + RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
586 +- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
587 ++ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
588 + memmove(skb->data, skb->data - NET_IP_ALIGN,
589 + pkt_len);
590 + data = skb_put(skb, pkt_len);
591 +diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
592 +index edcd1e60b30d..f076050c8ad3 100644
593 +--- a/drivers/net/ethernet/tundra/tsi108_eth.c
594 ++++ b/drivers/net/ethernet/tundra/tsi108_eth.c
595 +@@ -383,9 +383,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
596 + static void tsi108_stat_carry(struct net_device *dev)
597 + {
598 + struct tsi108_prv_data *data = netdev_priv(dev);
599 ++ unsigned long flags;
600 + u32 carry1, carry2;
601 +
602 +- spin_lock_irq(&data->misclock);
603 ++ spin_lock_irqsave(&data->misclock, flags);
604 +
605 + carry1 = TSI_READ(TSI108_STAT_CARRY1);
606 + carry2 = TSI_READ(TSI108_STAT_CARRY2);
607 +@@ -453,7 +454,7 @@ static void tsi108_stat_carry(struct net_device *dev)
608 + TSI108_STAT_TXPAUSEDROP_CARRY,
609 + &data->tx_pause_drop);
610 +
611 +- spin_unlock_irq(&data->misclock);
612 ++ spin_unlock_irqrestore(&data->misclock, flags);
613 + }
614 +
615 + /* Read a stat counter atomically with respect to carries.
616 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
617 +index cc60ef9634db..6f6c0dbd91fc 100644
618 +--- a/drivers/net/hyperv/netvsc_drv.c
619 ++++ b/drivers/net/hyperv/netvsc_drv.c
620 +@@ -1248,12 +1248,15 @@ static void netvsc_get_stats64(struct net_device *net,
621 + struct rtnl_link_stats64 *t)
622 + {
623 + struct net_device_context *ndev_ctx = netdev_priv(net);
624 +- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
625 ++ struct netvsc_device *nvdev;
626 + struct netvsc_vf_pcpu_stats vf_tot;
627 + int i;
628 +
629 ++ rcu_read_lock();
630 ++
631 ++ nvdev = rcu_dereference(ndev_ctx->nvdev);
632 + if (!nvdev)
633 +- return;
634 ++ goto out;
635 +
636 + netdev_stats_to_stats64(t, &net->stats);
637 +
638 +@@ -1292,6 +1295,8 @@ static void netvsc_get_stats64(struct net_device *net,
639 + t->rx_packets += packets;
640 + t->multicast += multicast;
641 + }
642 ++out:
643 ++ rcu_read_unlock();
644 + }
645 +
646 + static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
647 +diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
648 +index 947bea81d924..dfbdea22fbad 100644
649 +--- a/drivers/net/usb/cx82310_eth.c
650 ++++ b/drivers/net/usb/cx82310_eth.c
651 +@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
652 + }
653 + if (!timeout) {
654 + dev_err(&udev->dev, "firmware not ready in time\n");
655 +- return -ETIMEDOUT;
656 ++ ret = -ETIMEDOUT;
657 ++ goto err;
658 + }
659 +
660 + /* enable ethernet mode (?) */
661 +diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
662 +index bd2ba3659028..0cc6993c279a 100644
663 +--- a/drivers/net/usb/kalmia.c
664 ++++ b/drivers/net/usb/kalmia.c
665 +@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
666 + status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
667 + usb_buf, 24);
668 + if (status != 0)
669 +- return status;
670 ++ goto out;
671 +
672 + memcpy(usb_buf, init_msg_2, 12);
673 + status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
674 + usb_buf, 28);
675 + if (status != 0)
676 +- return status;
677 ++ goto out;
678 +
679 + memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
680 +-
681 ++out:
682 + kfree(usb_buf);
683 + return status;
684 + }
685 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
686 +index 8d140495da79..e20266bd209e 100644
687 +--- a/drivers/net/usb/lan78xx.c
688 ++++ b/drivers/net/usb/lan78xx.c
689 +@@ -3799,7 +3799,7 @@ static int lan78xx_probe(struct usb_interface *intf,
690 + ret = register_netdev(netdev);
691 + if (ret != 0) {
692 + netif_err(dev, probe, netdev, "couldn't register the device\n");
693 +- goto out3;
694 ++ goto out4;
695 + }
696 +
697 + usb_set_intfdata(intf, dev);
698 +@@ -3814,12 +3814,14 @@ static int lan78xx_probe(struct usb_interface *intf,
699 +
700 + ret = lan78xx_phy_init(dev);
701 + if (ret < 0)
702 +- goto out4;
703 ++ goto out5;
704 +
705 + return 0;
706 +
707 +-out4:
708 ++out5:
709 + unregister_netdev(netdev);
710 ++out4:
711 ++ usb_free_urb(dev->urb_intr);
712 + out3:
713 + lan78xx_unbind(dev, intf);
714 + out2:
715 +diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
716 +index e9fc168bb734..489cba9b284d 100644
717 +--- a/drivers/net/wimax/i2400m/fw.c
718 ++++ b/drivers/net/wimax/i2400m/fw.c
719 +@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
720 + }
721 + result = i2400m_barker_db_add(barker);
722 + if (result < 0)
723 +- goto error_add;
724 ++ goto error_parse_add;
725 + }
726 + kfree(options_orig);
727 + }
728 + return 0;
729 +
730 ++error_parse_add:
731 + error_parse:
732 ++ kfree(options_orig);
733 + error_add:
734 + kfree(i2400m_barker_db);
735 + return result;
736 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
737 +index 05d6371c7f38..f57feb8fdea4 100644
738 +--- a/drivers/nvme/host/multipath.c
739 ++++ b/drivers/nvme/host/multipath.c
740 +@@ -323,6 +323,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
741 + "failed to create id group.\n");
742 + }
743 +
744 ++ synchronize_srcu(&ns->head->srcu);
745 + kblockd_schedule_work(&ns->head->requeue_work);
746 + }
747 +
748 +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
749 +index f8f4d3ea67f3..15d493f30810 100644
750 +--- a/drivers/scsi/qla2xxx/qla_attr.c
751 ++++ b/drivers/scsi/qla2xxx/qla_attr.c
752 +@@ -2191,6 +2191,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
753 + dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
754 + vha->gnl.ldma);
755 +
756 ++ vha->gnl.l = NULL;
757 ++
758 + vfree(vha->scan.l);
759 +
760 + if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
761 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
762 +index 42b8f0d3e580..02fa81f122c2 100644
763 +--- a/drivers/scsi/qla2xxx/qla_os.c
764 ++++ b/drivers/scsi/qla2xxx/qla_os.c
765 +@@ -3395,6 +3395,12 @@ skip_dpc:
766 + return 0;
767 +
768 + probe_failed:
769 ++ if (base_vha->gnl.l) {
770 ++ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
771 ++ base_vha->gnl.l, base_vha->gnl.ldma);
772 ++ base_vha->gnl.l = NULL;
773 ++ }
774 ++
775 + if (base_vha->timer_active)
776 + qla2x00_stop_timer(base_vha);
777 + base_vha->flags.online = 0;
778 +@@ -3624,7 +3630,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
779 + if (!atomic_read(&pdev->enable_cnt)) {
780 + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
781 + base_vha->gnl.l, base_vha->gnl.ldma);
782 +-
783 ++ base_vha->gnl.l = NULL;
784 + scsi_host_put(base_vha->host);
785 + kfree(ha);
786 + pci_set_drvdata(pdev, NULL);
787 +@@ -3663,6 +3669,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
788 + dma_free_coherent(&ha->pdev->dev,
789 + base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
790 +
791 ++ base_vha->gnl.l = NULL;
792 ++
793 + vfree(base_vha->scan.l);
794 +
795 + if (IS_QLAFX00(ha))
796 +@@ -4602,6 +4610,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
797 + "Alloc failed for scan database.\n");
798 + dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
799 + vha->gnl.l, vha->gnl.ldma);
800 ++ vha->gnl.l = NULL;
801 + scsi_remove_host(vha->host);
802 + return NULL;
803 + }
804 +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
805 +index 3094d818cf06..12c1fa5b06c5 100644
806 +--- a/drivers/spi/spi-bcm2835aux.c
807 ++++ b/drivers/spi/spi-bcm2835aux.c
808 +@@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
809 + BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
810 + }
811 +
812 +-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
813 ++static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
814 + {
815 +- struct spi_master *master = dev_id;
816 +- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
817 +- irqreturn_t ret = IRQ_NONE;
818 +-
819 +- /* IRQ may be shared, so return if our interrupts are disabled */
820 +- if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
821 +- (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
822 +- return ret;
823 ++ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
824 +
825 + /* check if we have data to read */
826 +- while (bs->rx_len &&
827 +- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
828 +- BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
829 ++ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
830 ++ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
831 + bcm2835aux_rd_fifo(bs);
832 +- ret = IRQ_HANDLED;
833 +- }
834 +
835 + /* check if we have data to write */
836 + while (bs->tx_len &&
837 +@@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
838 + (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
839 + BCM2835_AUX_SPI_STAT_TX_FULL))) {
840 + bcm2835aux_wr_fifo(bs);
841 +- ret = IRQ_HANDLED;
842 + }
843 ++}
844 +
845 +- /* and check if we have reached "done" */
846 +- while (bs->rx_len &&
847 +- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
848 +- BCM2835_AUX_SPI_STAT_BUSY))) {
849 +- bcm2835aux_rd_fifo(bs);
850 +- ret = IRQ_HANDLED;
851 +- }
852 ++static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
853 ++{
854 ++ struct spi_master *master = dev_id;
855 ++ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
856 ++
857 ++ /* IRQ may be shared, so return if our interrupts are disabled */
858 ++ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
859 ++ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
860 ++ return IRQ_NONE;
861 ++
862 ++ /* do common fifo handling */
863 ++ bcm2835aux_spi_transfer_helper(bs);
864 +
865 + if (!bs->tx_len) {
866 + /* disable tx fifo empty interrupt */
867 +@@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
868 + complete(&master->xfer_completion);
869 + }
870 +
871 +- /* and return */
872 +- return ret;
873 ++ return IRQ_HANDLED;
874 + }
875 +
876 + static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
877 +@@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
878 + {
879 + struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
880 + unsigned long timeout;
881 +- u32 stat;
882 +
883 + /* configure spi */
884 + bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
885 +@@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
886 +
887 + /* loop until finished the transfer */
888 + while (bs->rx_len) {
889 +- /* read status */
890 +- stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
891 +-
892 +- /* fill in tx fifo with remaining data */
893 +- if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
894 +- bcm2835aux_wr_fifo(bs);
895 +- continue;
896 +- }
897 +
898 +- /* read data from fifo for both cases */
899 +- if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
900 +- bcm2835aux_rd_fifo(bs);
901 +- continue;
902 +- }
903 +- if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
904 +- bcm2835aux_rd_fifo(bs);
905 +- continue;
906 +- }
907 ++ /* do common fifo handling */
908 ++ bcm2835aux_spi_transfer_helper(bs);
909 +
910 + /* there is still data pending to read check the timeout */
911 + if (bs->rx_len && time_after(jiffies, timeout)) {
912 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
913 +index c46efa47d68a..7159e8363b83 100644
914 +--- a/drivers/target/target_core_user.c
915 ++++ b/drivers/target/target_core_user.c
916 +@@ -1143,14 +1143,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
917 + struct se_cmd *se_cmd = cmd->se_cmd;
918 + struct tcmu_dev *udev = cmd->tcmu_dev;
919 + bool read_len_valid = false;
920 +- uint32_t read_len = se_cmd->data_length;
921 ++ uint32_t read_len;
922 +
923 + /*
924 + * cmd has been completed already from timeout, just reclaim
925 + * data area space and free cmd
926 + */
927 +- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
928 ++ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
929 ++ WARN_ON_ONCE(se_cmd);
930 + goto out;
931 ++ }
932 +
933 + list_del_init(&cmd->queue_entry);
934 +
935 +@@ -1163,6 +1165,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
936 + goto done;
937 + }
938 +
939 ++ read_len = se_cmd->data_length;
940 + if (se_cmd->data_direction == DMA_FROM_DEVICE &&
941 + (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
942 + read_len_valid = true;
943 +@@ -1318,6 +1321,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
944 + */
945 + scsi_status = SAM_STAT_CHECK_CONDITION;
946 + list_del_init(&cmd->queue_entry);
947 ++ cmd->se_cmd = NULL;
948 + } else {
949 + list_del_init(&cmd->queue_entry);
950 + idr_remove(&udev->commands, id);
951 +@@ -2036,6 +2040,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
952 +
953 + idr_remove(&udev->commands, i);
954 + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
955 ++ WARN_ON(!cmd->se_cmd);
956 + list_del_init(&cmd->queue_entry);
957 + if (err_level == 1) {
958 + /*
959 +diff --git a/fs/afs/cell.c b/fs/afs/cell.c
960 +index 6127f0fcd62c..ee07162d35c7 100644
961 +--- a/fs/afs/cell.c
962 ++++ b/fs/afs/cell.c
963 +@@ -76,6 +76,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
964 + cell = rcu_dereference_raw(net->ws_cell);
965 + if (cell) {
966 + afs_get_cell(cell);
967 ++ ret = 0;
968 + break;
969 + }
970 + ret = -EDESTADDRREQ;
971 +@@ -110,6 +111,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
972 +
973 + done_seqretry(&net->cells_lock, seq);
974 +
975 ++ if (ret != 0 && cell)
976 ++ afs_put_cell(net, cell);
977 ++
978 + return ret == 0 ? cell : ERR_PTR(ret);
979 + }
980 +
981 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
982 +index a11fa0b6b34d..db547af01b59 100644
983 +--- a/fs/ceph/caps.c
984 ++++ b/fs/ceph/caps.c
985 +@@ -1280,6 +1280,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
986 + {
987 + struct ceph_inode_info *ci = cap->ci;
988 + struct inode *inode = &ci->vfs_inode;
989 ++ struct ceph_buffer *old_blob = NULL;
990 + struct cap_msg_args arg;
991 + int held, revoking;
992 + int wake = 0;
993 +@@ -1344,7 +1345,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
994 + ci->i_requested_max_size = arg.max_size;
995 +
996 + if (flushing & CEPH_CAP_XATTR_EXCL) {
997 +- __ceph_build_xattrs_blob(ci);
998 ++ old_blob = __ceph_build_xattrs_blob(ci);
999 + arg.xattr_version = ci->i_xattrs.version;
1000 + arg.xattr_buf = ci->i_xattrs.blob;
1001 + } else {
1002 +@@ -1379,6 +1380,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1003 +
1004 + spin_unlock(&ci->i_ceph_lock);
1005 +
1006 ++ ceph_buffer_put(old_blob);
1007 ++
1008 + ret = send_cap_msg(&arg);
1009 + if (ret < 0) {
1010 + dout("error sending cap msg, must requeue %p\n", inode);
1011 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
1012 +index 3e518c2ae2bf..11f19432a74c 100644
1013 +--- a/fs/ceph/inode.c
1014 ++++ b/fs/ceph/inode.c
1015 +@@ -742,6 +742,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1016 + int issued, new_issued, info_caps;
1017 + struct timespec64 mtime, atime, ctime;
1018 + struct ceph_buffer *xattr_blob = NULL;
1019 ++ struct ceph_buffer *old_blob = NULL;
1020 + struct ceph_string *pool_ns = NULL;
1021 + struct ceph_cap *new_cap = NULL;
1022 + int err = 0;
1023 +@@ -878,7 +879,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1024 + if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
1025 + le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1026 + if (ci->i_xattrs.blob)
1027 +- ceph_buffer_put(ci->i_xattrs.blob);
1028 ++ old_blob = ci->i_xattrs.blob;
1029 + ci->i_xattrs.blob = xattr_blob;
1030 + if (xattr_blob)
1031 + memcpy(ci->i_xattrs.blob->vec.iov_base,
1032 +@@ -1017,8 +1018,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1033 + out:
1034 + if (new_cap)
1035 + ceph_put_cap(mdsc, new_cap);
1036 +- if (xattr_blob)
1037 +- ceph_buffer_put(xattr_blob);
1038 ++ ceph_buffer_put(old_blob);
1039 ++ ceph_buffer_put(xattr_blob);
1040 + ceph_put_string(pool_ns);
1041 + return err;
1042 + }
1043 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
1044 +index 1f46b02f7314..5cf7b5f4db94 100644
1045 +--- a/fs/ceph/snap.c
1046 ++++ b/fs/ceph/snap.c
1047 +@@ -460,6 +460,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1048 + struct inode *inode = &ci->vfs_inode;
1049 + struct ceph_cap_snap *capsnap;
1050 + struct ceph_snap_context *old_snapc, *new_snapc;
1051 ++ struct ceph_buffer *old_blob = NULL;
1052 + int used, dirty;
1053 +
1054 + capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
1055 +@@ -536,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1056 + capsnap->gid = inode->i_gid;
1057 +
1058 + if (dirty & CEPH_CAP_XATTR_EXCL) {
1059 +- __ceph_build_xattrs_blob(ci);
1060 ++ old_blob = __ceph_build_xattrs_blob(ci);
1061 + capsnap->xattr_blob =
1062 + ceph_buffer_get(ci->i_xattrs.blob);
1063 + capsnap->xattr_version = ci->i_xattrs.version;
1064 +@@ -579,6 +580,7 @@ update_snapc:
1065 + }
1066 + spin_unlock(&ci->i_ceph_lock);
1067 +
1068 ++ ceph_buffer_put(old_blob);
1069 + kfree(capsnap);
1070 + ceph_put_snap_context(old_snapc);
1071 + }
1072 +diff --git a/fs/ceph/super.h b/fs/ceph/super.h
1073 +index d8579a56e5dc..018019309790 100644
1074 +--- a/fs/ceph/super.h
1075 ++++ b/fs/ceph/super.h
1076 +@@ -896,7 +896,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
1077 + int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
1078 + ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
1079 + extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
1080 +-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
1081 ++extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
1082 + extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
1083 + extern void __init ceph_xattr_init(void);
1084 + extern void ceph_xattr_exit(void);
1085 +diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
1086 +index 0a2d4898ee16..5e4f3f833e85 100644
1087 +--- a/fs/ceph/xattr.c
1088 ++++ b/fs/ceph/xattr.c
1089 +@@ -734,12 +734,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
1090 +
1091 + /*
1092 + * If there are dirty xattrs, reencode xattrs into the prealloc_blob
1093 +- * and swap into place.
1094 ++ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
1095 ++ * that it can be freed by the caller as the i_ceph_lock is likely to be
1096 ++ * held.
1097 + */
1098 +-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1099 ++struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1100 + {
1101 + struct rb_node *p;
1102 + struct ceph_inode_xattr *xattr = NULL;
1103 ++ struct ceph_buffer *old_blob = NULL;
1104 + void *dest;
1105 +
1106 + dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
1107 +@@ -770,12 +773,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
1108 + dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
1109 +
1110 + if (ci->i_xattrs.blob)
1111 +- ceph_buffer_put(ci->i_xattrs.blob);
1112 ++ old_blob = ci->i_xattrs.blob;
1113 + ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
1114 + ci->i_xattrs.prealloc_blob = NULL;
1115 + ci->i_xattrs.dirty = false;
1116 + ci->i_xattrs.version++;
1117 + }
1118 ++
1119 ++ return old_blob;
1120 + }
1121 +
1122 + static inline int __get_request_mask(struct inode *in) {
1123 +@@ -1011,6 +1016,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1124 + struct ceph_inode_info *ci = ceph_inode(inode);
1125 + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1126 + struct ceph_cap_flush *prealloc_cf = NULL;
1127 ++ struct ceph_buffer *old_blob = NULL;
1128 + int issued;
1129 + int err;
1130 + int dirty = 0;
1131 +@@ -1084,13 +1090,15 @@ retry:
1132 + struct ceph_buffer *blob;
1133 +
1134 + spin_unlock(&ci->i_ceph_lock);
1135 +- dout(" preaallocating new blob size=%d\n", required_blob_size);
1136 ++ ceph_buffer_put(old_blob); /* Shouldn't be required */
1137 ++ dout(" pre-allocating new blob size=%d\n", required_blob_size);
1138 + blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1139 + if (!blob)
1140 + goto do_sync_unlocked;
1141 + spin_lock(&ci->i_ceph_lock);
1142 ++ /* prealloc_blob can't be released while holding i_ceph_lock */
1143 + if (ci->i_xattrs.prealloc_blob)
1144 +- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
1145 ++ old_blob = ci->i_xattrs.prealloc_blob;
1146 + ci->i_xattrs.prealloc_blob = blob;
1147 + goto retry;
1148 + }
1149 +@@ -1106,6 +1114,7 @@ retry:
1150 + }
1151 +
1152 + spin_unlock(&ci->i_ceph_lock);
1153 ++ ceph_buffer_put(old_blob);
1154 + if (lock_snap_rwsem)
1155 + up_read(&mdsc->snap_rwsem);
1156 + if (dirty)
1157 +diff --git a/fs/read_write.c b/fs/read_write.c
1158 +index 85fd7a8ee29e..5fb5ee5b8cd7 100644
1159 +--- a/fs/read_write.c
1160 ++++ b/fs/read_write.c
1161 +@@ -1888,10 +1888,7 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1162 + }
1163 + EXPORT_SYMBOL(vfs_clone_file_range);
1164 +
1165 +-/*
1166 +- * Read a page's worth of file data into the page cache. Return the page
1167 +- * locked.
1168 +- */
1169 ++/* Read a page's worth of file data into the page cache. */
1170 + static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1171 + {
1172 + struct address_space *mapping;
1173 +@@ -1907,10 +1904,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1174 + put_page(page);
1175 + return ERR_PTR(-EIO);
1176 + }
1177 +- lock_page(page);
1178 + return page;
1179 + }
1180 +
1181 ++/*
1182 ++ * Lock two pages, ensuring that we lock in offset order if the pages are from
1183 ++ * the same file.
1184 ++ */
1185 ++static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1186 ++{
1187 ++ /* Always lock in order of increasing index. */
1188 ++ if (page1->index > page2->index)
1189 ++ swap(page1, page2);
1190 ++
1191 ++ lock_page(page1);
1192 ++ if (page1 != page2)
1193 ++ lock_page(page2);
1194 ++}
1195 ++
1196 ++/* Unlock two pages, being careful not to unlock the same page twice. */
1197 ++static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1198 ++{
1199 ++ unlock_page(page1);
1200 ++ if (page1 != page2)
1201 ++ unlock_page(page2);
1202 ++}
1203 ++
1204 + /*
1205 + * Compare extents of two files to see if they are the same.
1206 + * Caller must have locked both inodes to prevent write races.
1207 +@@ -1948,10 +1967,24 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1208 + dest_page = vfs_dedupe_get_page(dest, destoff);
1209 + if (IS_ERR(dest_page)) {
1210 + error = PTR_ERR(dest_page);
1211 +- unlock_page(src_page);
1212 + put_page(src_page);
1213 + goto out_error;
1214 + }
1215 ++
1216 ++ vfs_lock_two_pages(src_page, dest_page);
1217 ++
1218 ++ /*
1219 ++ * Now that we've locked both pages, make sure they're still
1220 ++ * mapped to the file data we're interested in. If not,
1221 ++ * someone is invalidating pages on us and we lose.
1222 ++ */
1223 ++ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1224 ++ src_page->mapping != src->i_mapping ||
1225 ++ dest_page->mapping != dest->i_mapping) {
1226 ++ same = false;
1227 ++ goto unlock;
1228 ++ }
1229 ++
1230 + src_addr = kmap_atomic(src_page);
1231 + dest_addr = kmap_atomic(dest_page);
1232 +
1233 +@@ -1963,8 +1996,8 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1234 +
1235 + kunmap_atomic(dest_addr);
1236 + kunmap_atomic(src_addr);
1237 +- unlock_page(dest_page);
1238 +- unlock_page(src_page);
1239 ++unlock:
1240 ++ vfs_unlock_two_pages(src_page, dest_page);
1241 + put_page(dest_page);
1242 + put_page(src_page);
1243 +
1244 +diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
1245 +index 5e58bb29b1a3..11cdc7c60480 100644
1246 +--- a/include/linux/ceph/buffer.h
1247 ++++ b/include/linux/ceph/buffer.h
1248 +@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
1249 +
1250 + static inline void ceph_buffer_put(struct ceph_buffer *b)
1251 + {
1252 +- kref_put(&b->kref, ceph_buffer_release);
1253 ++ if (b)
1254 ++ kref_put(&b->kref, ceph_buffer_release);
1255 + }
1256 +
1257 + extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
1258 +diff --git a/include/linux/gpio.h b/include/linux/gpio.h
1259 +index 39745b8bdd65..b3115d1a7d49 100644
1260 +--- a/include/linux/gpio.h
1261 ++++ b/include/linux/gpio.h
1262 +@@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
1263 + return -EINVAL;
1264 + }
1265 +
1266 +-static inline int
1267 +-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
1268 +- unsigned int gpio_offset, unsigned int pin_offset,
1269 +- unsigned int npins)
1270 +-{
1271 +- WARN_ON(1);
1272 +- return -EINVAL;
1273 +-}
1274 +-
1275 +-static inline int
1276 +-gpiochip_add_pingroup_range(struct gpio_chip *chip,
1277 +- struct pinctrl_dev *pctldev,
1278 +- unsigned int gpio_offset, const char *pin_group)
1279 +-{
1280 +- WARN_ON(1);
1281 +- return -EINVAL;
1282 +-}
1283 +-
1284 +-static inline void
1285 +-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
1286 +-{
1287 +- WARN_ON(1);
1288 +-}
1289 +-
1290 + static inline int devm_gpio_request(struct device *dev, unsigned gpio,
1291 + const char *label)
1292 + {
1293 +diff --git a/include/net/act_api.h b/include/net/act_api.h
1294 +index 970303448c90..0c82d7ea6ee1 100644
1295 +--- a/include/net/act_api.h
1296 ++++ b/include/net/act_api.h
1297 +@@ -15,6 +15,7 @@
1298 + struct tcf_idrinfo {
1299 + spinlock_t lock;
1300 + struct idr action_idr;
1301 ++ struct net *net;
1302 + };
1303 +
1304 + struct tc_action_ops;
1305 +@@ -107,7 +108,7 @@ struct tc_action_net {
1306 + };
1307 +
1308 + static inline
1309 +-int tc_action_net_init(struct tc_action_net *tn,
1310 ++int tc_action_net_init(struct net *net, struct tc_action_net *tn,
1311 + const struct tc_action_ops *ops)
1312 + {
1313 + int err = 0;
1314 +@@ -116,6 +117,7 @@ int tc_action_net_init(struct tc_action_net *tn,
1315 + if (!tn->idrinfo)
1316 + return -ENOMEM;
1317 + tn->ops = ops;
1318 ++ tn->idrinfo->net = net;
1319 + spin_lock_init(&tn->idrinfo->lock);
1320 + idr_init(&tn->idrinfo->action_idr);
1321 + return err;
1322 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
1323 +index f2be5d041ba3..7685cbda9f28 100644
1324 +--- a/include/net/netfilter/nf_tables.h
1325 ++++ b/include/net/netfilter/nf_tables.h
1326 +@@ -418,8 +418,7 @@ struct nft_set {
1327 + unsigned char *udata;
1328 + /* runtime data below here */
1329 + const struct nft_set_ops *ops ____cacheline_aligned;
1330 +- u16 flags:13,
1331 +- bound:1,
1332 ++ u16 flags:14,
1333 + genmask:2;
1334 + u8 klen;
1335 + u8 dlen;
1336 +@@ -1337,12 +1336,15 @@ struct nft_trans_rule {
1337 + struct nft_trans_set {
1338 + struct nft_set *set;
1339 + u32 set_id;
1340 ++ bool bound;
1341 + };
1342 +
1343 + #define nft_trans_set(trans) \
1344 + (((struct nft_trans_set *)trans->data)->set)
1345 + #define nft_trans_set_id(trans) \
1346 + (((struct nft_trans_set *)trans->data)->set_id)
1347 ++#define nft_trans_set_bound(trans) \
1348 ++ (((struct nft_trans_set *)trans->data)->bound)
1349 +
1350 + struct nft_trans_chain {
1351 + bool update;
1352 +@@ -1373,12 +1375,15 @@ struct nft_trans_table {
1353 + struct nft_trans_elem {
1354 + struct nft_set *set;
1355 + struct nft_set_elem elem;
1356 ++ bool bound;
1357 + };
1358 +
1359 + #define nft_trans_elem_set(trans) \
1360 + (((struct nft_trans_elem *)trans->data)->set)
1361 + #define nft_trans_elem(trans) \
1362 + (((struct nft_trans_elem *)trans->data)->elem)
1363 ++#define nft_trans_elem_set_bound(trans) \
1364 ++ (((struct nft_trans_elem *)trans->data)->bound)
1365 +
1366 + struct nft_trans_obj {
1367 + struct nft_object *obj;
1368 +diff --git a/include/net/psample.h b/include/net/psample.h
1369 +index 9b80f814ab04..94cb37a7bf75 100644
1370 +--- a/include/net/psample.h
1371 ++++ b/include/net/psample.h
1372 +@@ -12,6 +12,7 @@ struct psample_group {
1373 + u32 group_num;
1374 + u32 refcount;
1375 + u32 seq;
1376 ++ struct rcu_head rcu;
1377 + };
1378 +
1379 + struct psample_group *psample_group_get(struct net *net, u32 group_num);
1380 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1381 +index 29ff6635d259..714d63f60460 100644
1382 +--- a/kernel/kprobes.c
1383 ++++ b/kernel/kprobes.c
1384 +@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
1385 + */
1386 + static void do_optimize_kprobes(void)
1387 + {
1388 ++ lockdep_assert_held(&text_mutex);
1389 + /*
1390 + * The optimization/unoptimization refers online_cpus via
1391 + * stop_machine() and cpu-hotplug modifies online_cpus.
1392 +@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
1393 + list_empty(&optimizing_list))
1394 + return;
1395 +
1396 +- mutex_lock(&text_mutex);
1397 + arch_optimize_kprobes(&optimizing_list);
1398 +- mutex_unlock(&text_mutex);
1399 + }
1400 +
1401 + /*
1402 +@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
1403 + {
1404 + struct optimized_kprobe *op, *tmp;
1405 +
1406 ++ lockdep_assert_held(&text_mutex);
1407 + /* See comment in do_optimize_kprobes() */
1408 + lockdep_assert_cpus_held();
1409 +
1410 +@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
1411 + if (list_empty(&unoptimizing_list))
1412 + return;
1413 +
1414 +- mutex_lock(&text_mutex);
1415 + arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
1416 + /* Loop free_list for disarming */
1417 + list_for_each_entry_safe(op, tmp, &freeing_list, list) {
1418 +@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
1419 + } else
1420 + list_del_init(&op->list);
1421 + }
1422 +- mutex_unlock(&text_mutex);
1423 + }
1424 +
1425 + /* Reclaim all kprobes on the free_list */
1426 +@@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
1427 + {
1428 + mutex_lock(&kprobe_mutex);
1429 + cpus_read_lock();
1430 ++ mutex_lock(&text_mutex);
1431 + /* Lock modules while optimizing kprobes */
1432 + mutex_lock(&module_mutex);
1433 +
1434 +@@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
1435 + do_free_cleaned_kprobes();
1436 +
1437 + mutex_unlock(&module_mutex);
1438 ++ mutex_unlock(&text_mutex);
1439 + cpus_read_unlock();
1440 + mutex_unlock(&kprobe_mutex);
1441 +
1442 +diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1443 +index 3ae899805f8b..a581cf101cd9 100644
1444 +--- a/net/core/netpoll.c
1445 ++++ b/net/core/netpoll.c
1446 +@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
1447 + txq = netdev_get_tx_queue(dev, q_index);
1448 + HARD_TX_LOCK(dev, txq, smp_processor_id());
1449 + if (netif_xmit_frozen_or_stopped(txq) ||
1450 +- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
1451 ++ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
1452 + skb_queue_head(&npinfo->txq, skb);
1453 + HARD_TX_UNLOCK(dev, txq);
1454 + local_irq_restore(flags);
1455 +@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1456 +
1457 + HARD_TX_UNLOCK(dev, txq);
1458 +
1459 +- if (status == NETDEV_TX_OK)
1460 ++ if (dev_xmit_complete(status))
1461 + break;
1462 +
1463 + }
1464 +@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
1465 +
1466 + }
1467 +
1468 +- if (status != NETDEV_TX_OK) {
1469 ++ if (!dev_xmit_complete(status)) {
1470 + skb_queue_tail(&npinfo->txq, skb);
1471 + schedule_delayed_work(&npinfo->tx_work,0);
1472 + }
1473 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1474 +index b7ef367fe6a1..611ba174265c 100644
1475 +--- a/net/ipv4/tcp.c
1476 ++++ b/net/ipv4/tcp.c
1477 +@@ -934,6 +934,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
1478 + return mss_now;
1479 + }
1480 +
1481 ++/* In some cases, both sendpage() and sendmsg() could have added
1482 ++ * an skb to the write queue, but failed adding payload on it.
1483 ++ * We need to remove it to consume less memory, but more
1484 ++ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
1485 ++ * users.
1486 ++ */
1487 ++static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
1488 ++{
1489 ++ if (skb && !skb->len) {
1490 ++ tcp_unlink_write_queue(skb, sk);
1491 ++ if (tcp_write_queue_empty(sk))
1492 ++ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1493 ++ sk_wmem_free_skb(sk, skb);
1494 ++ }
1495 ++}
1496 ++
1497 + ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
1498 + size_t size, int flags)
1499 + {
1500 +@@ -1056,6 +1072,7 @@ out:
1501 + return copied;
1502 +
1503 + do_error:
1504 ++ tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1505 + if (copied)
1506 + goto out;
1507 + out_err:
1508 +@@ -1409,17 +1426,11 @@ out_nopush:
1509 + sock_zerocopy_put(uarg);
1510 + return copied + copied_syn;
1511 +
1512 ++do_error:
1513 ++ skb = tcp_write_queue_tail(sk);
1514 + do_fault:
1515 +- if (!skb->len) {
1516 +- tcp_unlink_write_queue(skb, sk);
1517 +- /* It is the one place in all of TCP, except connection
1518 +- * reset, where we can be unlinking the send_head.
1519 +- */
1520 +- tcp_check_send_head(sk, skb);
1521 +- sk_wmem_free_skb(sk, skb);
1522 +- }
1523 ++ tcp_remove_empty_skb(sk, skb);
1524 +
1525 +-do_error:
1526 + if (copied + copied_syn)
1527 + goto out;
1528 + out_err:
1529 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1530 +index 88c7e821fd11..2697e4397e46 100644
1531 +--- a/net/ipv4/tcp_output.c
1532 ++++ b/net/ipv4/tcp_output.c
1533 +@@ -2046,7 +2046,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
1534 + if (len <= skb->len)
1535 + break;
1536 +
1537 +- if (unlikely(TCP_SKB_CB(skb)->eor))
1538 ++ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
1539 + return false;
1540 +
1541 + len -= skb->len;
1542 +@@ -2162,6 +2162,7 @@ static int tcp_mtu_probe(struct sock *sk)
1543 + * we need to propagate it to the new skb.
1544 + */
1545 + TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
1546 ++ tcp_skb_collapse_tstamp(nskb, skb);
1547 + tcp_unlink_write_queue(skb, sk);
1548 + sk_wmem_free_skb(sk, skb);
1549 + } else {
1550 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1551 +index dbab62e3f0d7..2d80e913b82f 100644
1552 +--- a/net/ipv6/mcast.c
1553 ++++ b/net/ipv6/mcast.c
1554 +@@ -791,14 +791,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
1555 + if (pmc) {
1556 + im->idev = pmc->idev;
1557 + if (im->mca_sfmode == MCAST_INCLUDE) {
1558 +- im->mca_tomb = pmc->mca_tomb;
1559 +- im->mca_sources = pmc->mca_sources;
1560 ++ swap(im->mca_tomb, pmc->mca_tomb);
1561 ++ swap(im->mca_sources, pmc->mca_sources);
1562 + for (psf = im->mca_sources; psf; psf = psf->sf_next)
1563 + psf->sf_crcount = idev->mc_qrv;
1564 + } else {
1565 + im->mca_crcount = idev->mc_qrv;
1566 + }
1567 + in6_dev_put(pmc->idev);
1568 ++ ip6_mc_clear_src(pmc);
1569 + kfree(pmc);
1570 + }
1571 + spin_unlock_bh(&im->mca_lock);
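
The old code overwrote im->mca_tomb and im->mca_sources with the dying pmc's lists, leaking whatever source lists im already held; swap() moves im's old lists into pmc instead, and the added ip6_mc_clear_src(pmc) frees them before kfree(pmc). swap() here is the generic kernel macro (sketch, as in include/linux/kernel.h of this era):

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
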
1572 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
1573 +index 29ff59dd99ac..2145581d7b3d 100644
1574 +--- a/net/netfilter/nf_tables_api.c
1575 ++++ b/net/netfilter/nf_tables_api.c
1576 +@@ -121,9 +121,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
1577 + return;
1578 +
1579 + list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
1580 +- if (trans->msg_type == NFT_MSG_NEWSET &&
1581 +- nft_trans_set(trans) == set) {
1582 +- set->bound = true;
1583 ++ switch (trans->msg_type) {
1584 ++ case NFT_MSG_NEWSET:
1585 ++ if (nft_trans_set(trans) == set)
1586 ++ nft_trans_set_bound(trans) = true;
1587 ++ break;
1588 ++ case NFT_MSG_NEWSETELEM:
1589 ++ if (nft_trans_elem_set(trans) == set)
1590 ++ nft_trans_elem_set_bound(trans) = true;
1591 + break;
1592 + }
1593 + }
1594 +@@ -6656,7 +6661,7 @@ static int __nf_tables_abort(struct net *net)
1595 + break;
1596 + case NFT_MSG_NEWSET:
1597 + trans->ctx.table->use--;
1598 +- if (nft_trans_set(trans)->bound) {
1599 ++ if (nft_trans_set_bound(trans)) {
1600 + nft_trans_destroy(trans);
1601 + break;
1602 + }
1603 +@@ -6668,7 +6673,7 @@ static int __nf_tables_abort(struct net *net)
1604 + nft_trans_destroy(trans);
1605 + break;
1606 + case NFT_MSG_NEWSETELEM:
1607 +- if (nft_trans_elem_set(trans)->bound) {
1608 ++ if (nft_trans_elem_set_bound(trans)) {
1609 + nft_trans_destroy(trans);
1610 + break;
1611 + }
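
The binding state moves from the set object itself onto the individual transactions, and NFT_MSG_NEWSETELEM transactions are now flagged as well, so __nf_tables_abort() destroys exactly the transactions whose set was bound in this batch rather than consulting a flag on the (possibly shared) set. The accessors presumably follow the usual nft_trans cast style, roughly (a sketch of include/net/netfilter/nf_tables.h; the bound flag now lives in the per-transaction payload):

#define nft_trans_set(trans) \
	(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_bound(trans) \
	(((struct nft_trans_set *)trans->data)->bound)
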
1612 +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
1613 +index 6e0c26025ab1..69decbe2c988 100644
1614 +--- a/net/netfilter/nft_flow_offload.c
1615 ++++ b/net/netfilter/nft_flow_offload.c
1616 +@@ -71,11 +71,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1617 + {
1618 + struct nft_flow_offload *priv = nft_expr_priv(expr);
1619 + struct nf_flowtable *flowtable = &priv->flowtable->data;
1620 ++ struct tcphdr _tcph, *tcph = NULL;
1621 + enum ip_conntrack_info ctinfo;
1622 + struct nf_flow_route route;
1623 + struct flow_offload *flow;
1624 + enum ip_conntrack_dir dir;
1625 +- bool is_tcp = false;
1626 + struct nf_conn *ct;
1627 + int ret;
1628 +
1629 +@@ -88,7 +88,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1630 +
1631 + switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
1632 + case IPPROTO_TCP:
1633 +- is_tcp = true;
1634 ++ tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
1635 ++ sizeof(_tcph), &_tcph);
1636 ++ if (unlikely(!tcph || tcph->fin || tcph->rst))
1637 ++ goto out;
1638 + break;
1639 + case IPPROTO_UDP:
1640 + break;
1641 +@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
1642 + if (!flow)
1643 + goto err_flow_alloc;
1644 +
1645 +- if (is_tcp) {
1646 ++ if (tcph) {
1647 + ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
1648 + ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
1649 + }
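
Rather than tracking a bare is_tcp flag, the evaluation now pulls the real TCP header with skb_header_pointer() and declines to offload a flow whose packet already carries FIN or RST, since such a connection is tearing down and should stay under normal conntrack supervision. The helper's contract, as a usage sketch (skb and thoff assumed from the surrounding context; the real function lives in include/linux/skbuff.h):

/* Returns a pointer to len bytes at offset; for a non-linear skb the
 * bytes are copied into the caller-supplied buffer, and NULL is
 * returned when the packet is too short. */
struct tcphdr _tcph;
const struct tcphdr *tcph;

tcph = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
if (!tcph)
	return;		/* truncated: treat as not offloadable */
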
1650 +diff --git a/net/psample/psample.c b/net/psample/psample.c
1651 +index 64f95624f219..4cea353221da 100644
1652 +--- a/net/psample/psample.c
1653 ++++ b/net/psample/psample.c
1654 +@@ -156,7 +156,7 @@ static void psample_group_destroy(struct psample_group *group)
1655 + {
1656 + psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
1657 + list_del(&group->list);
1658 +- kfree(group);
1659 ++ kfree_rcu(group, rcu);
1660 + }
1661 +
1662 + static struct psample_group *
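
psample groups are looked up under rcu_read_lock() by samplers, so kfree() immediately after list_del() can free memory a reader is still traversing; kfree_rcu() defers the free past a grace period. It requires an rcu_head embedded in the object, which this same series presumably adds to struct psample_group elsewhere in the patch (a sketch, with assumed field placement):

struct psample_group {
	struct list_head list;
	struct net *net;
	u32 group_num;
	u32 refcount;
	u32 seq;
	struct rcu_head rcu;	/* enables kfree_rcu(group, rcu) */
};
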
1663 +diff --git a/net/rds/recv.c b/net/rds/recv.c
1664 +index 504cd6bcc54c..c0b945516cdb 100644
1665 +--- a/net/rds/recv.c
1666 ++++ b/net/rds/recv.c
1667 +@@ -1,5 +1,5 @@
1668 + /*
1669 +- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
1670 ++ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
1671 + *
1672 + * This software is available to you under a choice of one of two
1673 + * licenses. You may choose to be licensed under the terms of the GNU
1674 +@@ -803,6 +803,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
1675 +
1676 + minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
1677 + minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
1678 ++ minfo6.tos = 0;
1679 +
1680 + if (flip) {
1681 + minfo6.laddr = *daddr;
1682 +@@ -816,6 +817,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
1683 + minfo6.fport = inc->i_hdr.h_dport;
1684 + }
1685 +
1686 ++ minfo6.flags = 0;
1687 ++
1688 + rds_info_copy(iter, &minfo6, sizeof(minfo6));
1689 + }
1690 + #endif
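
minfo6 is a stack structure copied wholesale to userspace by rds_info_copy(), so the previously unwritten tos and flags fields leaked kernel stack bytes; the added assignments zero them explicitly. Zeroing the whole struct up front is the equivalent defensive form (a sketch, assuming the uapi type used here is struct rds6_info_message):

struct rds6_info_message minfo6;

/* No field (or padding byte) can leak stack contents this way. */
memset(&minfo6, 0, sizeof(minfo6));
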
1691 +diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
1692 +index 20fae5ca87fa..800846d77a56 100644
1693 +--- a/net/sched/act_bpf.c
1694 ++++ b/net/sched/act_bpf.c
1695 +@@ -413,7 +413,7 @@ static __net_init int bpf_init_net(struct net *net)
1696 + {
1697 + struct tc_action_net *tn = net_generic(net, bpf_net_id);
1698 +
1699 +- return tc_action_net_init(tn, &act_bpf_ops);
1700 ++ return tc_action_net_init(net, tn, &act_bpf_ops);
1701 + }
1702 +
1703 + static void __net_exit bpf_exit_net(struct list_head *net_list)
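
This is the first instance of a signature change that repeats verbatim in every act_*.c init below (connmark, csum, gact, ife, ipt/xt, mirred, nat, pedit, police, sample, simple, skbedit, skbmod, tunnel_key, vlan): tc_action_net_init() now receives the struct net so it can be stashed in the action's idrinfo, and the act_ipt.c hunks further down show why, passing a->idrinfo->net into xt_tgdtor_param so a target's destructor runs against the right namespace. The post-patch helper presumably looks like this (a sketch of include/net/act_api.h, not the authoritative definition):

static inline int tc_action_net_init(struct net *net,
				     struct tc_action_net *tn,
				     const struct tc_action_ops *ops)
{
	tn->idrinfo = kmalloc(sizeof(*tn->idrinfo), GFP_KERNEL);
	if (!tn->idrinfo)
		return -ENOMEM;
	tn->ops = ops;
	tn->idrinfo->net = net;	/* new: remembered for teardown paths */
	spin_lock_init(&tn->idrinfo->lock);
	idr_init(&tn->idrinfo->idr);
	return 0;
}
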
1704 +diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
1705 +index 605436747978..538dedd84e21 100644
1706 +--- a/net/sched/act_connmark.c
1707 ++++ b/net/sched/act_connmark.c
1708 +@@ -215,7 +215,7 @@ static __net_init int connmark_init_net(struct net *net)
1709 + {
1710 + struct tc_action_net *tn = net_generic(net, connmark_net_id);
1711 +
1712 +- return tc_action_net_init(tn, &act_connmark_ops);
1713 ++ return tc_action_net_init(net, tn, &act_connmark_ops);
1714 + }
1715 +
1716 + static void __net_exit connmark_exit_net(struct list_head *net_list)
1717 +diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
1718 +index 40437197e053..1e269441065a 100644
1719 +--- a/net/sched/act_csum.c
1720 ++++ b/net/sched/act_csum.c
1721 +@@ -678,7 +678,7 @@ static __net_init int csum_init_net(struct net *net)
1722 + {
1723 + struct tc_action_net *tn = net_generic(net, csum_net_id);
1724 +
1725 +- return tc_action_net_init(tn, &act_csum_ops);
1726 ++ return tc_action_net_init(net, tn, &act_csum_ops);
1727 + }
1728 +
1729 + static void __net_exit csum_exit_net(struct list_head *net_list)
1730 +diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
1731 +index 72d3347bdd41..dfef9621375e 100644
1732 +--- a/net/sched/act_gact.c
1733 ++++ b/net/sched/act_gact.c
1734 +@@ -263,7 +263,7 @@ static __net_init int gact_init_net(struct net *net)
1735 + {
1736 + struct tc_action_net *tn = net_generic(net, gact_net_id);
1737 +
1738 +- return tc_action_net_init(tn, &act_gact_ops);
1739 ++ return tc_action_net_init(net, tn, &act_gact_ops);
1740 + }
1741 +
1742 + static void __net_exit gact_exit_net(struct list_head *net_list)
1743 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1744 +index 24047e0e5db0..bac353bea02f 100644
1745 +--- a/net/sched/act_ife.c
1746 ++++ b/net/sched/act_ife.c
1747 +@@ -887,7 +887,7 @@ static __net_init int ife_init_net(struct net *net)
1748 + {
1749 + struct tc_action_net *tn = net_generic(net, ife_net_id);
1750 +
1751 +- return tc_action_net_init(tn, &act_ife_ops);
1752 ++ return tc_action_net_init(net, tn, &act_ife_ops);
1753 + }
1754 +
1755 + static void __net_exit ife_exit_net(struct list_head *net_list)
1756 +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
1757 +index 334f3a057671..01d3669ef498 100644
1758 +--- a/net/sched/act_ipt.c
1759 ++++ b/net/sched/act_ipt.c
1760 +@@ -65,12 +65,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
1761 + return 0;
1762 + }
1763 +
1764 +-static void ipt_destroy_target(struct xt_entry_target *t)
1765 ++static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
1766 + {
1767 + struct xt_tgdtor_param par = {
1768 + .target = t->u.kernel.target,
1769 + .targinfo = t->data,
1770 + .family = NFPROTO_IPV4,
1771 ++ .net = net,
1772 + };
1773 + if (par.target->destroy != NULL)
1774 + par.target->destroy(&par);
1775 +@@ -82,7 +83,7 @@ static void tcf_ipt_release(struct tc_action *a)
1776 + struct tcf_ipt *ipt = to_ipt(a);
1777 +
1778 + if (ipt->tcfi_t) {
1779 +- ipt_destroy_target(ipt->tcfi_t);
1780 ++ ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
1781 + kfree(ipt->tcfi_t);
1782 + }
1783 + kfree(ipt->tcfi_tname);
1784 +@@ -182,7 +183,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
1785 +
1786 + spin_lock_bh(&ipt->tcf_lock);
1787 + if (ret != ACT_P_CREATED) {
1788 +- ipt_destroy_target(ipt->tcfi_t);
1789 ++ ipt_destroy_target(ipt->tcfi_t, net);
1790 + kfree(ipt->tcfi_tname);
1791 + kfree(ipt->tcfi_t);
1792 + }
1793 +@@ -353,7 +354,7 @@ static __net_init int ipt_init_net(struct net *net)
1794 + {
1795 + struct tc_action_net *tn = net_generic(net, ipt_net_id);
1796 +
1797 +- return tc_action_net_init(tn, &act_ipt_ops);
1798 ++ return tc_action_net_init(net, tn, &act_ipt_ops);
1799 + }
1800 +
1801 + static void __net_exit ipt_exit_net(struct list_head *net_list)
1802 +@@ -403,7 +404,7 @@ static __net_init int xt_init_net(struct net *net)
1803 + {
1804 + struct tc_action_net *tn = net_generic(net, xt_net_id);
1805 +
1806 +- return tc_action_net_init(tn, &act_xt_ops);
1807 ++ return tc_action_net_init(net, tn, &act_xt_ops);
1808 + }
1809 +
1810 + static void __net_exit xt_exit_net(struct list_head *net_list)
1811 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
1812 +index 548614bd9366..399e3beae6cf 100644
1813 +--- a/net/sched/act_mirred.c
1814 ++++ b/net/sched/act_mirred.c
1815 +@@ -419,7 +419,7 @@ static __net_init int mirred_init_net(struct net *net)
1816 + {
1817 + struct tc_action_net *tn = net_generic(net, mirred_net_id);
1818 +
1819 +- return tc_action_net_init(tn, &act_mirred_ops);
1820 ++ return tc_action_net_init(net, tn, &act_mirred_ops);
1821 + }
1822 +
1823 + static void __net_exit mirred_exit_net(struct list_head *net_list)
1824 +diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
1825 +index 619828920b97..d1b47a1b145c 100644
1826 +--- a/net/sched/act_nat.c
1827 ++++ b/net/sched/act_nat.c
1828 +@@ -317,7 +317,7 @@ static __net_init int nat_init_net(struct net *net)
1829 + {
1830 + struct tc_action_net *tn = net_generic(net, nat_net_id);
1831 +
1832 +- return tc_action_net_init(tn, &act_nat_ops);
1833 ++ return tc_action_net_init(net, tn, &act_nat_ops);
1834 + }
1835 +
1836 + static void __net_exit nat_exit_net(struct list_head *net_list)
1837 +diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
1838 +index 82d258b2a75a..33c0cc5ef229 100644
1839 +--- a/net/sched/act_pedit.c
1840 ++++ b/net/sched/act_pedit.c
1841 +@@ -488,7 +488,7 @@ static __net_init int pedit_init_net(struct net *net)
1842 + {
1843 + struct tc_action_net *tn = net_generic(net, pedit_net_id);
1844 +
1845 +- return tc_action_net_init(tn, &act_pedit_ops);
1846 ++ return tc_action_net_init(net, tn, &act_pedit_ops);
1847 + }
1848 +
1849 + static void __net_exit pedit_exit_net(struct list_head *net_list)
1850 +diff --git a/net/sched/act_police.c b/net/sched/act_police.c
1851 +index 997c34db1491..4db25959e156 100644
1852 +--- a/net/sched/act_police.c
1853 ++++ b/net/sched/act_police.c
1854 +@@ -342,7 +342,7 @@ static __net_init int police_init_net(struct net *net)
1855 + {
1856 + struct tc_action_net *tn = net_generic(net, police_net_id);
1857 +
1858 +- return tc_action_net_init(tn, &act_police_ops);
1859 ++ return tc_action_net_init(net, tn, &act_police_ops);
1860 + }
1861 +
1862 + static void __net_exit police_exit_net(struct list_head *net_list)
1863 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
1864 +index ac37654ca292..98635311a5a0 100644
1865 +--- a/net/sched/act_sample.c
1866 ++++ b/net/sched/act_sample.c
1867 +@@ -99,7 +99,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1868 + s->tcf_action = parm->action;
1869 + s->rate = rate;
1870 + s->psample_group_num = psample_group_num;
1871 +- RCU_INIT_POINTER(s->psample_group, psample_group);
1872 ++ rcu_swap_protected(s->psample_group, psample_group,
1873 ++ lockdep_is_held(&s->tcf_lock));
1874 +
1875 + if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
1876 + s->truncate = true;
1877 +@@ -107,6 +108,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
1878 + }
1879 + spin_unlock_bh(&s->tcf_lock);
1880 +
1881 ++ if (psample_group)
1882 ++ psample_group_put(psample_group);
1883 + if (ret == ACT_P_CREATED)
1884 + tcf_idr_insert(tn, *a);
1885 + return ret;
1886 +@@ -255,7 +258,7 @@ static __net_init int sample_init_net(struct net *net)
1887 + {
1888 + struct tc_action_net *tn = net_generic(net, sample_net_id);
1889 +
1890 +- return tc_action_net_init(tn, &act_sample_ops);
1891 ++ return tc_action_net_init(net, tn, &act_sample_ops);
1892 + }
1893 +
1894 + static void __net_exit sample_exit_net(struct list_head *net_list)
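
On an action update (ret != ACT_P_CREATED), RCU_INIT_POINTER() would overwrite the previous psample_group and leak its reference; rcu_swap_protected() exchanges the pointers under tcf_lock, leaving the old group in the local variable so the added psample_group_put() can drop it once the lock is released. The macro of this kernel generation reads roughly as follows (a sketch from memory; later kernels replaced it with rcu_replace_pointer()):

#define rcu_swap_protected(rcu_ptr, ptr, c) do {			\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	(ptr) = __tmp;							\
} while (0)
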
1895 +diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
1896 +index 658efae71a09..b418ef62e0a4 100644
1897 +--- a/net/sched/act_simple.c
1898 ++++ b/net/sched/act_simple.c
1899 +@@ -215,7 +215,7 @@ static __net_init int simp_init_net(struct net *net)
1900 + {
1901 + struct tc_action_net *tn = net_generic(net, simp_net_id);
1902 +
1903 +- return tc_action_net_init(tn, &act_simp_ops);
1904 ++ return tc_action_net_init(net, tn, &act_simp_ops);
1905 + }
1906 +
1907 + static void __net_exit simp_exit_net(struct list_head *net_list)
1908 +diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
1909 +index 7709710a41f7..a80179c1075f 100644
1910 +--- a/net/sched/act_skbedit.c
1911 ++++ b/net/sched/act_skbedit.c
1912 +@@ -316,7 +316,7 @@ static __net_init int skbedit_init_net(struct net *net)
1913 + {
1914 + struct tc_action_net *tn = net_generic(net, skbedit_net_id);
1915 +
1916 +- return tc_action_net_init(tn, &act_skbedit_ops);
1917 ++ return tc_action_net_init(net, tn, &act_skbedit_ops);
1918 + }
1919 +
1920 + static void __net_exit skbedit_exit_net(struct list_head *net_list)
1921 +diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
1922 +index 3038493d18ca..21d195296121 100644
1923 +--- a/net/sched/act_skbmod.c
1924 ++++ b/net/sched/act_skbmod.c
1925 +@@ -277,7 +277,7 @@ static __net_init int skbmod_init_net(struct net *net)
1926 + {
1927 + struct tc_action_net *tn = net_generic(net, skbmod_net_id);
1928 +
1929 +- return tc_action_net_init(tn, &act_skbmod_ops);
1930 ++ return tc_action_net_init(net, tn, &act_skbmod_ops);
1931 + }
1932 +
1933 + static void __net_exit skbmod_exit_net(struct list_head *net_list)
1934 +diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
1935 +index 66bfe57e74ae..43309ff2b5dc 100644
1936 +--- a/net/sched/act_tunnel_key.c
1937 ++++ b/net/sched/act_tunnel_key.c
1938 +@@ -579,7 +579,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
1939 + {
1940 + struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
1941 +
1942 +- return tc_action_net_init(tn, &act_tunnel_key_ops);
1943 ++ return tc_action_net_init(net, tn, &act_tunnel_key_ops);
1944 + }
1945 +
1946 + static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
1947 +diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
1948 +index da993edd2e40..41528b966440 100644
1949 +--- a/net/sched/act_vlan.c
1950 ++++ b/net/sched/act_vlan.c
1951 +@@ -324,7 +324,7 @@ static __net_init int vlan_init_net(struct net *net)
1952 + {
1953 + struct tc_action_net *tn = net_generic(net, vlan_net_id);
1954 +
1955 +- return tc_action_net_init(tn, &act_vlan_ops);
1956 ++ return tc_action_net_init(net, tn, &act_vlan_ops);
1957 + }
1958 +
1959 + static void __net_exit vlan_exit_net(struct list_head *net_list)
1960 +diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
1961 +index fcaf00621102..be7aebff0c1e 100644
1962 +--- a/tools/bpf/bpftool/common.c
1963 ++++ b/tools/bpf/bpftool/common.c
1964 +@@ -238,7 +238,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
1965 +
1966 + fd = get_fd_by_id(id);
1967 + if (fd < 0) {
1968 +- p_err("can't get prog by id (%u): %s", id, strerror(errno));
1969 ++ p_err("can't open object by id (%u): %s", id, strerror(errno));
1970 + return -1;
1971 + }
1972 +
1973 +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
1974 +index 0ce50c319cfd..ef8a82f29f02 100644
1975 +--- a/tools/hv/hv_kvp_daemon.c
1976 ++++ b/tools/hv/hv_kvp_daemon.c
1977 +@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
1978 + int sn_offset = 0;
1979 + int error = 0;
1980 + char *buffer;
1981 +- struct hv_kvp_ipaddr_value *ip_buffer;
1982 ++ struct hv_kvp_ipaddr_value *ip_buffer = NULL;
1983 + char cidr_mask[5]; /* /xyz */
1984 + int weight;
1985 + int i;
1986 +diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
1987 +index a3122f1949a8..4d35eba73dc9 100644
1988 +--- a/tools/testing/selftests/kvm/lib/x86.c
1989 ++++ b/tools/testing/selftests/kvm/lib/x86.c
1990 +@@ -809,9 +809,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1991 + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
1992 + r);
1993 +
1994 +- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1995 +- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1996 +- r);
1997 ++ if (kvm_check_cap(KVM_CAP_XCRS)) {
1998 ++ r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1999 ++ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
2000 ++ r);
2001 ++ }
2002 +
2003 + r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
2004 + TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
2005 +@@ -858,9 +860,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
2006 + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
2007 + r);
2008 +
2009 +- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
2010 +- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
2011 +- r);
2012 ++ if (kvm_check_cap(KVM_CAP_XCRS)) {
2013 ++ r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
2014 ++ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
2015 ++ r);
2016 ++ }
2017 +
2018 + r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
2019 + TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
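
KVM only services KVM_GET_XCRS/KVM_SET_XCRS on hosts that advertise KVM_CAP_XCRS, so the unconditional ioctls broke the state save/restore selftests on CPUs without XSAVE support; gating on kvm_check_cap() skips them cleanly. The library helper is essentially a KVM_CHECK_EXTENSION wrapper (a sketch; the real one in the selftests lib asserts on errors via TEST_ASSERT):

int kvm_check_cap(long cap)
{
	int kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);

	close(kvm_fd);
	return ret;	/* > 0 means the capability is available */
}
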
2020 +diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
2021 +index 3764e7121265..65db510dddc3 100644
2022 +--- a/tools/testing/selftests/kvm/platform_info_test.c
2023 ++++ b/tools/testing/selftests/kvm/platform_info_test.c
2024 +@@ -100,8 +100,8 @@ int main(int argc, char *argv[])
2025 + msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
2026 + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
2027 + msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
2028 +- test_msr_platform_info_disabled(vm);
2029 + test_msr_platform_info_enabled(vm);
2030 ++ test_msr_platform_info_disabled(vm);
2031 + vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
2032 +
2033 + kvm_vm_free(vm);
2034 +diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
2035 +index 08443a15e6be..3caee91bca08 100644
2036 +--- a/virt/kvm/arm/mmio.c
2037 ++++ b/virt/kvm/arm/mmio.c
2038 +@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
2039 + unsigned int len;
2040 + int mask;
2041 +
2042 ++ /* Detect an already handled MMIO return */
2043 ++ if (unlikely(!vcpu->mmio_needed))
2044 ++ return 0;
2045 ++
2046 ++ vcpu->mmio_needed = 0;
2047 ++
2048 + if (!run->mmio.is_write) {
2049 + len = run->mmio.len;
2050 + if (len > sizeof(unsigned long))
2051 +@@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
2052 + run->mmio.is_write = is_write;
2053 + run->mmio.phys_addr = fault_ipa;
2054 + run->mmio.len = len;
2055 ++ vcpu->mmio_needed = 1;
2056 +
2057 + if (!ret) {
2058 + /* We handled the access successfully in the kernel. */
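
io_mem_abort() now records that an MMIO exit is pending via vcpu->mmio_needed, and kvm_handle_mmio_return() consumes the flag, so re-entering KVM_RUN while run->exit_reason still reads KVM_EXIT_MMIO from an earlier iteration (for example after a run interrupted by a pending signal, which returns without updating exit_reason) can no longer replay the completion and write a guest register twice. A plausible userspace loop shape that hits the old path (assumed names: vcpu_fd, run, handle_mmio):

for (;;) {
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	if (ret < 0 && errno == EINTR)
		continue;	/* re-enter: no new MMIO was produced */
	if (run->exit_reason == KVM_EXIT_MMIO)
		handle_mmio(run);	/* hypothetical handler */
}
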
2059 +diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
2060 +index 8196e4f8731f..cd75df25fe14 100644
2061 +--- a/virt/kvm/arm/vgic/vgic-init.c
2062 ++++ b/virt/kvm/arm/vgic/vgic-init.c
2063 +@@ -19,6 +19,7 @@
2064 + #include <linux/cpu.h>
2065 + #include <linux/kvm_host.h>
2066 + #include <kvm/arm_vgic.h>
2067 ++#include <asm/kvm_emulate.h>
2068 + #include <asm/kvm_mmu.h>
2069 + #include "vgic.h"
2070 +
2071 +@@ -175,12 +176,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
2072 + irq->vcpu = NULL;
2073 + irq->target_vcpu = vcpu0;
2074 + kref_init(&irq->refcount);
2075 +- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2076 ++ switch (dist->vgic_model) {
2077 ++ case KVM_DEV_TYPE_ARM_VGIC_V2:
2078 + irq->targets = 0;
2079 + irq->group = 0;
2080 +- } else {
2081 ++ break;
2082 ++ case KVM_DEV_TYPE_ARM_VGIC_V3:
2083 + irq->mpidr = 0;
2084 + irq->group = 1;
2085 ++ break;
2086 ++ default:
2087 ++ kfree(dist->spis);
2088 ++ return -EINVAL;
2089 + }
2090 + }
2091 + return 0;
2092 +@@ -220,7 +227,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
2093 + irq->intid = i;
2094 + irq->vcpu = NULL;
2095 + irq->target_vcpu = vcpu;
2096 +- irq->targets = 1U << vcpu->vcpu_id;
2097 + kref_init(&irq->refcount);
2098 + if (vgic_irq_is_sgi(i)) {
2099 + /* SGIs */
2100 +@@ -230,11 +236,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
2101 + /* PPIs */
2102 + irq->config = VGIC_CONFIG_LEVEL;
2103 + }
2104 +-
2105 +- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
2106 +- irq->group = 1;
2107 +- else
2108 +- irq->group = 0;
2109 + }
2110 +
2111 + if (!irqchip_in_kernel(vcpu->kvm))
2112 +@@ -297,10 +298,19 @@ int vgic_init(struct kvm *kvm)
2113 +
2114 + for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
2115 + struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
2116 +- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
2117 ++ switch (dist->vgic_model) {
2118 ++ case KVM_DEV_TYPE_ARM_VGIC_V3:
2119 + irq->group = 1;
2120 +- else
2121 ++ irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
2122 ++ break;
2123 ++ case KVM_DEV_TYPE_ARM_VGIC_V2:
2124 + irq->group = 0;
2125 ++ irq->targets = 1U << idx;
2126 ++ break;
2127 ++ default:
2128 ++ ret = -EINVAL;
2129 ++ goto out;
2130 ++ }
2131 + }
2132 + }
2133 +