Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:29
Message-Id: 1572357542.028590e4e5c04847f3e818d7e56df9ec4c1b638e.mpagano@gentoo
commit: 028590e4e5c04847f3e818d7e56df9ec4c1b638e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 10 11:11:38 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 29 13:59:02 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=028590e4

Linux patch 4.14.143

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1142_linux-4.14.143.patch | 1534 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1538 insertions(+)

diff --git a/0000_README b/0000_README
index 1bb6a46..3e99608 100644
--- a/0000_README
+++ b/0000_README
@@ -611,6 +611,10 @@ Patch: 1141_linux-4.14.142.patch
From: https://www.kernel.org
Desc: Linux 4.14.142

+Patch: 1142_linux-4.14.143.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.143
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1142_linux-4.14.143.patch b/1142_linux-4.14.143.patch
new file mode 100644
index 0000000..cbe0fd9
--- /dev/null
+++ b/1142_linux-4.14.143.patch
@@ -0,0 +1,1534 @@
+diff --git a/Makefile b/Makefile
+index ccced427d9de..caa2fba089a5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 142
++SUBLEVEL = 143
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index d3983fdf1012..8fa49cf1211d 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -71,6 +71,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
++ BOOT_PARAM_PRESERVE(secure_boot),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_table),
+ BOOT_PARAM_PRESERVE(eddbuf),
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 8199b7e4aff9..f8f9cfded97d 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1148,10 +1148,6 @@ void clear_local_APIC(void)
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+ v = apic_read(APIC_LVT1);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+- if (!x2apic_enabled()) {
+- v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+- apic_write(APIC_LDR, v);
+- }
+ if (maxlvt >= 4) {
+ v = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index 0bbdfcef2aa8..a48a61f22f82 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -363,6 +363,9 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+ return err;
+ }
+
++ /* Give the controller some time to get ready to receive the NVM */
++ msleep(10);
++
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index cada1c75c41c..034b50080304 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -185,6 +185,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *pdev;
+ struct device_node *np;
++ struct device *dma_dev;
+ int ret;
+
+ if (!iommu_present(&platform_bus_type))
+@@ -242,7 +243,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ goto err_component_unbind;
+ }
+
+- private->dma_dev = &pdev->dev;
++ dma_dev = &pdev->dev;
++ private->dma_dev = dma_dev;
++
++ /*
++ * Configure the DMA segment size to make sure we get contiguous IOVA
++ * when importing PRIME buffers.
++ */
++ if (!dma_dev->dma_parms) {
++ private->dma_parms_allocated = true;
++ dma_dev->dma_parms =
++ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
++ GFP_KERNEL);
++ }
++ if (!dma_dev->dma_parms) {
++ ret = -ENOMEM;
++ goto err_component_unbind;
++ }
++
++ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
++ if (ret) {
++ dev_err(dma_dev, "Failed to set DMA segment size\n");
++ goto err_unset_dma_parms;
++ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+@@ -252,13 +275,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ drm->irq_enabled = true;
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret < 0)
+- goto err_component_unbind;
++ goto err_unset_dma_parms;
+
+ drm_kms_helper_poll_init(drm);
+ drm_mode_config_reset(drm);
+
+ return 0;
+
++err_unset_dma_parms:
++ if (private->dma_parms_allocated)
++ dma_dev->dma_parms = NULL;
+ err_component_unbind:
+ component_unbind_all(drm->dev, drm);
+ err_config_cleanup:
+@@ -269,9 +295,14 @@ err_config_cleanup:
+
+ static void mtk_drm_kms_deinit(struct drm_device *drm)
+ {
++ struct mtk_drm_private *private = drm->dev_private;
++
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+
++ if (private->dma_parms_allocated)
++ private->dma_dev->dma_parms = NULL;
++
+ component_unbind_all(drm->dev, drm);
+ drm_mode_config_cleanup(drm);
+ }
+@@ -287,6 +318,18 @@ static const struct file_operations mtk_drm_fops = {
+ .compat_ioctl = drm_compat_ioctl,
+ };
+
++/*
++ * We need to override this because the device used to import the memory is
++ * not dev->dev, as drm_gem_prime_import() expects.
++ */
++struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
++ struct dma_buf *dma_buf)
++{
++ struct mtk_drm_private *private = dev->dev_private;
++
++ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
++}
++
+ static struct drm_driver mtk_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+@@ -298,7 +341,7 @@ static struct drm_driver mtk_drm_driver = {
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+- .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_import = mtk_drm_gem_prime_import,
+ .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
+ .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+index c3378c452c0a..445dd45e65eb 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+@@ -56,6 +56,8 @@ struct mtk_drm_private {
+ } commit;
+
+ struct drm_atomic_state *suspend_state;
++
++ bool dma_parms_allocated;
+ };
+
+ extern struct platform_driver mtk_ddp_driver;
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 4e940a096b2a..abf107945766 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1149,8 +1149,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+- cp2112_gpio_direction_input(gc, d->hwirq);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1198,6 +1196,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ return PTR_ERR(dev->desc[pin]);
+ }
+
++ ret = cp2112_gpio_direction_input(&dev->gc, pin);
++ if (ret < 0) {
++ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
++ goto err_desc;
++ }
++
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index d604b3d5aa3e..c69158ccab82 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1680,8 +1680,6 @@ tx_err:
+ tx_buf_size, DMA_TO_DEVICE);
+ kfree(tun_qp->tx_ring[i].buf.addr);
+ }
+- kfree(tun_qp->tx_ring);
+- tun_qp->tx_ring = NULL;
+ i = MLX4_NUM_TUNNEL_BUFS;
+ err:
+ while (i > 0) {
+ ib_dma_unmap_single(dev->dev->persist->pdev,
+ rx_buf_size, DMA_FROM_DEVICE);
+ kfree(tun_qp->ring[i].addr);
+ }
++ kfree(tun_qp->tx_ring);
++ tun_qp->tx_ring = NULL;
+ kfree(tun_qp->ring);
+ tun_qp->ring = NULL;
+ return -ENOMEM;
+diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
+index 55288a026e4e..c137ffa6fdec 100644
+--- a/drivers/input/serio/hyperv-keyboard.c
++++ b/drivers/input/serio/hyperv-keyboard.c
+@@ -245,40 +245,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+
+ static void hv_kbd_on_channel_callback(void *context)
+ {
++ struct vmpacket_descriptor *desc;
+ struct hv_device *hv_dev = context;
+- void *buffer;
+- int bufferlen = 0x100; /* Start with sensible size */
+ u32 bytes_recvd;
+ u64 req_id;
+- int error;
+
+- buffer = kmalloc(bufferlen, GFP_ATOMIC);
+- if (!buffer)
+- return;
+-
+- while (1) {
+- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+- &bytes_recvd, &req_id);
+- switch (error) {
+- case 0:
+- if (bytes_recvd == 0) {
+- kfree(buffer);
+- return;
+- }
+-
+- hv_kbd_handle_received_packet(hv_dev, buffer,
+- bytes_recvd, req_id);
+- break;
++ foreach_vmbus_pkt(desc, hv_dev->channel) {
++ bytes_recvd = desc->len8 * 8;
++ req_id = desc->trans_id;
+
+- case -ENOBUFS:
+- kfree(buffer);
+- /* Handle large packet */
+- bufferlen = bytes_recvd;
+- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+- if (!buffer)
+- return;
+- break;
+- }
++ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
++ req_id);
+ }
+ }
+
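
The hyperv-keyboard hunk above replaces the kmalloc/vmbus_recvpacket_raw retry loop with the kernel's foreach_vmbus_pkt() iterator, which walks ring-buffer descriptors in place instead of copying each packet into a freshly sized buffer. A minimal, runnable userspace sketch of that iterator shape; struct pkt_desc, first_pkt, next_pkt and foreach_pkt are hypothetical stand-ins, not the VMBus API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt_desc {
	uint16_t len8;                  /* total length in 8-byte units */
	uint64_t trans_id;
	/* payload follows the header */
};

static const uint8_t *ring_end;

static const struct pkt_desc *first_pkt(const uint8_t *ring)
{
	return (const struct pkt_desc *)ring;
}

static const struct pkt_desc *next_pkt(const struct pkt_desc *d)
{
	const uint8_t *p = (const uint8_t *)d + d->len8 * 8;

	return p < ring_end ? (const struct pkt_desc *)p : NULL;
}

/* iterate descriptors in place: no per-packet allocation, no ENOBUFS retry */
#define foreach_pkt(d, ring) \
	for ((d) = first_pkt(ring); (d); (d) = next_pkt(d))

int main(void)
{
	static _Alignas(8) uint8_t ring[64];
	const struct pkt_desc *d;
	struct pkt_desc hdr = { .len8 = 4, .trans_id = 1 };

	memcpy(ring, &hdr, sizeof(hdr));        /* packet 1: 32 bytes */
	hdr.trans_id = 2;
	memcpy(ring + 32, &hdr, sizeof(hdr));   /* packet 2: 32 bytes */
	ring_end = ring + sizeof(ring);

	foreach_pkt(d, ring)
		printf("pkt: %u bytes, trans_id %llu\n",
		       (unsigned)(d->len8 * 8),
		       (unsigned long long)d->trans_id);
	return 0;
}

The payoff mirrored in the patch: the callback no longer allocates with GFP_ATOMIC on every wakeup and has no -ENOBUFS resize path to get wrong.
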
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 1e0fbce86d60..55e873126463 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -232,8 +232,10 @@ int octeon_setup_iq(struct octeon_device *oct,
+ }
+
+ oct->num_iqs++;
+- if (oct->fn_list.enable_io_queues(oct))
++ if (oct->fn_list.enable_io_queues(oct)) {
++ octeon_delete_instr_queue(oct, iq_no);
+ return 1;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+index 76540b0e082d..9e5cd18e7358 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+@@ -2777,8 +2777,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+- if (err)
++ if (err) {
++ kvfree(t);
+ return err;
++ }
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ kvfree(t);
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 754dff4c1771..880d925438c1 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1618,7 +1618,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+- unsigned int *mcastFilterSize_p;
++ __be32 *mcastFilterSize_p;
+ long ret;
+ unsigned long ret_attr;
+
+@@ -1640,8 +1640,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ return -EINVAL;
+ }
+
+- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+- VETH_MCAST_FILTER_SIZE, NULL);
++ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
++ VETH_MCAST_FILTER_SIZE,
++ NULL);
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+@@ -1658,7 +1659,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+- adapter->mcastFilterSize = *mcastFilterSize_p;
++ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
+ adapter->pool_config = 0;
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
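
The ibmveth fix above is an endianness bug: the firmware property is big-endian on the wire, so dereferencing it as a host unsigned int only happened to work on big-endian machines. A standalone sketch of the conversion; be32_to_cpu_example is a stand-in for the kernel's be32_to_cpu(), and the property bytes are made up:

#include <stdio.h>
#include <stdint.h>

static uint32_t be32_to_cpu_example(const uint8_t *p)
{
	/* assemble byte-by-byte: correct on any host endianness */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* e.g. VETH_MCAST_FILTER_SIZE = 256, stored big-endian */
	const uint8_t prop[4] = { 0x00, 0x00, 0x01, 0x00 };

	printf("filter size: %u\n", be32_to_cpu_example(prop)); /* 256 */
	return 0;
}
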
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index b171ed2015fe..a0a555052d8c 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3922,7 +3922,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ * setup (if available). */
+ status = myri10ge_request_irq(mgp);
+ if (status != 0)
+- goto abort_with_firmware;
++ goto abort_with_slices;
+ myri10ge_free_irq(mgp);
+
+ /* Save configuration space to be restored if the
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index ce79af4a7f6f..d73617cc3b15 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1,6 +1,6 @@
+ /* Renesas Ethernet AVB device driver
+ *
+- * Copyright (C) 2014-2015 Renesas Electronics Corporation
++ * Copyright (C) 2014-2019 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@××××××××××××××.com>
+ *
+@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
++ dev_consume_skb_any(skb);
+ break;
++ } else {
++ dev_kfree_skb_any(skb);
+ }
+ }
+ ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
+@@ -1576,7 +1579,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ DMA_TO_DEVICE);
+ goto unmap;
+ }
+- ts_skb->skb = skb;
++ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+@@ -1704,6 +1707,7 @@ static int ravb_close(struct net_device *ndev)
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
++ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 01787344f6e5..712b5eb3507a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1145,10 +1145,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+- if (!ldo) {
+- dev_err(dev, "no regulator found\n");
+- return -1;
+- }
++ if (!ldo)
++ return 0;
+
+ if (enable) {
+ ret = regulator_enable(ldo);
+diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
+index cce9c9ed46aa..9146068979d2 100644
+--- a/drivers/net/ethernet/toshiba/tc35815.c
++++ b/drivers/net/ethernet/toshiba/tc35815.c
+@@ -1497,7 +1497,7 @@ tc35815_rx(struct net_device *dev, int limit)
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_skbs[cur_bd].skb_dma,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
++ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
+ memmove(skb->data, skb->data - NET_IP_ALIGN,
+ pkt_len);
+ data = skb_put(skb, pkt_len);
+diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
+index c2d15d9c0c33..455979e47424 100644
+--- a/drivers/net/ethernet/tundra/tsi108_eth.c
++++ b/drivers/net/ethernet/tundra/tsi108_eth.c
+@@ -381,9 +381,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+ static void tsi108_stat_carry(struct net_device *dev)
+ {
+ struct tsi108_prv_data *data = netdev_priv(dev);
++ unsigned long flags;
+ u32 carry1, carry2;
+
+- spin_lock_irq(&data->misclock);
++ spin_lock_irqsave(&data->misclock, flags);
+
+ carry1 = TSI_READ(TSI108_STAT_CARRY1);
+ carry2 = TSI_READ(TSI108_STAT_CARRY2);
+@@ -451,7 +452,7 @@ static void tsi108_stat_carry(struct net_device *dev)
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+- spin_unlock_irq(&data->misclock);
++ spin_unlock_irqrestore(&data->misclock, flags);
+ }
+
+ /* Read a stat counter atomically with respect to carries.
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index eb92720dd1c4..33c1f6548fb7 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1170,12 +1170,15 @@ static void netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
++ struct netvsc_device *nvdev;
+ struct netvsc_vf_pcpu_stats vf_tot;
+ int i;
+
++ rcu_read_lock();
++
++ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (!nvdev)
+- return;
++ goto out;
+
+ netdev_stats_to_stats64(t, &net->stats);
+
+@@ -1214,6 +1217,8 @@ static void netvsc_get_stats64(struct net_device *net,
+ t->rx_packets += packets;
+ t->multicast += multicast;
+ }
++out:
++ rcu_read_unlock();
+ }
+
+ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 947bea81d924..dfbdea22fbad 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -175,7 +175,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+ }
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+- return -ETIMEDOUT;
++ ret = -ETIMEDOUT;
++ goto err;
+ }
+
+ /* enable ethernet mode (?) */
+diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
+index ce0b0b4e3a57..c677ec2bae18 100644
+--- a/drivers/net/usb/kalmia.c
++++ b/drivers/net/usb/kalmia.c
+@@ -117,16 +117,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+ status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
+ / sizeof(init_msg_1[0]), usb_buf, 24);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(usb_buf, init_msg_2, 12);
+ status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
+ / sizeof(init_msg_2[0]), usb_buf, 28);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
+-
++out:
+ kfree(usb_buf);
+ return status;
+ }
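
The kalmia change above (and the cx82310 one before it) is the classic goto-cleanup idiom: early `return status;` statements leaked usb_buf, and routing every exit through one label frees it on all paths. A minimal runnable sketch of the same shape; send_init_packet() and the error values are stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int send_init_packet(const char *buf)   /* stand-in for the USB call */
{
	return strcmp(buf, "bad") == 0 ? -5 : 0;  /* fake -EIO on "bad" */
}

static int init_device(const char *msg1, const char *msg2)
{
	char *usb_buf;
	int status;

	usb_buf = malloc(64);
	if (!usb_buf)
		return -12;              /* nothing allocated yet: plain return */

	strcpy(usb_buf, msg1);
	status = send_init_packet(usb_buf);
	if (status != 0)
		goto out;                /* was: return status; -- leaked usb_buf */

	strcpy(usb_buf, msg2);
	status = send_init_packet(usb_buf);
out:
	free(usb_buf);                   /* single exit path frees on all paths */
	return status;
}

int main(void)
{
	printf("ok path:  %d\n", init_device("msg1", "msg2"));
	printf("err path: %d\n", init_device("bad", "msg2"));
	return 0;
}

The lan78xx hunk that follows extends the same idea to ordered unwind labels (out5 .. out2), one per resource, so each failure point jumps to exactly the cleanup it needs.
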
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index b62c41114e34..24b994c68bcc 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3645,7 +3645,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+ ret = register_netdev(netdev);
+ if (ret != 0) {
+ netif_err(dev, probe, netdev, "couldn't register the device\n");
+- goto out3;
++ goto out4;
+ }
+
+ usb_set_intfdata(intf, dev);
+@@ -3660,12 +3660,14 @@ static int lan78xx_probe(struct usb_interface *intf,
+
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+- goto out4;
++ goto out5;
+
+ return 0;
+
+-out4:
++out5:
+ unregister_netdev(netdev);
++out4:
++ usb_free_urb(dev->urb_intr);
+ out3:
+ lan78xx_unbind(dev, intf);
+ out2:
+diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
+index a89b5685e68b..4577ee5bbddd 100644
+--- a/drivers/net/wimax/i2400m/fw.c
++++ b/drivers/net/wimax/i2400m/fw.c
+@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
+ }
+ result = i2400m_barker_db_add(barker);
+ if (result < 0)
+- goto error_add;
++ goto error_parse_add;
+ }
+ kfree(options_orig);
+ }
+ return 0;
+
++error_parse_add:
+ error_parse:
++ kfree(options_orig);
+ error_add:
+ kfree(i2400m_barker_db);
+ return result;
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index bd00b7cc8b78..5c89bbb05441 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -178,24 +178,14 @@ static void bcm2835aux_spi_reset_hw(struct bcm2835aux_spi *bs)
+ BCM2835_AUX_SPI_CNTL0_CLEARFIFO);
+ }
+
+-static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
++static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
+ {
+- struct spi_master *master = dev_id;
+- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+- irqreturn_t ret = IRQ_NONE;
+-
+- /* IRQ may be shared, so return if our interrupts are disabled */
+- if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+- (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+- return ret;
++ u32 stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
+
+ /* check if we have data to read */
+- while (bs->rx_len &&
+- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+- BCM2835_AUX_SPI_STAT_RX_EMPTY))) {
++ for (; bs->rx_len && (stat & BCM2835_AUX_SPI_STAT_RX_LVL);
++ stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT))
+ bcm2835aux_rd_fifo(bs);
+- ret = IRQ_HANDLED;
+- }
+
+ /* check if we have data to write */
+ while (bs->tx_len &&
+@@ -203,16 +193,21 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+ (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+ BCM2835_AUX_SPI_STAT_TX_FULL))) {
+ bcm2835aux_wr_fifo(bs);
+- ret = IRQ_HANDLED;
+ }
++}
+
+- /* and check if we have reached "done" */
+- while (bs->rx_len &&
+- (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+- BCM2835_AUX_SPI_STAT_BUSY))) {
+- bcm2835aux_rd_fifo(bs);
+- ret = IRQ_HANDLED;
+- }
++static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
++{
++ struct spi_master *master = dev_id;
++ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
++
++ /* IRQ may be shared, so return if our interrupts are disabled */
++ if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
++ (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
++ return IRQ_NONE;
++
++ /* do common fifo handling */
++ bcm2835aux_spi_transfer_helper(bs);
+
+ if (!bs->tx_len) {
+ /* disable tx fifo empty interrupt */
+@@ -226,8 +221,7 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+ complete(&master->xfer_completion);
+ }
+
+- /* and return */
+- return ret;
++ return IRQ_HANDLED;
+ }
+
+ static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+@@ -273,7 +267,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+ {
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ unsigned long timeout;
+- u32 stat;
+
+ /* configure spi */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+@@ -284,24 +277,9 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+
+ /* loop until finished the transfer */
+ while (bs->rx_len) {
+- /* read status */
+- stat = bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT);
+-
+- /* fill in tx fifo with remaining data */
+- if ((bs->tx_len) && (!(stat & BCM2835_AUX_SPI_STAT_TX_FULL))) {
+- bcm2835aux_wr_fifo(bs);
+- continue;
+- }
+
+- /* read data from fifo for both cases */
+- if (!(stat & BCM2835_AUX_SPI_STAT_RX_EMPTY)) {
+- bcm2835aux_rd_fifo(bs);
+- continue;
+- }
+- if (!(stat & BCM2835_AUX_SPI_STAT_BUSY)) {
+- bcm2835aux_rd_fifo(bs);
+- continue;
+- }
++ /* do common fifo handling */
++ bcm2835aux_spi_transfer_helper(bs);
+
+ /* there is still data pending to read check the timeout */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 238d24348a98..df95e39ccd45 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1162,6 +1162,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ {
+ struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->vfs_inode;
++ struct ceph_buffer *old_blob = NULL;
+ struct cap_msg_args arg;
+ int held, revoking, dropping;
+ int wake = 0;
+@@ -1227,7 +1228,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ ci->i_requested_max_size = arg.max_size;
+
+ if (flushing & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ arg.xattr_version = ci->i_xattrs.version;
+ arg.xattr_buf = ci->i_xattrs.blob;
+ } else {
+@@ -1262,6 +1263,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
++
+ ret = send_cap_msg(&arg);
+ if (ret < 0) {
+ dout("error sending cap msg, must requeue %p\n", inode);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index f2b722f0df5d..9bda8c7a80a0 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -730,6 +730,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ int issued = 0, implemented, new_issued;
+ struct timespec mtime, atime, ctime;
+ struct ceph_buffer *xattr_blob = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ struct ceph_string *pool_ns = NULL;
+ struct ceph_cap *new_cap = NULL;
+ int err = 0;
+@@ -847,7 +848,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
+ le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = xattr_blob;
+ if (xattr_blob)
+ memcpy(ci->i_xattrs.blob->vec.iov_base,
+@@ -993,8 +994,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ out:
+ if (new_cap)
+ ceph_put_cap(mdsc, new_cap);
+- if (xattr_blob)
+- ceph_buffer_put(xattr_blob);
++ ceph_buffer_put(old_blob);
++ ceph_buffer_put(xattr_blob);
+ ceph_put_string(pool_ns);
+ return err;
+ }
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index a7e763dac038..29ed1688a1d3 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -460,6 +460,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap_snap *capsnap;
+ struct ceph_snap_context *old_snapc, *new_snapc;
++ struct ceph_buffer *old_blob = NULL;
+ int used, dirty;
+
+ capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+@@ -536,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ capsnap->gid = inode->i_gid;
+
+ if (dirty & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ capsnap->xattr_blob =
+ ceph_buffer_get(ci->i_xattrs.blob);
+ capsnap->xattr_version = ci->i_xattrs.version;
+@@ -579,6 +580,7 @@ update_snapc:
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
+ kfree(capsnap);
+ ceph_put_snap_context(old_snapc);
+ }
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 60b70f0985f6..46f600107cb5 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -835,7 +835,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
+ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
+ ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
+ extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
+-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
++extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+ extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
+ extern void __init ceph_xattr_init(void);
+ extern void ceph_xattr_exit(void);
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0376db8a74f8..3a166f860b6c 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -681,12 +681,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
+
+ /*
+ * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+- * and swap into place.
++ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
++ * that it can be freed by the caller as the i_ceph_lock is likely to be
++ * held.
+ */
+-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
++struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ {
+ struct rb_node *p;
+ struct ceph_inode_xattr *xattr = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ void *dest;
+
+ dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+@@ -717,12 +720,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
+
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = NULL;
+ ci->i_xattrs.dirty = false;
+ ci->i_xattrs.version++;
+ }
++
++ return old_blob;
+ }
+
+ static inline int __get_request_mask(struct inode *in) {
+@@ -955,6 +960,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ int issued;
+ int err;
+ int dirty = 0;
+@@ -1023,13 +1029,15 @@ retry:
+ struct ceph_buffer *blob;
+
+ spin_unlock(&ci->i_ceph_lock);
+- dout(" preaallocating new blob size=%d\n", required_blob_size);
++ ceph_buffer_put(old_blob); /* Shouldn't be required */
++ dout(" pre-allocating new blob size=%d\n", required_blob_size);
+ blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
+ if (!blob)
+ goto do_sync_unlocked;
+ spin_lock(&ci->i_ceph_lock);
++ /* prealloc_blob can't be released while holding i_ceph_lock */
+ if (ci->i_xattrs.prealloc_blob)
+- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
++ old_blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = blob;
+ goto retry;
+ }
+@@ -1045,6 +1053,7 @@ retry:
+ }
+
+ spin_unlock(&ci->i_ceph_lock);
++ ceph_buffer_put(old_blob);
+ if (lock_snap_rwsem)
+ up_read(&mdsc->snap_rwsem);
+ if (dirty)
+diff --git a/fs/read_write.c b/fs/read_write.c
+index d6f8bfb0f794..38a8bcccf0dd 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1882,10 +1882,7 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ }
+ EXPORT_SYMBOL(vfs_clone_file_range);
+
+-/*
+- * Read a page's worth of file data into the page cache. Return the page
+- * locked.
+- */
++/* Read a page's worth of file data into the page cache. */
+ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ {
+ struct address_space *mapping;
+@@ -1901,10 +1898,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+- lock_page(page);
+ return page;
+ }
+
++/*
++ * Lock two pages, ensuring that we lock in offset order if the pages are from
++ * the same file.
++ */
++static void vfs_lock_two_pages(struct page *page1, struct page *page2)
++{
++ /* Always lock in order of increasing index. */
++ if (page1->index > page2->index)
++ swap(page1, page2);
++
++ lock_page(page1);
++ if (page1 != page2)
++ lock_page(page2);
++}
++
++/* Unlock two pages, being careful not to unlock the same page twice. */
++static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
++{
++ unlock_page(page1);
++ if (page1 != page2)
++ unlock_page(page2);
++}
++
+ /*
+ * Compare extents of two files to see if they are the same.
+ * Caller must have locked both inodes to prevent write races.
+@@ -1942,10 +1961,24 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ dest_page = vfs_dedupe_get_page(dest, destoff);
+ if (IS_ERR(dest_page)) {
+ error = PTR_ERR(dest_page);
+- unlock_page(src_page);
+ put_page(src_page);
+ goto out_error;
+ }
++
++ vfs_lock_two_pages(src_page, dest_page);
++
++ /*
++ * Now that we've locked both pages, make sure they're still
++ * mapped to the file data we're interested in. If not,
++ * someone is invalidating pages on us and we lose.
++ */
++ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
++ src_page->mapping != src->i_mapping ||
++ dest_page->mapping != dest->i_mapping) {
++ same = false;
++ goto unlock;
++ }
++
+ src_addr = kmap_atomic(src_page);
+ dest_addr = kmap_atomic(dest_page);
+
+@@ -1957,8 +1990,8 @@ int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+
+ kunmap_atomic(dest_addr);
+ kunmap_atomic(src_addr);
+-unlock:
++unlock:
++ vfs_unlock_two_pages(src_page, dest_page);
+ put_page(dest_page);
+ put_page(src_page);
+
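
vfs_lock_two_pages() above encodes the standard deadlock-avoidance rule: when two callers may lock the same pair of objects, both must take the locks in one global order (the kernel sorts by page index; the sketch below sorts by address) and must handle the pair being the same object. A runnable userspace analogue with pthreads, assuming nothing beyond POSIX:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {                    /* same object: lock only once */
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {   /* canonical order: low address first */
		pthread_mutex_t *t = a; a = b; b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	/* one thread passes (m1, m2), the other (m2, m1); the sorting in
	 * lock_two() means both acquire in the same order, so no deadlock */
	pthread_mutex_t **p = arg;

	lock_two(p[0], p[1]);
	unlock_two(p[0], p[1]);
	return NULL;
}

int main(void)
{
	pthread_mutex_t *fwd[2] = { &m1, &m2 }, *rev[2] = { &m2, &m1 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, fwd);
	pthread_create(&t2, NULL, worker, rev);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	puts("no deadlock");
	return 0;
}
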
+diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
+index 5e58bb29b1a3..11cdc7c60480 100644
+--- a/include/linux/ceph/buffer.h
++++ b/include/linux/ceph/buffer.h
+@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
+
+ static inline void ceph_buffer_put(struct ceph_buffer *b)
+ {
+- kref_put(&b->kref, ceph_buffer_release);
++ if (b)
++ kref_put(&b->kref, ceph_buffer_release);
+ }
+
+ extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
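
Making ceph_buffer_put() tolerate NULL is what lets the earlier ceph hunks call it unconditionally on old_blob pointers that usually stay unassigned, mirroring kfree(NULL) semantics. A tiny runnable sketch of the pattern; struct buffer and buffer_put are illustrative, not the ceph types:

#include <stdio.h>
#include <stdlib.h>

struct buffer {
	int refcount;
	char *data;
};

static void buffer_put(struct buffer *b)
{
	if (!b)                          /* tolerate NULL, like kfree(NULL) */
		return;
	if (--b->refcount == 0) {
		free(b->data);
		free(b);
		puts("buffer released");
	}
}

int main(void)
{
	struct buffer *old_blob = NULL;  /* often never assigned on this path */
	struct buffer *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->refcount = 1;
	buffer_put(old_blob);            /* safe no-op instead of a crash */
	buffer_put(b);
	return 0;
}
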
+diff --git a/include/linux/gpio.h b/include/linux/gpio.h
+index 8ef7fc0ce0f0..b2f103b170a9 100644
+--- a/include/linux/gpio.h
++++ b/include/linux/gpio.h
+@@ -230,30 +230,6 @@ static inline int irq_to_gpio(unsigned irq)
+ return -EINVAL;
+ }
+
+-static inline int
+-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+- unsigned int gpio_offset, unsigned int pin_offset,
+- unsigned int npins)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline int
+-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+- struct pinctrl_dev *pctldev,
+- unsigned int gpio_offset, const char *pin_group)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline void
+-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+-{
+- WARN_ON(1);
+-}
+-
+ static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+ {
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index a10a3b1813f3..775387d6ca95 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -14,6 +14,7 @@
+ struct tcf_idrinfo {
+ spinlock_t lock;
+ struct idr action_idr;
++ struct net *net;
+ };
+
+ struct tc_action_ops;
+@@ -104,7 +105,7 @@ struct tc_action_net {
+ };
+
+ static inline
+-int tc_action_net_init(struct tc_action_net *tn,
++int tc_action_net_init(struct net *net, struct tc_action_net *tn,
+ const struct tc_action_ops *ops)
+ {
+ int err = 0;
+@@ -113,6 +114,7 @@ int tc_action_net_init(struct tc_action_net *tn,
+ if (!tn->idrinfo)
+ return -ENOMEM;
+ tn->ops = ops;
++ tn->idrinfo->net = net;
+ spin_lock_init(&tn->idrinfo->lock);
+ idr_init(&tn->idrinfo->action_idr);
+ return err;
+diff --git a/include/net/psample.h b/include/net/psample.h
+index 9b80f814ab04..94cb37a7bf75 100644
+--- a/include/net/psample.h
++++ b/include/net/psample.h
+@@ -12,6 +12,7 @@ struct psample_group {
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
++ struct rcu_head rcu;
+ };
+
+ struct psample_group *psample_group_get(struct net *net, u32 group_num);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index ec11bb986a8b..c43bc2bc5b2c 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+ */
+ static void do_optimize_kprobes(void)
+ {
++ lockdep_assert_held(&text_mutex);
+ /*
+ * The optimization/unoptimization refers online_cpus via
+ * stop_machine() and cpu-hotplug modifies online_cpus.
+@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
+ list_empty(&optimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_optimize_kprobes(&optimizing_list);
+- mutex_unlock(&text_mutex);
+ }
+
+ /*
+@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
+ {
+ struct optimized_kprobe *op, *tmp;
+
++ lockdep_assert_held(&text_mutex);
+ /* See comment in do_optimize_kprobes() */
+ lockdep_assert_cpus_held();
+
+@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
+ if (list_empty(&unoptimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
+ } else
+ list_del_init(&op->list);
+ }
+- mutex_unlock(&text_mutex);
+ }
+
+ /* Reclaim all kprobes on the free_list */
+@@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ {
+ mutex_lock(&kprobe_mutex);
+ cpus_read_lock();
++ mutex_lock(&text_mutex);
+ /* Lock modules while optimizing kprobes */
+ mutex_lock(&module_mutex);
+
+@@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ do_free_cleaned_kprobes();
+
+ mutex_unlock(&module_mutex);
++ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+ mutex_unlock(&kprobe_mutex);
+
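
The kprobes hunks above hoist text_mutex out of the two helpers into kprobe_optimizer(), so the whole optimize/unoptimize batch runs under one acquisition taken in a fixed order relative to cpus_read_lock(), while the helpers keep only lockdep_assert_held() checks. A compact pthread analogue of the "caller locks, callee asserts" shape; the owner tracking is a hand-rolled stand-in for lockdep, not a kernel API:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t text_mutex_owner;

static void assert_held(void)    /* poor man's lockdep_assert_held() */
{
	assert(pthread_equal(text_mutex_owner, pthread_self()));
}

/* helpers no longer lock; they only assert the caller did */
static void do_unoptimize(void) { assert_held(); puts("unoptimize batch"); }
static void do_optimize(void)   { assert_held(); puts("optimize batch"); }

static void optimizer(void)
{
	pthread_mutex_lock(&text_mutex);     /* one acquisition for the batch */
	text_mutex_owner = pthread_self();
	do_unoptimize();
	do_optimize();
	pthread_mutex_unlock(&text_mutex);
}

int main(void)
{
	optimizer();
	return 0;
}

Taking the lock once at the outermost scope also pins its ordering against the other locks taken there, which is harder to guarantee when each helper locks on its own.
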
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 912731bed7b7..abab3753a9e0 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
+ txq = netdev_get_tx_queue(dev, q_index);
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (netif_xmit_frozen_or_stopped(txq) ||
+- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
++ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
+ skb_queue_head(&npinfo->txq, skb);
+ HARD_TX_UNLOCK(dev, txq);
+ local_irq_restore(flags);
+@@ -357,7 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ HARD_TX_UNLOCK(dev, txq);
+
+- if (status == NETDEV_TX_OK)
++ if (dev_xmit_complete(status))
+ break;
+
+ }
+@@ -374,7 +374,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ }
+
+- if (status != NETDEV_TX_OK) {
++ if (!dev_xmit_complete(status)) {
+ skb_queue_tail(&npinfo->txq, skb);
+ schedule_delayed_work(&npinfo->tx_work,0);
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 541686f3f835..5ce069ce2a97 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -914,6 +914,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+ return mss_now;
+ }
+
++/* In some cases, both sendpage() and sendmsg() could have added
++ * an skb to the write queue, but failed adding payload on it.
++ * We need to remove it to consume less memory, but more
++ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
++ * users.
++ */
++static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
++{
++ if (skb && !skb->len) {
++ tcp_unlink_write_queue(skb, sk);
++ if (tcp_write_queue_empty(sk))
++ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
++ sk_wmem_free_skb(sk, skb);
++ }
++}
++
+ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
+ {
+@@ -1034,6 +1050,7 @@ out:
+ return copied;
+
+ do_error:
++ tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
+ if (copied)
+ goto out;
+ out_err:
+@@ -1412,17 +1429,11 @@ out_nopush:
+ sock_zerocopy_put(uarg);
+ return copied + copied_syn;
+
++do_error:
++ skb = tcp_write_queue_tail(sk);
+ do_fault:
+- if (!skb->len) {
+- tcp_unlink_write_queue(skb, sk);
+- /* It is the one place in all of TCP, except connection
+- * reset, where we can be unlinking the send_head.
+- */
+- tcp_check_send_head(sk, skb);
+- sk_wmem_free_skb(sk, skb);
+- }
++ tcp_remove_empty_skb(sk, skb);
+
+-do_error:
+ if (copied + copied_syn)
+ goto out;
+ out_err:
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a99086bf26ea..5b808089eff8 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2025,7 +2025,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+ if (len <= skb->len)
+ break;
+
+- if (unlikely(TCP_SKB_CB(skb)->eor))
++ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
+ return false;
+
+ len -= skb->len;
+@@ -2148,6 +2148,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ * we need to propagate it to the new skb.
+ */
+ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
++ tcp_skb_collapse_tstamp(nskb, skb);
+ tcp_unlink_write_queue(skb, sk);
+ sk_wmem_free_skb(sk, skb);
+ } else {
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index bd269e78272a..611dc5d55fa0 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -772,12 +772,13 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ if (im->mca_sfmode == MCAST_INCLUDE) {
+- im->mca_tomb = pmc->mca_tomb;
+- im->mca_sources = pmc->mca_sources;
++ swap(im->mca_tomb, pmc->mca_tomb);
++ swap(im->mca_sources, pmc->mca_sources);
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
+ }
+ in6_dev_put(pmc->idev);
++ ip6_mc_clear_src(pmc);
+ kfree(pmc);
+ }
+ spin_unlock_bh(&im->mca_lock);
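
In the mcast hunk above, swap() instead of plain pointer assignment hands the tomb/source lists to the revived entry while leaving pmc owning whatever the revived entry previously held, so the added ip6_mc_clear_src(pmc) frees exactly one copy. A minimal runnable sketch of that ownership handover; SWAP and struct mc_rec are illustrative, not the kernel types:

#include <stdio.h>
#include <stdlib.h>

#define SWAP(a, b) do { char *t = (a); (a) = (b); (b) = t; } while (0)

struct mc_rec {
	char *sources;                   /* stand-in for the mca_sources chain */
};

int main(void)
{
	struct mc_rec pmc = { .sources = malloc(16) }; /* record being deleted */
	struct mc_rec im  = { .sources = NULL };       /* record being revived */

	/* was: im.sources = pmc.sources; -- two owners, later a double free */
	SWAP(im.sources, pmc.sources);

	free(pmc.sources);               /* the clear-src step: frees NULL here */
	free(im.sources);                /* the single real owner frees the list */
	puts("one owner at each point in time");
	return 0;
}
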
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 64f95624f219..4cea353221da 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -156,7 +156,7 @@ static void psample_group_destroy(struct psample_group *group)
+ {
+ psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
+ list_del(&group->list);
+- kfree(group);
++ kfree_rcu(group, rcu);
+ }
+
+ static struct psample_group *
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index 364a878e51cb..bdc8885c0448 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -402,7 +402,7 @@ static __net_init int bpf_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, bpf_net_id);
+
+- return tc_action_net_init(tn, &act_bpf_ops);
++ return tc_action_net_init(net, tn, &act_bpf_ops);
+ }
+
+ static void __net_exit bpf_exit_net(struct net *net)
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index 10b7a8855a6c..de0cd73a5a5d 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -206,7 +206,7 @@ static __net_init int connmark_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, connmark_net_id);
+
+- return tc_action_net_init(tn, &act_connmark_ops);
++ return tc_action_net_init(net, tn, &act_connmark_ops);
+ }
+
+ static void __net_exit connmark_exit_net(struct net *net)
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
+index d836f998117b..a449594553d0 100644
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -632,7 +632,7 @@ static __net_init int csum_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, csum_net_id);
+
+- return tc_action_net_init(tn, &act_csum_ops);
++ return tc_action_net_init(net, tn, &act_csum_ops);
+ }
+
+ static void __net_exit csum_exit_net(struct net *net)
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
+index a0ac42b3ed06..69512d3d0818 100644
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -232,7 +232,7 @@ static __net_init int gact_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, gact_net_id);
+
+- return tc_action_net_init(tn, &act_gact_ops);
++ return tc_action_net_init(net, tn, &act_gact_ops);
+ }
+
+ static void __net_exit gact_exit_net(struct net *net)
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 16a403d17f44..aea8ee40e76b 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -837,7 +837,7 @@ static __net_init int ife_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+- return tc_action_net_init(tn, &act_ife_ops);
++ return tc_action_net_init(net, tn, &act_ife_ops);
+ }
+
+ static void __net_exit ife_exit_net(struct net *net)
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 18b2fd2ba7d7..a2687dd95a3d 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -65,12 +65,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ return 0;
+ }
+
+-static void ipt_destroy_target(struct xt_entry_target *t)
++static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
+ {
+ struct xt_tgdtor_param par = {
+ .target = t->u.kernel.target,
+ .targinfo = t->data,
+ .family = NFPROTO_IPV4,
++ .net = net,
+ };
+ if (par.target->destroy != NULL)
+ par.target->destroy(&par);
+@@ -82,7 +83,7 @@ static void tcf_ipt_release(struct tc_action *a, int bind)
+ struct tcf_ipt *ipt = to_ipt(a);
+
+ if (ipt->tcfi_t) {
+- ipt_destroy_target(ipt->tcfi_t);
++ ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
+ kfree(ipt->tcfi_t);
+ }
+ kfree(ipt->tcfi_tname);
+@@ -172,7 +173,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
+
+ spin_lock_bh(&ipt->tcf_lock);
+ if (ret != ACT_P_CREATED) {
+- ipt_destroy_target(ipt->tcfi_t);
++ ipt_destroy_target(ipt->tcfi_t, net);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
+ }
+@@ -337,7 +338,7 @@ static __net_init int ipt_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, ipt_net_id);
+
+- return tc_action_net_init(tn, &act_ipt_ops);
++ return tc_action_net_init(net, tn, &act_ipt_ops);
+ }
+
+ static void __net_exit ipt_exit_net(struct net *net)
+@@ -387,7 +388,7 @@ static __net_init int xt_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, xt_net_id);
+
+- return tc_action_net_init(tn, &act_xt_ops);
++ return tc_action_net_init(net, tn, &act_xt_ops);
+ }
+
+ static void __net_exit xt_exit_net(struct net *net)
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 6ce8de373f83..529bb064c4a4 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -343,7 +343,7 @@ static __net_init int mirred_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, mirred_net_id);
+
+- return tc_action_net_init(tn, &act_mirred_ops);
++ return tc_action_net_init(net, tn, &act_mirred_ops);
+ }
+
+ static void __net_exit mirred_exit_net(struct net *net)
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
+index c365d01b99c8..5a136943af27 100644
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -307,7 +307,7 @@ static __net_init int nat_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, nat_net_id);
+
+- return tc_action_net_init(tn, &act_nat_ops);
++ return tc_action_net_init(net, tn, &act_nat_ops);
+ }
+
+ static void __net_exit nat_exit_net(struct net *net)
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 656b6ada9221..b6f6bfad8b2a 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -458,7 +458,7 @@ static __net_init int pedit_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, pedit_net_id);
+
+- return tc_action_net_init(tn, &act_pedit_ops);
++ return tc_action_net_init(net, tn, &act_pedit_ops);
+ }
+
+ static void __net_exit pedit_exit_net(struct net *net)
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index c16127109f21..a7fcc591c241 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -331,7 +331,7 @@ static __net_init int police_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, police_net_id);
+
+- return tc_action_net_init(tn, &act_police_ops);
++ return tc_action_net_init(net, tn, &act_police_ops);
+ }
+
+ static void __net_exit police_exit_net(struct net *net)
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 64fd1e9818a6..489db1064d5b 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -92,13 +92,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ tcf_idr_release(*a, bind);
+ return -ENOMEM;
+ }
+- RCU_INIT_POINTER(s->psample_group, psample_group);
++ rcu_swap_protected(s->psample_group, psample_group,
++ lockdep_is_held(&s->tcf_lock));
+
+ if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
+ s->truncate = true;
+ s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
+ }
+
++ if (psample_group)
++ psample_group_put(psample_group);
+ if (ret == ACT_P_CREATED)
+ tcf_idr_insert(tn, *a);
+ return ret;
+@@ -249,7 +252,7 @@ static __net_init int sample_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+- return tc_action_net_init(tn, &act_sample_ops);
++ return tc_action_net_init(net, tn, &act_sample_ops);
+ }
+
+ static void __net_exit sample_exit_net(struct net *net)
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index f3ed63aa4111..86d8b66b9928 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -198,7 +198,7 @@ static __net_init int simp_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, simp_net_id);
+
+- return tc_action_net_init(tn, &act_simp_ops);
++ return tc_action_net_init(net, tn, &act_simp_ops);
+ }
+
+ static void __net_exit simp_exit_net(struct net *net)
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 6e749497009e..1a8a49e33320 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -239,7 +239,7 @@ static __net_init int skbedit_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+
+- return tc_action_net_init(tn, &act_skbedit_ops);
++ return tc_action_net_init(net, tn, &act_skbedit_ops);
+ }
+
+ static void __net_exit skbedit_exit_net(struct net *net)
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index d227599f7e73..20ea9d11821b 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -267,7 +267,7 @@ static __net_init int skbmod_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+- return tc_action_net_init(tn, &act_skbmod_ops);
++ return tc_action_net_init(net, tn, &act_skbmod_ops);
+ }
+
+ static void __net_exit skbmod_exit_net(struct net *net)
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index cd51f2ed55fa..62e22738022d 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -324,7 +324,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+- return tc_action_net_init(tn, &act_tunnel_key_ops);
++ return tc_action_net_init(net, tn, &act_tunnel_key_ops);
+ }
+
+ static void __net_exit tunnel_key_exit_net(struct net *net)
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 5c10a0fce35b..c9a3eeb351fa 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -271,7 +271,7 @@ static __net_init int vlan_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, vlan_net_id);
+
+- return tc_action_net_init(tn, &act_vlan_ops);
++ return tc_action_net_init(net, tn, &act_vlan_ops);
+ }
+
+ static void __net_exit vlan_exit_net(struct net *net)
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 0ef215061fb5..1b917eaffad8 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -867,7 +867,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
+ int sn_offset = 0;
+ int error = 0;
+ char *buffer;
+- struct hv_kvp_ipaddr_value *ip_buffer;
++ struct hv_kvp_ipaddr_value *ip_buffer = NULL;
+ char cidr_mask[5]; /* /xyz */
+ int weight;
+ int i;
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
+index 08443a15e6be..3caee91bca08 100644
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -98,6 +98,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ unsigned int len;
+ int mask;
+
++ /* Detect an already handled MMIO return */
++ if (unlikely(!vcpu->mmio_needed))
++ return 0;
++
++ vcpu->mmio_needed = 0;
++
+ if (!run->mmio.is_write) {
+ len = run->mmio.len;
+ if (len > sizeof(unsigned long))
+@@ -200,6 +206,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ run->mmio.is_write = is_write;
+ run->mmio.phys_addr = fault_ipa;
+ run->mmio.len = len;
++ vcpu->mmio_needed = 1;
+
+ if (!ret) {
+ /* We handled the access successfully in the kernel. */