Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Sat, 21 Oct 2017 20:13:39
Message-Id: 1508616805.dc67168832635f5715bdc4de0d186e71d02b815f.mpagano@gentoo
commit:     dc67168832635f5715bdc4de0d186e71d02b815f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 21 20:13:25 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 21 20:13:25 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc671688

Linux patch 4.4.94

 0000_README             |    4 +
 1093_linux-4.4.94.patch | 1389 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1393 insertions(+)

diff --git a/0000_README b/0000_README
index a90a29a..c847c5f 100644
--- a/0000_README
+++ b/0000_README
@@ -415,6 +415,10 @@ Patch: 1092_linux-4.4.93.patch
From: http://www.kernel.org
Desc: Linux 4.4.93

+Patch: 1093_linux-4.4.94.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.94
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1093_linux-4.4.94.patch b/1093_linux-4.4.94.patch
new file mode 100644
index 0000000..4a0013b
--- /dev/null
+++ b/1093_linux-4.4.94.patch
@@ -0,0 +1,1389 @@
+diff --git a/Makefile b/Makefile
+index 77a17fb24b6d..ff9d6bbf2210 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 93
++SUBLEVEL = 94
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 77edb22f855d..5433ccc9d706 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -18,7 +18,7 @@
+ #include <irq.h>
+
+ #define IRQ_STACK_SIZE THREAD_SIZE
+-#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
++#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
+
+ extern void *irq_stack[NR_CPUS];
+
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index be0cc1beed41..3fae200dd251 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
+ extern atomic_t dcpage_flushes_xcall;
+
+ extern int sysctl_tsb_ratio;
+-#endif
+
++#ifdef CONFIG_SERIAL_SUNHV
++void sunhv_migrate_hvcons_irq(int cpu);
++#endif
++#endif
+ void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 4511caa3b7e9..46866b2097e8 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1443,8 +1443,12 @@ void smp_send_stop(void)
+ int cpu;
+
+ if (tlb_type == hypervisor) {
++ int this_cpu = smp_processor_id();
++#ifdef CONFIG_SERIAL_SUNHV
++ sunhv_migrate_hvcons_irq(this_cpu);
++#endif
+ for_each_online_cpu(cpu) {
+- if (cpu == smp_processor_id())
++ if (cpu == this_cpu)
+ continue;
+ #ifdef CONFIG_SUN_LDOMS
+ if (ldom_domaining_enabled) {
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index 341b8d858e67..650f427d915b 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,6 +147,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+ kfree(job->request_payload.sg_list);
+ failjob_rls_job:
++ kfree(job);
+ return -ENOMEM;
+ }
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 7240821137fd..617bf4a7da56 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -343,6 +343,7 @@ config CRYPTO_XTS
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_GF128MUL
++ select CRYPTO_ECB
+ help
+ XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
+ key size 256, 384 or 512 bits. This implementation currently
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index b1f8a73e5a94..eed1e073d96d 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -241,7 +241,7 @@ config ARM_PXA2xx_CPUFREQ
+
+ config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+- depends on ACPI
++ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ default n
+ help
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 58bf94b69186..273e05a3c933 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1802,6 +1802,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ return -EINVAL;
+ }
+ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
++ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+ } else {
+ port = NULL;
+ req_payload.num_slots = 0;
+@@ -1817,6 +1818,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ if (req_payload.num_slots) {
+ drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
+ mgr->payloads[i].num_slots = req_payload.num_slots;
++ mgr->payloads[i].vcpi = req_payload.vcpi;
+ } else if (mgr->payloads[i].num_slots) {
+ mgr->payloads[i].num_slots = 0;
+ drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 10835d1f559b..dee0fc421054 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -1131,6 +1131,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
+
+ static int at91_twi_resume_noirq(struct device *dev)
+ {
++ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_status_suspended(dev)) {
+@@ -1142,6 +1143,8 @@ static int at91_twi_resume_noirq(struct device *dev)
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
++ at91_init_twi_bus(twi_dev);
++
+ return 0;
+ }
+
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index 02e636a1c49a..475c5a74f2d1 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
+
+ ret = xadc->ops->setup(pdev, indio_dev, irq);
+ if (ret)
+- goto err_free_samplerate_trigger;
++ goto err_clk_disable_unprepare;
+
+ ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
+ dev_name(&pdev->dev), indio_dev);
+@@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
+
+ err_free_irq:
+ free_irq(irq, indio_dev);
++err_clk_disable_unprepare:
++ clk_disable_unprepare(xadc->clk);
+ err_free_samplerate_trigger:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_trigger_free(xadc->samplerate_trigger);
+@@ -1277,8 +1279,6 @@ err_free_convst_trigger:
+ err_triggered_buffer_cleanup:
+ if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ iio_triggered_buffer_cleanup(indio_dev);
+-err_clk_disable_unprepare:
+- clk_disable_unprepare(xadc->clk);
+ err_device_free:
+ kfree(indio_dev->channels);
+
+diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
+index 75573fa431ba..63faee04a008 100644
+--- a/drivers/irqchip/irq-crossbar.c
++++ b/drivers/irqchip/irq-crossbar.c
+@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
+
+ static int __init crossbar_of_init(struct device_node *node)
+ {
+- int i, size, max = 0, reserved = 0, entry;
++ int i, size, reserved = 0;
++ u32 max = 0, entry;
+ const __be32 *irqsr;
+ int ret = -ENOMEM;
+
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index bf3fbd00a091..64b586458d3d 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ isdn_net_local *lp;
+ struct ippp_struct *is;
+ int proto;
+- unsigned char protobuf[4];
+
+ is = file->private_data;
+
+@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ if (!lp)
+ printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
+ else {
+- /*
+- * Don't reset huptimer for
+- * LCP packets. (Echo requests).
+- */
+- if (copy_from_user(protobuf, buf, 4))
+- return -EFAULT;
+- proto = PPP_PROTOCOL(protobuf);
+- if (proto != PPP_LCP)
+- lp->huptimer = 0;
++ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
++ unsigned char protobuf[4];
++ /*
++ * Don't reset huptimer for
++ * LCP packets. (Echo requests).
++ */
++ if (copy_from_user(protobuf, buf, 4))
++ return -EFAULT;
++
++ proto = PPP_PROTOCOL(protobuf);
++ if (proto != PPP_LCP)
++ lp->huptimer = 0;
+
+- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
+ return 0;
++ }
+
+ if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
+ lp->dialstate == 0 &&
+ (lp->flags & ISDN_NET_CONNECTED)) {
+ unsigned short hl;
+ struct sk_buff *skb;
++ unsigned char *cpy_buf;
+ /*
+ * we need to reserve enough space in front of
+ * sk_buff. old call to dev_alloc_skb only reserved
+@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ return count;
+ }
+ skb_reserve(skb, hl);
+- if (copy_from_user(skb_put(skb, count), buf, count))
++ cpy_buf = skb_put(skb, count);
++ if (copy_from_user(cpy_buf, buf, count))
+ {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
++
++ /*
++ * Don't reset huptimer for
++ * LCP packets. (Echo requests).
++ */
++ proto = PPP_PROTOCOL(cpy_buf);
++ if (proto != PPP_LCP)
++ lp->huptimer = 0;
++
+ if (is->debug & 0x40) {
+ printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
+ isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
+diff --git a/drivers/md/linear.c b/drivers/md/linear.c
+index 6ba3227e29b2..7ffb20ec1a46 100644
+--- a/drivers/md/linear.c
++++ b/drivers/md/linear.c
+@@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
+ * oldconf until no one uses it anymore.
+ */
+ mddev_suspend(mddev);
+- oldconf = rcu_dereference(mddev->private);
++ oldconf = rcu_dereference_protected(mddev->private,
++ lockdep_is_held(&mddev->reconfig_mutex));
+ mddev->raid_disks++;
+ WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
+ "copied raid_disks doesn't match mddev->raid_disks");
+diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
+index fdb5cdb3cd15..81abe46c9e0d 100644
+--- a/drivers/net/ethernet/ibm/emac/mal.c
++++ b/drivers/net/ethernet/ibm/emac/mal.c
+@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ unsigned long flags;
+
+ MAL_DBG2(mal, "poll(%d)" NL, budget);
+- again:
++
+ /* Process TX skbs */
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ spin_lock_irqsave(&mal->lock, flags);
+ mal_disable_eob_irq(mal);
+ spin_unlock_irqrestore(&mal->lock, flags);
+- goto again;
+ }
+ mc->ops->poll_tx(mc->dev);
+ }
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 0e67145bc418..4f34e1b79705 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -4415,13 +4415,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+ struct mvpp2_txq_pcpu_buf *tx_buf =
+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
+
+- mvpp2_txq_inc_get(txq_pcpu);
+-
+ dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+ tx_buf->size, DMA_TO_DEVICE);
+- if (!tx_buf->skb)
+- continue;
+- dev_kfree_skb_any(tx_buf->skb);
++ if (tx_buf->skb)
++ dev_kfree_skb_any(tx_buf->skb);
++
++ mvpp2_txq_inc_get(txq_pcpu);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index 1494997c4f7e..4dccf7287f0f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+ }
+ }
+
++#define MLX4_EN_WRAP_AROUND_SEC 10UL
++/* By scheduling the overflow check every 5 seconds, we have a reasonably
++ * good chance we wont miss a wrap around.
++ * TOTO: Use a timer instead of a work queue to increase the guarantee.
++ */
++#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
++
+ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+ {
+ bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+- mdev->overflow_period);
++ MLX4_EN_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ .enable = mlx4_en_phc_enable,
+ };
+
+-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
+
+ /* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+@@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ struct mlx4_dev *dev = mdev->dev;
+ unsigned long flags;
+- u64 ns, zero = 0;
+
+ /* mlx4_en_init_timestamp is called for each netdev.
+ * mdev->ptp_clock is common for all ports, skip initialization if
+@@ -282,13 +287,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ ktime_to_ns(ktime_get_real()));
+ write_unlock_irqrestore(&mdev->clock_lock, flags);
+
+- /* Calculate period in seconds to call the overflow watchdog - to make
+- * sure counter is checked at least once every wrap around.
+- */
+- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
+- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+- mdev->overflow_period = ns;
+-
+ /* Configure the PHC */
+ mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 31c491e02e69..99361352ed0d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -791,8 +791,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
+ return -ENOSYS;
+ }
+
+- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+-
+ dev->caps.hca_core_clock = hca_param.hca_core_clock;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index c41f15102ae0..10aa6544cf4d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -409,7 +409,6 @@ struct mlx4_en_dev {
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ unsigned long last_overflow_check;
+- unsigned long overflow_period;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct notifier_block nb;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index c31d8e74f131..cd191f82d816 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1195,11 +1195,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ switch (tun->flags & TUN_TYPE_MASK) {
+ case IFF_TUN:
+ if (tun->flags & IFF_NO_PI) {
+- switch (skb->data[0] & 0xf0) {
+- case 0x40:
++ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
++
++ switch (ip_version) {
++ case 4:
+ pi.proto = htons(ETH_P_IP);
+ break;
+- case 0x60:
++ case 6:
+ pi.proto = htons(ETH_P_IPV6);
+ break;
+ default:
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 019d7165a045..2a996a68fc2b 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2884,6 +2884,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
+ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ {
+ struct hwsim_new_radio_params param = { 0 };
++ const char *hwname = NULL;
+
+ param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+@@ -2897,8 +2898,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ if (info->attrs[HWSIM_ATTR_NO_VIF])
+ param.no_vif = true;
+
+- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
++ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++ hwname = kasprintf(GFP_KERNEL, "%.*s",
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ if (!hwname)
++ return -ENOMEM;
++ param.hwname = hwname;
++ }
+
+ if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+ param.use_chanctx = true;
+@@ -2926,11 +2933,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ s64 idx = -1;
+ const char *hwname = NULL;
+
+- if (info->attrs[HWSIM_ATTR_RADIO_ID])
++ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
+ idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+- else
++ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++ hwname = kasprintf(GFP_KERNEL, "%.*s",
++ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++ if (!hwname)
++ return -ENOMEM;
++ } else
+ return -EINVAL;
+
+ spin_lock_bh(&hwsim_radio_lock);
+@@ -2939,7 +2950,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ if (data->idx != idx)
+ continue;
+ } else {
+- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
++ if (!hwname ||
++ strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ continue;
+ }
+
+@@ -2947,10 +2959,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ spin_unlock_bh(&hwsim_radio_lock);
+ mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ info);
++ kfree(hwname);
+ return 0;
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
++ kfree(hwname);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
+index e6fb97cb12f4..7c28dc1cb0dd 100644
+--- a/drivers/scsi/device_handler/scsi_dh_emc.c
++++ b/drivers/scsi/device_handler/scsi_dh_emc.c
+@@ -456,7 +456,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+ static int clariion_std_inquiry(struct scsi_device *sdev,
+ struct clariion_dh_data *csdev)
+ {
+- int err;
++ int err = SCSI_DH_OK;
+ char *sp_model;
+
+ err = send_inquiry_cmd(sdev, 0, csdev);
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index 6c88fb021444..4eeb82cf79e4 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
+ */
+ if (cmd->unsolicited_data) {
+ cmd->seq_start_offset = cmd->write_data_done;
+- cmd->seq_end_offset = (cmd->write_data_done +
+- ((cmd->se_cmd.data_length >
+- conn->sess->sess_ops->FirstBurstLength) ?
+- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
++ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
++ conn->sess->sess_ops->FirstBurstLength);
+ return;
+ }
+
+diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
+index e04b57f79df8..0f82c0b146f6 100644
+--- a/drivers/tty/goldfish.c
++++ b/drivers/tty/goldfish.c
+@@ -293,7 +293,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
+ return 0;
+
+ err_tty_register_device_failed:
+- free_irq(irq, qtty);
++ free_irq(irq, pdev);
+ err_request_irq_failed:
+ goldfish_tty_current_line_count--;
+ if (goldfish_tty_current_line_count == 0)
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 4e603d060e80..59828d819145 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
+
+ static struct uart_port *sunhv_port;
+
++void sunhv_migrate_hvcons_irq(int cpu)
++{
++ /* Migrate hvcons irq to param cpu */
++ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
++}
++
+ /* Copy 's' into the con_write_page, decoding "\n" into
+ * "\r\n" along the way. We have to return two lengths
+ * because the caller needs to know how much to advance
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 5bf931ce1353..978098f71761 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
+ unsigned int timeout)
+ {
+ struct kempld_device_data *pld = wdt_data->pld;
+- u32 prescaler = kempld_prescaler[PRESCALER_21];
++ u32 prescaler;
+ u64 stage_timeout64;
+ u32 stage_timeout;
+ u32 remainder;
+ u8 stage_cfg;
+
++#if GCC_VERSION < 40400
++ /* work around a bug compiling do_div() */
++ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
++#else
++ prescaler = kempld_prescaler[PRESCALER_21];
++#endif
++
+ if (!stage)
+ return -EINVAL;
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 63a6152be04b..c5bbb5300658 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1648,6 +1648,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+ {
+ int ret;
+
++ if (ino == BTRFS_FIRST_FREE_OBJECTID)
++ return 1;
++
+ ret = get_cur_inode_state(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+@@ -1833,7 +1836,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
+ * not delted and then re-created, if it was then we have no overwrite
+ * and we can just unlink this entry.
+ */
+- if (sctx->parent_root) {
++ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
+ NULL, NULL, NULL);
+ if (ret < 0 && ret != -ENOENT)
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index f54f77037d22..ead89489ae71 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1845,13 +1845,18 @@ static int build_dentry_path(struct dentry *dentry,
+ int *pfreepath)
+ {
+ char *path;
++ struct inode *dir;
+
+- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
+- *pino = ceph_ino(d_inode(dentry->d_parent));
++ rcu_read_lock();
++ dir = d_inode_rcu(dentry->d_parent);
++ if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
++ *pino = ceph_ino(dir);
++ rcu_read_unlock();
+ *ppath = dentry->d_name.name;
+ *ppathlen = dentry->d_name.len;
+ return 0;
+ }
++ rcu_read_unlock();
+ path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 972eab7ac071..98b2fc2678ff 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1416,7 +1416,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
+ goto fail;
+ }
+ repeat:
+- page = grab_cache_page_write_begin(mapping, index, flags);
++ /*
++ * Do not use grab_cache_page_write_begin() to avoid deadlock due to
++ * wait_for_stable_page. Will wait that below with our IO control.
++ */
++ page = pagecache_get_page(mapping, index,
++ FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
+ if (!page) {
+ err = -ENOMEM;
+ goto fail;
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 15bdc2d48cfe..24ace275160c 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -696,6 +696,14 @@ int set_callback_cred(void)
+ return 0;
+ }
+
++void cleanup_callback_cred(void)
++{
++ if (callback_cred) {
++ put_rpccred(callback_cred);
++ callback_cred = NULL;
++ }
++}
++
+ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
+ {
+ if (clp->cl_minorversion == 0) {
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 9e5a6842346e..ca9ebc3242d3 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -6792,23 +6792,24 @@ nfs4_state_start(void)
+
+ ret = set_callback_cred();
+ if (ret)
+- return -ENOMEM;
++ return ret;
++
+ laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
+ if (laundry_wq == NULL) {
+ ret = -ENOMEM;
+- goto out_recovery;
++ goto out_cleanup_cred;
+ }
+ ret = nfsd4_create_callback_queue();
+ if (ret)
+ goto out_free_laundry;
+
+ set_max_delegations();
+-
+ return 0;
+
+ out_free_laundry:
+ destroy_workqueue(laundry_wq);
+-out_recovery:
++out_cleanup_cred:
++ cleanup_callback_cred();
+ return ret;
+ }
+
+@@ -6847,6 +6848,7 @@ nfs4_state_shutdown(void)
+ {
+ destroy_workqueue(laundry_wq);
+ nfsd4_destroy_callback_queue();
++ cleanup_callback_cred();
+ }
+
+ static void
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 5134eedcb16c..86af697c21d3 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -595,6 +595,7 @@ extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
+ extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
+ struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
+ extern int set_callback_cred(void);
++extern void cleanup_callback_cred(void);
+ extern void nfsd4_probe_callback(struct nfs4_client *clp);
+ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
+ extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 60a5f1548cd9..555b57a16499 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -531,6 +531,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
+ init_waitqueue_head(&res->l_event);
+ INIT_LIST_HEAD(&res->l_blocked_list);
+ INIT_LIST_HEAD(&res->l_mask_waiters);
++ INIT_LIST_HEAD(&res->l_holders);
+ }
+
+ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+@@ -748,6 +749,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
+ res->l_flags = 0UL;
+ }
+
++/*
++ * Keep a list of processes who have interest in a lockres.
++ * Note: this is now only uesed for check recursive cluster locking.
++ */
++static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
++ struct ocfs2_lock_holder *oh)
++{
++ INIT_LIST_HEAD(&oh->oh_list);
++ oh->oh_owner_pid = get_pid(task_pid(current));
++
++ spin_lock(&lockres->l_lock);
++ list_add_tail(&oh->oh_list, &lockres->l_holders);
++ spin_unlock(&lockres->l_lock);
++}
++
++static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
++ struct ocfs2_lock_holder *oh)
++{
++ spin_lock(&lockres->l_lock);
++ list_del(&oh->oh_list);
++ spin_unlock(&lockres->l_lock);
++
++ put_pid(oh->oh_owner_pid);
++}
++
++static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
++{
++ struct ocfs2_lock_holder *oh;
++ struct pid *pid;
++
++ /* look in the list of holders for one with the current task as owner */
++ spin_lock(&lockres->l_lock);
++ pid = task_pid(current);
++ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
++ if (oh->oh_owner_pid == pid) {
++ spin_unlock(&lockres->l_lock);
++ return 1;
++ }
++ }
++ spin_unlock(&lockres->l_lock);
++
++ return 0;
++}
++
+ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
+ int level)
+ {
+@@ -2343,8 +2388,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ goto getbh;
+ }
+
+- if (ocfs2_mount_local(osb))
+- goto local;
++ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
++ ocfs2_mount_local(osb))
++ goto update;
+
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+@@ -2373,7 +2419,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+
+-local:
++update:
+ /*
+ * We only see this flag if we're being called from
+ * ocfs2_read_locked_inode(). It means we're locking an inode
+@@ -2515,6 +2561,59 @@ void ocfs2_inode_unlock(struct inode *inode,
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+ }
+
++/*
++ * This _tracker variantes are introduced to deal with the recursive cluster
++ * locking issue. The idea is to keep track of a lock holder on the stack of
++ * the current process. If there's a lock holder on the stack, we know the
++ * task context is already protected by cluster locking. Currently, they're
++ * used in some VFS entry routines.
++ *
++ * return < 0 on error, return == 0 if there's no lock holder on the stack
++ * before this call, return == 1 if this call would be a recursive locking.
++ */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++ struct buffer_head **ret_bh,
++ int ex,
++ struct ocfs2_lock_holder *oh)
++{
++ int status;
++ int arg_flags = 0, has_locked;
++ struct ocfs2_lock_res *lockres;
++
++ lockres = &OCFS2_I(inode)->ip_inode_lockres;
++ has_locked = ocfs2_is_locked_by_me(lockres);
++ /* Just get buffer head if the cluster lock has been taken */
++ if (has_locked)
++ arg_flags = OCFS2_META_LOCK_GETBH;
++
++ if (likely(!has_locked || ret_bh)) {
++ status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
++ if (status < 0) {
++ if (status != -ENOENT)
++ mlog_errno(status);
++ return status;
++ }
++ }
++ if (!has_locked)
++ ocfs2_add_holder(lockres, oh);
++
++ return has_locked;
++}
++
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++ int ex,
++ struct ocfs2_lock_holder *oh,
++ int had_lock)
++{
++ struct ocfs2_lock_res *lockres;
++
++ lockres = &OCFS2_I(inode)->ip_inode_lockres;
++ if (!had_lock) {
++ ocfs2_remove_holder(lockres, oh);
++ ocfs2_inode_unlock(inode, ex);
++ }
++}
++
+ int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
+ {
+ struct ocfs2_lock_res *lockres;
+diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
+index d293a22c32c5..a7fc18ba0dc1 100644
+--- a/fs/ocfs2/dlmglue.h
++++ b/fs/ocfs2/dlmglue.h
+@@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
+ __be32 lvb_os_seqno;
+ };
+
++struct ocfs2_lock_holder {
++ struct list_head oh_list;
++ struct pid *oh_owner_pid;
++};
++
+ /* ocfs2_inode_lock_full() 'arg_flags' flags */
+ /* don't wait on recovery. */
+ #define OCFS2_META_LOCK_RECOVERY (0x01)
+@@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
+ #define OCFS2_META_LOCK_NOQUEUE (0x02)
+ /* don't block waiting for the downconvert thread, instead return -EAGAIN */
+ #define OCFS2_LOCK_NONBLOCK (0x04)
++/* just get back disk inode bh if we've got cluster lock. */
++#define OCFS2_META_LOCK_GETBH (0x08)
+
+ /* Locking subclasses of inode cluster lock */
+ enum {
+@@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+
+ /* To set the locking protocol on module initialization */
+ void ocfs2_set_locking_protocol(void);
++
++/* The _tracker pair is used to avoid cluster recursive locking */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++ struct buffer_head **ret_bh,
++ int ex,
++ struct ocfs2_lock_holder *oh);
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++ int ex,
++ struct ocfs2_lock_holder *oh,
++ int had_lock);
++
+ #endif /* DLMGLUE_H */
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index 7a0126267847..2495066a9ca3 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -172,6 +172,7 @@ struct ocfs2_lock_res {
+
+ struct list_head l_blocked_list;
+ struct list_head l_mask_waiters;
++ struct list_head l_holders;
+
+ unsigned long l_flags;
+ char l_name[OCFS2_LOCK_ID_MAX_LEN];
+diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
+index 4d9f233c4ba8..7d58ffdacd62 100644
+--- a/include/asm-generic/percpu.h
++++ b/include/asm-generic/percpu.h
+@@ -105,15 +105,35 @@ do { \
+ (__ret); \
+ })
+
+-#define this_cpu_generic_read(pcp) \
++#define __this_cpu_generic_read_nopreempt(pcp) \
+ ({ \
+ typeof(pcp) __ret; \
+ preempt_disable(); \
+- __ret = *this_cpu_ptr(&(pcp)); \
++ __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
+ preempt_enable(); \
+ __ret; \
+ })
+
++#define __this_cpu_generic_read_noirq(pcp) \
++({ \
++ typeof(pcp) __ret; \
++ unsigned long __flags; \
++ raw_local_irq_save(__flags); \
++ __ret = *raw_cpu_ptr(&(pcp)); \
++ raw_local_irq_restore(__flags); \
++ __ret; \
++})
++
++#define this_cpu_generic_read(pcp) \
++({ \
++ typeof(pcp) __ret; \
++ if (__native_word(pcp)) \
++ __ret = __this_cpu_generic_read_nopreempt(pcp); \
++ else \
++ __ret = __this_cpu_generic_read_noirq(pcp); \
++ __ret; \
++})
++
+ #define this_cpu_generic_to_op(pcp, val, op) \
+ do { \
+ unsigned long __flags; \
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 925730bc9fc1..311176f290b2 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -301,6 +301,7 @@ struct trace_event_call {
+ int perf_refcount;
+ struct hlist_head __percpu *perf_events;
+ struct bpf_prog *prog;
++ struct perf_event *bpf_prog_owner;
+
+ int (*perf_perm)(struct trace_event_call *,
+ struct perf_event *);
+diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
+index cccdcfd14973..f348c736e6e0 100644
+--- a/include/net/sctp/ulpevent.h
++++ b/include/net/sctp/ulpevent.h
+@@ -141,8 +141,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+ static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ struct sctp_event_subscribe *mask)
+ {
++ int offset = sn_type - SCTP_SN_TYPE_BASE;
+ char *amask = (char *) mask;
+- return amask[sn_type - SCTP_SN_TYPE_BASE];
++
++ if (offset >= sizeof(struct sctp_event_subscribe))
++ return 0;
++ return amask[offset];
+ }
+
+ /* Given an event subscription, is this event enabled? */
+diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
+index ce91215cf7e6..e0b566dc72ef 100644
+--- a/include/uapi/linux/mroute6.h
++++ b/include/uapi/linux/mroute6.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/types.h>
+ #include <linux/sockios.h>
++#include <linux/in6.h> /* For struct sockaddr_in6. */
+
+ /*
+ * Based on the MROUTING 3.5 defines primarily to keep
+diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
+index 0f9265cb2a96..7af20a136429 100644
+--- a/include/uapi/linux/rds.h
++++ b/include/uapi/linux/rds.h
+@@ -35,6 +35,7 @@
+ #define _LINUX_RDS_H
+
+ #include <linux/types.h>
++#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
+
+ #define RDS_IB_ABI_VERSION 0x301
+
+@@ -223,7 +224,7 @@ struct rds_get_mr_args {
+ };
+
+ struct rds_get_mr_for_dest_args {
+- struct sockaddr_storage dest_addr;
++ struct __kernel_sockaddr_storage dest_addr;
+ struct rds_iovec vec;
+ uint64_t cookie_addr;
+ uint64_t flags;
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 863e24f1e62e..70dc6dcf8649 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1033,7 +1033,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
+ }
+ } else {
+ if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
+- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
++ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
++ BPF_CLASS(insn->code) == BPF_ALU64) {
+ verbose("BPF_END uses reserved fields\n");
+ return -EINVAL;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 3697063dd09a..8f75386e61a7 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7108,6 +7108,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
+ }
+
+ event->tp_event->prog = prog;
++ event->tp_event->bpf_prog_owner = event;
+
+ return 0;
+ }
+@@ -7120,7 +7121,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
+ return;
+
+ prog = event->tp_event->prog;
+- if (prog) {
++ if (prog && event->tp_event->bpf_prog_owner == event) {
+ event->tp_event->prog = NULL;
+ bpf_prog_put_rcu(prog);
+ }
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 60ace56618f6..0e2c4911ba61 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3128,10 +3128,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+- if (hlock->references)
++ if (hlock->references) {
++ /*
++ * Check: unsigned int references:12, overflow.
++ */
++ if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
++ return 0;
++
+ hlock->references++;
+- else
++ } else {
+ hlock->references = 2;
++ }
+
+ return 1;
+ }
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index bec2fce9fafc..01e7246de8df 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -250,7 +250,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ {
+ struct kmem_cache *s;
+
+- if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
++ if (slab_nomerge)
+ return NULL;
+
+ if (ctor)
+@@ -261,6 +261,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ size = ALIGN(size, align);
+ flags = kmem_cache_flags(size, flags, name, NULL);
+
++ if (flags & SLAB_NEVER_MERGE)
++ return NULL;
++
+ list_for_each_entry_reverse(s, &slab_caches, list) {
+ if (slab_unmergeable(s))
+ continue;
+diff --git a/net/core/sock.c b/net/core/sock.c
+index bd2fad27891e..cd12cb6fe366 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1516,6 +1516,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+
+ sock_copy(newsk, sk);
+
++ newsk->sk_prot_creator = sk->sk_prot;
++
+ /* SANITY */
+ if (likely(newsk->sk_net_refcnt))
+ get_net(sock_net(newsk));
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 65036891e080..a03f834f16d5 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct ip_tunnel_parm *parms = &tunnel->parms;
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *tdev; /* Device to other host */
++ int pkt_len = skb->len;
+ int err;
+
+ if (!dst) {
+@@ -199,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+
+ err = dst_output(tunnel->net, skb->sk, skb);
+ if (net_xmit_eval(err) == 0)
+- err = skb->len;
++ err = pkt_len;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ return NETDEV_TX_OK;
+
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index e89135828c3d..eab117033b8a 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1173,24 +1173,25 @@ static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+ }
+
+ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
+- unsigned short type,
+- const void *daddr, const void *saddr, unsigned int len)
++ unsigned short type, const void *daddr,
++ const void *saddr, unsigned int len)
+ {
+ struct ip6_tnl *t = netdev_priv(dev);
+- struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
+- __be16 *p = (__be16 *)(ipv6h+1);
++ struct ipv6hdr *ipv6h;
++ __be16 *p;
+
+- ip6_flow_hdr(ipv6h, 0,
+- ip6_make_flowlabel(dev_net(dev), skb,
+- t->fl.u.ip6.flowlabel, true,
+- &t->fl.u.ip6));
++ ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen + sizeof(*ipv6h));
++ ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
++ t->fl.u.ip6.flowlabel,
++ true, &t->fl.u.ip6));
+ ipv6h->hop_limit = t->parms.hop_limit;
+ ipv6h->nexthdr = NEXTHDR_GRE;
+ ipv6h->saddr = t->parms.laddr;
+ ipv6h->daddr = t->parms.raddr;
+
+- p[0] = t->parms.o_flags;
+- p[1] = htons(type);
++ p = (__be16 *)(ipv6h + 1);
++ p[0] = t->parms.o_flags;
++ p[1] = htons(type);
+
+ /*
+ * Set the source hardware address.
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index bdcc4d9cedd3..7ebb14def2cb 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -434,6 +434,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *tdev;
+ struct xfrm_state *x;
++ int pkt_len = skb->len;
+ int err = -1;
+ int mtu;
+
+@@ -487,7 +488,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+- tstats->tx_bytes += skb->len;
++ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+ } else {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 6fd4af3b5b79..6eb1e9293b6f 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1007,6 +1007,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
+ */
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
++ csum = skb->csum;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index d3dec414fd44..d48281ca9c72 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1321,6 +1321,9 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ struct sock *sk = NULL;
+
+ tunnel = container_of(work, struct l2tp_tunnel, del_work);
++
++ l2tp_tunnel_closeall(tunnel);
++
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+ goto out;
+@@ -1640,15 +1643,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+
+ /* This function is used by the netlink TUNNEL_DELETE command.
+ */
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
+- l2tp_tunnel_inc_refcount(tunnel);
+- l2tp_tunnel_closeall(tunnel);
+- if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+- l2tp_tunnel_dec_refcount(tunnel);
+- return 1;
++ if (!test_and_set_bit(0, &tunnel->dead)) {
++ l2tp_tunnel_inc_refcount(tunnel);
++ queue_work(l2tp_wq, &tunnel->del_work);
+ }
+- return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 555d962a62d2..9cf546846edb 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -169,6 +169,9 @@ struct l2tp_tunnel_cfg {
+
+ struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
++
++ unsigned long dead;
++
+ struct rcu_head rcu;
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+@@ -253,7 +256,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ struct l2tp_tunnel **tunnelp);
+ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+ struct l2tp_session *l2tp_session_create(int priv_size,
+ struct l2tp_tunnel *tunnel,
+ u32 session_id, u32 peer_session_id,
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 63ea6cbac5ad..7e7b9ef29d8d 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -661,7 +661,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
+ }
+
+ /* No need to do anything if the driver does all */
+- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
++ if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
+ return;
+
+ if (sta->dead)
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index acf5c7b3f378..7f16d19d6198 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -395,7 +395,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ struct net *net = nf_ct_exp_net(expect);
+ struct hlist_node *next;
+ unsigned int h;
+- int ret = 1;
++ int ret = 0;
+
+ if (!master_help) {
+ ret = -ESHUTDOWN;
+@@ -445,7 +445,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
+
+ spin_lock_bh(&nf_conntrack_expect_lock);
+ ret = __nf_ct_expect_check(expect);
+- if (ret <= 0)
++ if (ret < 0)
+ goto out;
+
+ ret = nf_ct_expect_insert(expect);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b70055fc30cb..241f69039a72 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1652,10 +1652,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+
+ mutex_lock(&fanout_mutex);
+
+- err = -EINVAL;
+- if (!po->running)
+- goto out;
+-
+ err = -EALREADY;
+ if (po->fanout)
+ goto out;
+@@ -1704,7 +1700,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ list_add(&match->list, &fanout_list);
+ }
+ err = -EINVAL;
+- if (match->type == type &&
++
++ spin_lock(&po->bind_lock);
++ if (po->running &&
++ match->type == type &&
+ match->prot_hook.type == po->prot_hook.type &&
+ match->prot_hook.dev == po->prot_hook.dev) {
+ err = -ENOSPC;
+@@ -1716,6 +1715,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ err = 0;
+ }
+ }
++ spin_unlock(&po->bind_lock);
++
++ if (err && !atomic_read(&match->sk_ref)) {
++ list_del(&match->list);
++ kfree(match);
++ }
++
+ out:
+ if (err && rollover) {
+ kfree(rollover);
+@@ -2650,6 +2656,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ int vnet_hdr_len;
+ struct packet_sock *po = pkt_sk(sk);
+ unsigned short gso_type = 0;
++ bool has_vnet_hdr = false;
+ int hlen, tlen, linear;
+ int extra_len = 0;
+ ssize_t n;
+@@ -2737,6 +2744,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out_unlock;
+
+ }
++ has_vnet_hdr = true;
+ }
+
+ if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
+@@ -2796,7 +2804,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+
+ packet_pick_tx_queue(dev, skb);
+
+- if (po->has_vnet_hdr) {
++ if (has_vnet_hdr) {
+ if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
+ u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
+@@ -2938,13 +2946,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ int ret = 0;
+ bool unlisted = false;
+
+- if (po->fanout)
+- return -EINVAL;
+-
+ lock_sock(sk);
+ spin_lock(&po->bind_lock);
+ rcu_read_lock();
+
++ if (po->fanout) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
+ if (name) {
+ dev = dev_get_by_name_rcu(sock_net(sk), name);
+ if (!dev) {
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index 8740930f0787..67bddcb2ff46 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -541,7 +541,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
+ return false;
+ if (msg_errcode(msg))
+ return false;
+- *err = -TIPC_ERR_NO_NAME;
++ *err = TIPC_ERR_NO_NAME;
+ if (skb_linearize(skb))
+ return false;
+ msg = buf_msg(skb);