From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:04
Message-Id: 1572357542.54b88fb2da49dc5bca36967ceb0785e32f41f8ee.mpagano@gentoo
1 commit: 54b88fb2da49dc5bca36967ceb0785e32f41f8ee
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Aug 9 17:34:19 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 13:59:02 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54b88fb2
7
8 Linux patch 4.14.138
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1137_linux-4.14.138.patch | 1356 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1360 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5aa7458..2b98c17 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -591,6 +591,10 @@ Patch: 1136_linux-4.14.137.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.137
23
24 +Patch: 1137_linux-4.14.138.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.138
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1137_linux-4.14.138.patch b/1137_linux-4.14.138.patch
33 new file mode 100644
34 index 0000000..6fa2c7d
35 --- /dev/null
36 +++ b/1137_linux-4.14.138.patch
37 @@ -0,0 +1,1356 @@
38 +diff --git a/Makefile b/Makefile
39 +index ff604059b6a8..82ae13348266 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 137
47 ++SUBLEVEL = 138
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
52 +index a7883676f675..b144a6a5d352 100644
53 +--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
54 ++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
55 +@@ -115,10 +115,14 @@
56 + };
57 +
58 + &i2c2 {
59 ++ pinctrl-names = "default";
60 ++ pinctrl-0 = <&i2c2_pins>;
61 + clock-frequency = <400000>;
62 + };
63 +
64 + &i2c3 {
65 ++ pinctrl-names = "default";
66 ++ pinctrl-0 = <&i2c3_pins>;
67 + clock-frequency = <400000>;
68 + };
69 +
70 +@@ -241,6 +245,18 @@
71 + OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
72 + >;
73 + };
74 ++ i2c2_pins: pinmux_i2c2_pins {
75 ++ pinctrl-single,pins = <
76 ++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
77 ++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
78 ++ >;
79 ++ };
80 ++ i2c3_pins: pinmux_i2c3_pins {
81 ++ pinctrl-single,pins = <
82 ++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
83 ++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
84 ++ >;
85 ++ };
86 + };
87 +
88 + &omap3_pmx_core2 {
89 +diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
90 +index cf22b35f0a28..fe4cbdc72359 100644
91 +--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
92 ++++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
93 +@@ -121,10 +121,14 @@
94 + };
95 +
96 + &i2c2 {
97 ++ pinctrl-names = "default";
98 ++ pinctrl-0 = <&i2c2_pins>;
99 + clock-frequency = <400000>;
100 + };
101 +
102 + &i2c3 {
103 ++ pinctrl-names = "default";
104 ++ pinctrl-0 = <&i2c3_pins>;
105 + clock-frequency = <400000>;
106 + at24@50 {
107 + compatible = "atmel,24c64";
108 +@@ -219,6 +223,18 @@
109 + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
110 + >;
111 + };
112 ++ i2c2_pins: pinmux_i2c2_pins {
113 ++ pinctrl-single,pins = <
114 ++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
115 ++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
116 ++ >;
117 ++ };
118 ++ i2c3_pins: pinmux_i2c3_pins {
119 ++ pinctrl-single,pins = <
120 ++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
121 ++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
122 ++ >;
123 ++ };
124 + };
125 +
126 + &uart2 {
127 +diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
128 +index c5bc80a03515..5048c7a55eef 100644
129 +--- a/arch/arm64/include/asm/cpufeature.h
130 ++++ b/arch/arm64/include/asm/cpufeature.h
131 +@@ -44,9 +44,10 @@
132 + */
133 +
134 + enum ftr_type {
135 +- FTR_EXACT, /* Use a predefined safe value */
136 +- FTR_LOWER_SAFE, /* Smaller value is safe */
137 +- FTR_HIGHER_SAFE,/* Bigger value is safe */
138 ++ FTR_EXACT, /* Use a predefined safe value */
139 ++ FTR_LOWER_SAFE, /* Smaller value is safe */
140 ++ FTR_HIGHER_SAFE, /* Bigger value is safe */
141 ++ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
142 + };
143 +
144 + #define FTR_STRICT true /* SANITY check strict matching required */
145 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
146 +index 29b5b72b7877..3312d46fa29e 100644
147 +--- a/arch/arm64/kernel/cpufeature.c
148 ++++ b/arch/arm64/kernel/cpufeature.c
149 +@@ -178,8 +178,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
150 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
151 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
152 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
153 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
154 +- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
155 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0), /* CWG */
156 ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0), /* ERG */
157 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
158 + /*
159 + * Linux can handle differing I-cache policies. Userspace JITs will
160 +@@ -411,6 +411,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
161 + case FTR_LOWER_SAFE:
162 + ret = new < cur ? new : cur;
163 + break;
164 ++ case FTR_HIGHER_OR_ZERO_SAFE:
165 ++ if (!cur || !new)
166 ++ break;
167 ++ /* Fallthrough */
168 + case FTR_HIGHER_SAFE:
169 + ret = new > cur ? new : cur;
170 + break;
171 +diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
172 +index fc72b763fdd7..2b29598791e8 100644
173 +--- a/drivers/atm/iphase.c
174 ++++ b/drivers/atm/iphase.c
175 +@@ -63,6 +63,7 @@
176 + #include <asm/byteorder.h>
177 + #include <linux/vmalloc.h>
178 + #include <linux/jiffies.h>
179 ++#include <linux/nospec.h>
180 + #include "iphase.h"
181 + #include "suni.h"
182 + #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
183 +@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
184 + }
185 + if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
186 + board = ia_cmds.status;
187 +- if ((board < 0) || (board > iadev_count))
188 +- board = 0;
189 ++
190 ++ if ((board < 0) || (board > iadev_count))
191 ++ board = 0;
192 ++ board = array_index_nospec(board, iadev_count + 1);
193 ++
194 + iadev = ia_dev[board];
195 + switch (ia_cmds.cmd) {
196 + case MEMDUMP:
197 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
198 +index 28ae3dc57103..1e2e6e58256a 100644
199 +--- a/drivers/hid/hid-ids.h
200 ++++ b/drivers/hid/hid-ids.h
201 +@@ -537,6 +537,7 @@
202 + #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
203 + #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
204 + #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
205 ++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
206 +
207 + #define USB_VENDOR_ID_HUION 0x256c
208 + #define USB_DEVICE_ID_HUION_TABLET 0x006e
209 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
210 +index e10eda031b01..7b5c6bd92d56 100644
211 +--- a/drivers/hid/usbhid/hid-quirks.c
212 ++++ b/drivers/hid/usbhid/hid-quirks.c
213 +@@ -100,6 +100,7 @@ static const struct hid_blacklist {
214 + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
215 + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
216 + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL },
217 ++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641, HID_QUIRK_ALWAYS_POLL },
218 + { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
219 + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
220 + { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
221 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
222 +index c2fb08bba296..60e2d4cf1fe3 100644
223 +--- a/drivers/hid/wacom_wac.c
224 ++++ b/drivers/hid/wacom_wac.c
225 +@@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
226 + */
227 + buttons = (data[4] << 1) | (data[3] & 0x01);
228 + } else if (features->type == CINTIQ_COMPANION_2) {
229 +- /* d-pad right -> data[4] & 0x10
230 +- * d-pad up -> data[4] & 0x20
231 +- * d-pad left -> data[4] & 0x40
232 +- * d-pad down -> data[4] & 0x80
233 +- * d-pad center -> data[3] & 0x01
234 ++ /* d-pad right -> data[2] & 0x10
235 ++ * d-pad up -> data[2] & 0x20
236 ++ * d-pad left -> data[2] & 0x40
237 ++ * d-pad down -> data[2] & 0x80
238 ++ * d-pad center -> data[1] & 0x01
239 + */
240 + buttons = ((data[2] >> 4) << 7) |
241 +- ((data[1] & 0x04) << 6) |
242 ++ ((data[1] & 0x04) << 4) |
243 + ((data[2] & 0x0F) << 2) |
244 + (data[1] & 0x03);
245 + } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
246 +diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
247 +index 40475ebf3a61..aadaa9e84eee 100644
248 +--- a/drivers/infiniband/core/addr.c
249 ++++ b/drivers/infiniband/core/addr.c
250 +@@ -794,14 +794,13 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
251 + struct net_device *dev;
252 +
253 + union {
254 +- struct sockaddr _sockaddr;
255 + struct sockaddr_in _sockaddr_in;
256 + struct sockaddr_in6 _sockaddr_in6;
257 + } sgid_addr, dgid_addr;
258 +
259 +
260 +- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
261 +- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
262 ++ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
263 ++ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
264 +
265 + memset(&dev_addr, 0, sizeof(dev_addr));
266 + if (if_index)
267 +@@ -810,8 +809,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
268 +
269 + ctx.addr = &dev_addr;
270 + init_completion(&ctx.comp);
271 +- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
272 +- &dev_addr, 1000, resolve_cb, &ctx);
273 ++ ret = rdma_resolve_ip(&self, (struct sockaddr *)&sgid_addr,
274 ++ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
275 ++ resolve_cb, &ctx);
276 + if (ret)
277 + return ret;
278 +
279 +@@ -841,16 +841,15 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
280 + int ret = 0;
281 + struct rdma_dev_addr dev_addr;
282 + union {
283 +- struct sockaddr _sockaddr;
284 + struct sockaddr_in _sockaddr_in;
285 + struct sockaddr_in6 _sockaddr_in6;
286 + } gid_addr;
287 +
288 +- rdma_gid2ip(&gid_addr._sockaddr, sgid);
289 ++ rdma_gid2ip((struct sockaddr *)&gid_addr, sgid);
290 +
291 + memset(&dev_addr, 0, sizeof(dev_addr));
292 + dev_addr.net = &init_net;
293 +- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
294 ++ ret = rdma_translate_ip((struct sockaddr *)&gid_addr, &dev_addr, vlan_id);
295 + if (ret)
296 + return ret;
297 +
298 +diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
299 +index b81d2597f563..50068b0a91fa 100644
300 +--- a/drivers/infiniband/core/sa_query.c
301 ++++ b/drivers/infiniband/core/sa_query.c
302 +@@ -1263,7 +1263,6 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
303 + &init_net
304 + };
305 + union {
306 +- struct sockaddr _sockaddr;
307 + struct sockaddr_in _sockaddr_in;
308 + struct sockaddr_in6 _sockaddr_in6;
309 + } sgid_addr, dgid_addr;
310 +@@ -1271,12 +1270,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
311 + if (!device->get_netdev)
312 + return -EOPNOTSUPP;
313 +
314 +- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
315 +- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
316 ++ rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
317 ++ rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
318 +
319 + /* validate the route */
320 +- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
321 +- &dgid_addr._sockaddr, &dev_addr);
322 ++ ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
323 ++ (struct sockaddr *)&dgid_addr,
324 ++ &dev_addr);
325 + if (ret)
326 + return ret;
327 +
328 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
329 +index d0249e463338..ca29a6b76291 100644
330 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
331 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
332 +@@ -83,7 +83,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
333 + struct iphdr ipv4;
334 + const struct ib_global_route *ib_grh;
335 + union {
336 +- struct sockaddr _sockaddr;
337 + struct sockaddr_in _sockaddr_in;
338 + struct sockaddr_in6 _sockaddr_in6;
339 + } sgid_addr, dgid_addr;
340 +@@ -133,9 +132,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
341 + ipv4.tot_len = htons(0);
342 + ipv4.ttl = ib_grh->hop_limit;
343 + ipv4.protocol = nxthdr;
344 +- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
345 ++ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
346 + ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
347 +- rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
348 ++ rdma_gid2ip((struct sockaddr*)&dgid_addr, &ib_grh->dgid);
349 + ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
350 + memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
351 + } else {
352 +diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
353 +index 65b166cc7437..1ba296aeabca 100644
354 +--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
355 ++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
356 +@@ -2508,7 +2508,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
357 + u32 vlan_id = 0xFFFF;
358 + u8 mac_addr[6], hdr_type;
359 + union {
360 +- struct sockaddr _sockaddr;
361 + struct sockaddr_in _sockaddr_in;
362 + struct sockaddr_in6 _sockaddr_in6;
363 + } sgid_addr, dgid_addr;
364 +@@ -2556,8 +2555,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
365 +
366 + hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
367 + if (hdr_type == RDMA_NETWORK_IPV4) {
368 +- rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
369 +- rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
370 ++ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
371 ++ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
372 + memcpy(&cmd->params.dgid[0],
373 + &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
374 + memcpy(&cmd->params.sgid[0],
375 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
376 +index 64828d1438ab..17b825f73c52 100644
377 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
378 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
379 +@@ -1934,7 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
380 + }
381 +
382 + /* select a non-FCoE queue */
383 +- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
384 ++ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
385 + }
386 +
387 + void bnx2x_set_num_queues(struct bnx2x *bp)
388 +diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
389 +index e9aa8080a67a..d1eede2625ca 100644
390 +--- a/drivers/net/ethernet/marvell/mvpp2.c
391 ++++ b/drivers/net/ethernet/marvell/mvpp2.c
392 +@@ -6952,6 +6952,7 @@ log_error:
393 + static int mvpp2_change_mtu(struct net_device *dev, int mtu)
394 + {
395 + struct mvpp2_port *port = netdev_priv(dev);
396 ++ bool running = netif_running(dev);
397 + int err;
398 +
399 + if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
400 +@@ -6960,40 +6961,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
401 + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
402 + }
403 +
404 +- if (!netif_running(dev)) {
405 +- err = mvpp2_bm_update_mtu(dev, mtu);
406 +- if (!err) {
407 +- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
408 +- return 0;
409 +- }
410 +-
411 +- /* Reconfigure BM to the original MTU */
412 +- err = mvpp2_bm_update_mtu(dev, dev->mtu);
413 +- if (err)
414 +- goto log_error;
415 +- }
416 +-
417 +- mvpp2_stop_dev(port);
418 ++ if (running)
419 ++ mvpp2_stop_dev(port);
420 +
421 + err = mvpp2_bm_update_mtu(dev, mtu);
422 +- if (!err) {
423 ++ if (err) {
424 ++ netdev_err(dev, "failed to change MTU\n");
425 ++ /* Reconfigure BM to the original MTU */
426 ++ mvpp2_bm_update_mtu(dev, dev->mtu);
427 ++ } else {
428 + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
429 +- goto out_start;
430 + }
431 +
432 +- /* Reconfigure BM to the original MTU */
433 +- err = mvpp2_bm_update_mtu(dev, dev->mtu);
434 +- if (err)
435 +- goto log_error;
436 +-
437 +-out_start:
438 +- mvpp2_start_dev(port);
439 +- mvpp2_egress_enable(port);
440 +- mvpp2_ingress_enable(port);
441 ++ if (running) {
442 ++ mvpp2_start_dev(port);
443 ++ mvpp2_egress_enable(port);
444 ++ mvpp2_ingress_enable(port);
445 ++ }
446 +
447 +- return 0;
448 +-log_error:
449 +- netdev_err(dev, "failed to change MTU\n");
450 + return err;
451 + }
452 +
453 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
454 +index 07fda3984e10..bc8de24c56de 100644
455 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
456 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
457 +@@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
458 + struct mlx5_interface *intf;
459 +
460 + mutex_lock(&mlx5_intf_mutex);
461 +- list_for_each_entry(intf, &intf_list, list)
462 ++ list_for_each_entry_reverse(intf, &intf_list, list)
463 + mlx5_remove_device(intf, priv);
464 + list_del(&priv->dev_list);
465 + mutex_unlock(&mlx5_intf_mutex);
466 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
467 +index 47003ea4ed65..5103b82fe6c5 100644
468 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
469 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
470 +@@ -473,13 +473,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
471 + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
472 + {
473 + struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
474 +- u64 bytes, packets, lastuse = 0;
475 + struct mlx5e_tc_flow *flow;
476 + struct mlx5e_encap_entry *e;
477 + struct mlx5_fc *counter;
478 + struct neigh_table *tbl;
479 + bool neigh_used = false;
480 + struct neighbour *n;
481 ++ u64 lastuse;
482 +
483 + if (m_neigh->family == AF_INET)
484 + tbl = &arp_tbl;
485 +@@ -496,7 +496,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
486 + list_for_each_entry(flow, &e->flows, encap) {
487 + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
488 + counter = mlx5_flow_rule_counter(flow->rule);
489 +- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
490 ++ lastuse = mlx5_fc_query_lastuse(counter);
491 + if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
492 + neigh_used = true;
493 + break;
494 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
495 +index 89d1f8650033..966ba3f29ed7 100644
496 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
497 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
498 +@@ -312,6 +312,11 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
499 + }
500 + }
501 +
502 ++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
503 ++{
504 ++ return counter->cache.lastuse;
505 ++}
506 ++
507 + void mlx5_fc_query_cached(struct mlx5_fc *counter,
508 + u64 *bytes, u64 *packets, u64 *lastuse)
509 + {
510 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
511 +index 5bfc961e53c9..5b13c2ba1059 100644
512 +--- a/drivers/net/phy/phylink.c
513 ++++ b/drivers/net/phy/phylink.c
514 +@@ -203,6 +203,8 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
515 + __ETHTOOL_LINK_MODE_MASK_NBITS, true);
516 + linkmode_zero(pl->supported);
517 + phylink_set(pl->supported, MII);
518 ++ phylink_set(pl->supported, Pause);
519 ++ phylink_set(pl->supported, Asym_Pause);
520 + if (s) {
521 + __set_bit(s->bit, pl->supported);
522 + } else {
523 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
524 +index c37ef5287caa..fa7121dcab67 100644
525 +--- a/drivers/net/ppp/pppoe.c
526 ++++ b/drivers/net/ppp/pppoe.c
527 +@@ -1137,6 +1137,9 @@ static const struct proto_ops pppoe_ops = {
528 + .recvmsg = pppoe_recvmsg,
529 + .mmap = sock_no_mmap,
530 + .ioctl = pppox_ioctl,
531 ++#ifdef CONFIG_COMPAT
532 ++ .compat_ioctl = pppox_compat_ioctl,
533 ++#endif
534 + };
535 +
536 + static const struct pppox_proto pppoe_proto = {
537 +diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
538 +index c0599b3b23c0..9128e42e33e7 100644
539 +--- a/drivers/net/ppp/pppox.c
540 ++++ b/drivers/net/ppp/pppox.c
541 +@@ -22,6 +22,7 @@
542 + #include <linux/string.h>
543 + #include <linux/module.h>
544 + #include <linux/kernel.h>
545 ++#include <linux/compat.h>
546 + #include <linux/errno.h>
547 + #include <linux/netdevice.h>
548 + #include <linux/net.h>
549 +@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
550 +
551 + EXPORT_SYMBOL(pppox_ioctl);
552 +
553 ++#ifdef CONFIG_COMPAT
554 ++int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
555 ++{
556 ++ if (cmd == PPPOEIOCSFWD32)
557 ++ cmd = PPPOEIOCSFWD;
558 ++
559 ++ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
560 ++}
561 ++
562 ++EXPORT_SYMBOL(pppox_compat_ioctl);
563 ++#endif
564 ++
565 + static int pppox_create(struct net *net, struct socket *sock, int protocol,
566 + int kern)
567 + {
568 +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
569 +index 68b274b3e448..51d769901397 100644
570 +--- a/drivers/net/ppp/pptp.c
571 ++++ b/drivers/net/ppp/pptp.c
572 +@@ -636,6 +636,9 @@ static const struct proto_ops pptp_ops = {
573 + .recvmsg = sock_no_recvmsg,
574 + .mmap = sock_no_mmap,
575 + .ioctl = pppox_ioctl,
576 ++#ifdef CONFIG_COMPAT
577 ++ .compat_ioctl = pppox_compat_ioctl,
578 ++#endif
579 + };
580 +
581 + static const struct pppox_proto pppox_pptp_proto = {
582 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
583 +index 84a33c81b9b7..7e197ba8abe4 100644
584 +--- a/drivers/net/tun.c
585 ++++ b/drivers/net/tun.c
586 +@@ -1350,6 +1350,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
587 +
588 + skb_reserve(skb, pad - delta);
589 + skb_put(skb, len + delta);
590 ++ skb_set_owner_w(skb, tfile->socket.sk);
591 + get_page(alloc_frag->page);
592 + alloc_frag->offset += buflen;
593 +
594 +diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
595 +index e65d027b91fa..529be35ac178 100644
596 +--- a/drivers/nfc/nfcmrvl/main.c
597 ++++ b/drivers/nfc/nfcmrvl/main.c
598 +@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
599 + /* Reset possible fault of previous session */
600 + clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
601 +
602 +- if (priv->config.reset_n_io) {
603 ++ if (gpio_is_valid(priv->config.reset_n_io)) {
604 + nfc_info(priv->dev, "reset the chip\n");
605 + gpio_set_value(priv->config.reset_n_io, 0);
606 + usleep_range(5000, 10000);
607 +@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
608 +
609 + void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
610 + {
611 +- if (priv->config.reset_n_io)
612 ++ if (gpio_is_valid(priv->config.reset_n_io))
613 + gpio_set_value(priv->config.reset_n_io, 0);
614 + }
615 +
616 +diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
617 +index 9a22056e8d9e..e5a622ce4b95 100644
618 +--- a/drivers/nfc/nfcmrvl/uart.c
619 ++++ b/drivers/nfc/nfcmrvl/uart.c
620 +@@ -26,7 +26,7 @@
621 + static unsigned int hci_muxed;
622 + static unsigned int flow_control;
623 + static unsigned int break_control;
624 +-static unsigned int reset_n_io;
625 ++static int reset_n_io = -EINVAL;
626 +
627 + /*
628 + ** NFCMRVL NCI OPS
629 +@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
630 + module_param(hci_muxed, uint, 0);
631 + MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
632 +
633 +-module_param(reset_n_io, uint, 0);
634 ++module_param(reset_n_io, int, 0);
635 + MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
636 +diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
637 +index bd35eab652be..deb953290f8f 100644
638 +--- a/drivers/nfc/nfcmrvl/usb.c
639 ++++ b/drivers/nfc/nfcmrvl/usb.c
640 +@@ -304,6 +304,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
641 +
642 + /* No configuration for USB */
643 + memset(&config, 0, sizeof(config));
644 ++ config.reset_n_io = -EINVAL;
645 +
646 + nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
647 +
648 +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
649 +index bd61bf4e2da2..d95ba1a07ba3 100644
650 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c
651 ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
652 +@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
653 + */
654 + static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
655 + {
656 +- return (struct fcoe_rport *)(rdata + 1);
657 ++ return container_of(rdata, struct fcoe_rport, rdata);
658 + }
659 +
660 + /**
661 +@@ -2283,7 +2283,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
662 + */
663 + static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
664 + struct sk_buff *skb,
665 +- struct fc_rport_priv *rdata)
666 ++ struct fcoe_rport *frport)
667 + {
668 + struct fip_header *fiph;
669 + struct fip_desc *desc = NULL;
670 +@@ -2291,16 +2291,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
671 + struct fip_wwn_desc *wwn = NULL;
672 + struct fip_vn_desc *vn = NULL;
673 + struct fip_size_desc *size = NULL;
674 +- struct fcoe_rport *frport;
675 + size_t rlen;
676 + size_t dlen;
677 + u32 desc_mask = 0;
678 + u32 dtype;
679 + u8 sub;
680 +
681 +- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
682 +- frport = fcoe_ctlr_rport(rdata);
683 +-
684 + fiph = (struct fip_header *)skb->data;
685 + frport->flags = ntohs(fiph->fip_flags);
686 +
687 +@@ -2363,15 +2359,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
688 + if (dlen != sizeof(struct fip_wwn_desc))
689 + goto len_err;
690 + wwn = (struct fip_wwn_desc *)desc;
691 +- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
692 ++ frport->rdata.ids.node_name =
693 ++ get_unaligned_be64(&wwn->fd_wwn);
694 + break;
695 + case FIP_DT_VN_ID:
696 + if (dlen != sizeof(struct fip_vn_desc))
697 + goto len_err;
698 + vn = (struct fip_vn_desc *)desc;
699 + memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
700 +- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
701 +- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
702 ++ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
703 ++ frport->rdata.ids.port_name =
704 ++ get_unaligned_be64(&vn->fd_wwpn);
705 + break;
706 + case FIP_DT_FC4F:
707 + if (dlen != sizeof(struct fip_fc4_feat))
708 +@@ -2752,10 +2750,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
709 + {
710 + struct fip_header *fiph;
711 + enum fip_vn2vn_subcode sub;
712 +- struct {
713 +- struct fc_rport_priv rdata;
714 +- struct fcoe_rport frport;
715 +- } buf;
716 ++ struct fcoe_rport frport = { };
717 + int rc, vlan_id = 0;
718 +
719 + fiph = (struct fip_header *)skb->data;
720 +@@ -2771,7 +2766,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
721 + goto drop;
722 + }
723 +
724 +- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
725 ++ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
726 + if (rc) {
727 + LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
728 + goto drop;
729 +@@ -2780,19 +2775,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
730 + mutex_lock(&fip->ctlr_mutex);
731 + switch (sub) {
732 + case FIP_SC_VN_PROBE_REQ:
733 +- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
734 ++ fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
735 + break;
736 + case FIP_SC_VN_PROBE_REP:
737 +- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
738 ++ fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
739 + break;
740 + case FIP_SC_VN_CLAIM_NOTIFY:
741 +- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
742 ++ fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
743 + break;
744 + case FIP_SC_VN_CLAIM_REP:
745 +- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
746 ++ fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
747 + break;
748 + case FIP_SC_VN_BEACON:
749 +- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
750 ++ fcoe_ctlr_vn_beacon(fip, &frport.rdata);
751 + break;
752 + default:
753 + LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
754 +@@ -2816,22 +2811,18 @@ drop:
755 + */
756 + static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
757 + struct sk_buff *skb,
758 +- struct fc_rport_priv *rdata)
759 ++ struct fcoe_rport *frport)
760 + {
761 + struct fip_header *fiph;
762 + struct fip_desc *desc = NULL;
763 + struct fip_mac_desc *macd = NULL;
764 + struct fip_wwn_desc *wwn = NULL;
765 +- struct fcoe_rport *frport;
766 + size_t rlen;
767 + size_t dlen;
768 + u32 desc_mask = 0;
769 + u32 dtype;
770 + u8 sub;
771 +
772 +- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
773 +- frport = fcoe_ctlr_rport(rdata);
774 +-
775 + fiph = (struct fip_header *)skb->data;
776 + frport->flags = ntohs(fiph->fip_flags);
777 +
778 +@@ -2885,7 +2876,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
779 + if (dlen != sizeof(struct fip_wwn_desc))
780 + goto len_err;
781 + wwn = (struct fip_wwn_desc *)desc;
782 +- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
783 ++ frport->rdata.ids.node_name =
784 ++ get_unaligned_be64(&wwn->fd_wwn);
785 + break;
786 + default:
787 + LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
788 +@@ -2996,22 +2988,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
789 + {
790 + struct fip_header *fiph;
791 + enum fip_vlan_subcode sub;
792 +- struct {
793 +- struct fc_rport_priv rdata;
794 +- struct fcoe_rport frport;
795 +- } buf;
796 ++ struct fcoe_rport frport = { };
797 + int rc;
798 +
799 + fiph = (struct fip_header *)skb->data;
800 + sub = fiph->fip_subcode;
801 +- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
802 ++ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
803 + if (rc) {
804 + LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
805 + goto drop;
806 + }
807 + mutex_lock(&fip->ctlr_mutex);
808 + if (sub == FIP_SC_VL_REQ)
809 +- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
810 ++ fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
811 + mutex_unlock(&fip->ctlr_mutex);
812 +
813 + drop:
814 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
815 +index 31d31aad3de1..0e964ce75406 100644
816 +--- a/drivers/scsi/libfc/fc_rport.c
817 ++++ b/drivers/scsi/libfc/fc_rport.c
818 +@@ -142,12 +142,15 @@ EXPORT_SYMBOL(fc_rport_lookup);
819 + struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
820 + {
821 + struct fc_rport_priv *rdata;
822 ++ size_t rport_priv_size = sizeof(*rdata);
823 +
824 + rdata = fc_rport_lookup(lport, port_id);
825 + if (rdata)
826 + return rdata;
827 +
828 +- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
829 ++ if (lport->rport_priv_size > 0)
830 ++ rport_priv_size = lport->rport_priv_size;
831 ++ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
832 + if (!rdata)
833 + return NULL;
834 +
835 +diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
836 +index 25abf2d1732a..eab27d41ba83 100644
837 +--- a/drivers/spi/spi-bcm2835.c
838 ++++ b/drivers/spi/spi-bcm2835.c
839 +@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
840 + bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
841 +
842 + /* handle all the 3-wire mode */
843 +- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
844 ++ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
845 ++ tfr->rx_buf != master->dummy_rx)
846 + cs |= BCM2835_SPI_CS_REN;
847 + else
848 + cs &= ~BCM2835_SPI_CS_REN;
849 +diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
850 +index bd5d91e119ca..ea52b98b39fa 100644
851 +--- a/fs/compat_ioctl.c
852 ++++ b/fs/compat_ioctl.c
853 +@@ -1032,9 +1032,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
854 + COMPATIBLE_IOCTL(PPPIOCATTCHAN)
855 + COMPATIBLE_IOCTL(PPPIOCGCHAN)
856 + COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
857 +-/* PPPOX */
858 +-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
859 +-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
860 + /* Big A */
861 + /* sparc only */
862 + /* Big Q for sound/OSS */
863 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
864 +index eb396f71285f..8d4b92185a09 100644
865 +--- a/include/linux/cgroup-defs.h
866 ++++ b/include/linux/cgroup-defs.h
867 +@@ -201,6 +201,7 @@ struct css_set {
868 + */
869 + struct list_head tasks;
870 + struct list_head mg_tasks;
871 ++ struct list_head dying_tasks;
872 +
873 + /* all css_task_iters currently walking this cset */
874 + struct list_head task_iters;
875 +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
876 +index ef4e4ce42642..0e21619f1c03 100644
877 +--- a/include/linux/cgroup.h
878 ++++ b/include/linux/cgroup.h
879 +@@ -42,6 +42,9 @@
880 + /* walk all threaded css_sets in the domain */
881 + #define CSS_TASK_ITER_THREADED (1U << 1)
882 +
883 ++/* internal flags */
884 ++#define CSS_TASK_ITER_SKIPPED (1U << 16)
885 ++
886 + /* a css_task_iter should be treated as an opaque object */
887 + struct css_task_iter {
888 + struct cgroup_subsys *ss;
889 +@@ -56,6 +59,7 @@ struct css_task_iter {
890 + struct list_head *task_pos;
891 + struct list_head *tasks_head;
892 + struct list_head *mg_tasks_head;
893 ++ struct list_head *dying_tasks_head;
894 +
895 + struct css_set *cur_cset;
896 + struct css_set *cur_dcset;
897 +diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
898 +index ba7a9b0c7c57..24e9b360da65 100644
899 +--- a/include/linux/if_pppox.h
900 ++++ b/include/linux/if_pppox.h
901 +@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
902 + extern void unregister_pppox_proto(int proto_num);
903 + extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
904 + extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
905 ++extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
906 ++
907 ++#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
908 +
909 + /* PPPoX socket states */
910 + enum {
911 +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
912 +index b25e7baa273e..dfe626ad818a 100644
913 +--- a/include/linux/mlx5/fs.h
914 ++++ b/include/linux/mlx5/fs.h
915 +@@ -164,6 +164,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
916 + struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
917 + struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
918 + void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
919 ++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
920 + void mlx5_fc_query_cached(struct mlx5_fc *counter,
921 + u64 *bytes, u64 *packets, u64 *lastuse);
922 + int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
923 +diff --git a/include/net/tcp.h b/include/net/tcp.h
924 +index 0b477a1e1177..7994e569644e 100644
925 +--- a/include/net/tcp.h
926 ++++ b/include/net/tcp.h
927 +@@ -1688,6 +1688,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
928 + tcp_sk(sk)->highest_sack = NULL;
929 + }
930 +
931 ++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
932 ++{
933 ++ struct sk_buff *skb = tcp_write_queue_head(sk);
934 ++
935 ++ if (skb == tcp_send_head(sk))
936 ++ skb = NULL;
937 ++
938 ++ return skb;
939 ++}
940 ++
941 ++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
942 ++{
943 ++ struct sk_buff *skb = tcp_send_head(sk);
944 ++
945 ++ return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
946 ++}
947 ++
948 + static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
949 + {
950 + __skb_queue_tail(&sk->sk_write_queue, skb);
951 +diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
952 +index a4e41444f5fe..282358843659 100644
953 +--- a/include/scsi/libfcoe.h
954 ++++ b/include/scsi/libfcoe.h
955 +@@ -241,6 +241,7 @@ struct fcoe_fcf {
956 + * @vn_mac: VN_Node assigned MAC address for data
957 + */
958 + struct fcoe_rport {
959 ++ struct fc_rport_priv rdata;
960 + unsigned long time;
961 + u16 fcoe_len;
962 + u16 flags;
963 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
964 +index d30a51da94e2..2c57030f54aa 100644
965 +--- a/kernel/cgroup/cgroup.c
966 ++++ b/kernel/cgroup/cgroup.c
967 +@@ -204,7 +204,8 @@ static struct cftype cgroup_base_files[];
968 +
969 + static int cgroup_apply_control(struct cgroup *cgrp);
970 + static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
971 +-static void css_task_iter_advance(struct css_task_iter *it);
972 ++static void css_task_iter_skip(struct css_task_iter *it,
973 ++ struct task_struct *task);
974 + static int cgroup_destroy_locked(struct cgroup *cgrp);
975 + static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
976 + struct cgroup_subsys *ss);
977 +@@ -642,6 +643,7 @@ struct css_set init_css_set = {
978 + .dom_cset = &init_css_set,
979 + .tasks = LIST_HEAD_INIT(init_css_set.tasks),
980 + .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
981 ++ .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
982 + .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
983 + .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
984 + .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
985 +@@ -737,6 +739,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
986 + cgroup_update_populated(link->cgrp, populated);
987 + }
988 +
989 ++/*
990 ++ * @task is leaving, advance task iterators which are pointing to it so
991 ++ * that they can resume at the next position. Advancing an iterator might
992 ++ * remove it from the list, use safe walk. See css_task_iter_skip() for
993 ++ * details.
994 ++ */
995 ++static void css_set_skip_task_iters(struct css_set *cset,
996 ++ struct task_struct *task)
997 ++{
998 ++ struct css_task_iter *it, *pos;
999 ++
1000 ++ list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
1001 ++ css_task_iter_skip(it, task);
1002 ++}
1003 ++
1004 + /**
1005 + * css_set_move_task - move a task from one css_set to another
1006 + * @task: task being moved
1007 +@@ -762,22 +779,9 @@ static void css_set_move_task(struct task_struct *task,
1008 + css_set_update_populated(to_cset, true);
1009 +
1010 + if (from_cset) {
1011 +- struct css_task_iter *it, *pos;
1012 +-
1013 + WARN_ON_ONCE(list_empty(&task->cg_list));
1014 +
1015 +- /*
1016 +- * @task is leaving, advance task iterators which are
1017 +- * pointing to it so that they can resume at the next
1018 +- * position. Advancing an iterator might remove it from
1019 +- * the list, use safe walk. See css_task_iter_advance*()
1020 +- * for details.
1021 +- */
1022 +- list_for_each_entry_safe(it, pos, &from_cset->task_iters,
1023 +- iters_node)
1024 +- if (it->task_pos == &task->cg_list)
1025 +- css_task_iter_advance(it);
1026 +-
1027 ++ css_set_skip_task_iters(from_cset, task);
1028 + list_del_init(&task->cg_list);
1029 + if (!css_set_populated(from_cset))
1030 + css_set_update_populated(from_cset, false);
1031 +@@ -1104,6 +1108,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1032 + cset->dom_cset = cset;
1033 + INIT_LIST_HEAD(&cset->tasks);
1034 + INIT_LIST_HEAD(&cset->mg_tasks);
1035 ++ INIT_LIST_HEAD(&cset->dying_tasks);
1036 + INIT_LIST_HEAD(&cset->task_iters);
1037 + INIT_LIST_HEAD(&cset->threaded_csets);
1038 + INIT_HLIST_NODE(&cset->hlist);
1039 +@@ -4043,15 +4048,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
1040 + it->task_pos = NULL;
1041 + return;
1042 + }
1043 +- } while (!css_set_populated(cset));
1044 ++ } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
1045 +
1046 + if (!list_empty(&cset->tasks))
1047 + it->task_pos = cset->tasks.next;
1048 +- else
1049 ++ else if (!list_empty(&cset->mg_tasks))
1050 + it->task_pos = cset->mg_tasks.next;
1051 ++ else
1052 ++ it->task_pos = cset->dying_tasks.next;
1053 +
1054 + it->tasks_head = &cset->tasks;
1055 + it->mg_tasks_head = &cset->mg_tasks;
1056 ++ it->dying_tasks_head = &cset->dying_tasks;
1057 +
1058 + /*
1059 + * We don't keep css_sets locked across iteration steps and thus
1060 +@@ -4077,9 +4085,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
1061 + list_add(&it->iters_node, &cset->task_iters);
1062 + }
1063 +
1064 ++static void css_task_iter_skip(struct css_task_iter *it,
1065 ++ struct task_struct *task)
1066 ++{
1067 ++ lockdep_assert_held(&css_set_lock);
1068 ++
1069 ++ if (it->task_pos == &task->cg_list) {
1070 ++ it->task_pos = it->task_pos->next;
1071 ++ it->flags |= CSS_TASK_ITER_SKIPPED;
1072 ++ }
1073 ++}
1074 ++
1075 + static void css_task_iter_advance(struct css_task_iter *it)
1076 + {
1077 +- struct list_head *next;
1078 ++ struct task_struct *task;
1079 +
1080 + lockdep_assert_held(&css_set_lock);
1081 + repeat:
1082 +@@ -4089,25 +4108,40 @@ repeat:
1083 + * consumed first and then ->mg_tasks. After ->mg_tasks,
1084 + * we move onto the next cset.
1085 + */
1086 +- next = it->task_pos->next;
1087 +-
1088 +- if (next == it->tasks_head)
1089 +- next = it->mg_tasks_head->next;
1090 ++ if (it->flags & CSS_TASK_ITER_SKIPPED)
1091 ++ it->flags &= ~CSS_TASK_ITER_SKIPPED;
1092 ++ else
1093 ++ it->task_pos = it->task_pos->next;
1094 +
1095 +- if (next == it->mg_tasks_head)
1096 ++ if (it->task_pos == it->tasks_head)
1097 ++ it->task_pos = it->mg_tasks_head->next;
1098 ++ if (it->task_pos == it->mg_tasks_head)
1099 ++ it->task_pos = it->dying_tasks_head->next;
1100 ++ if (it->task_pos == it->dying_tasks_head)
1101 + css_task_iter_advance_css_set(it);
1102 +- else
1103 +- it->task_pos = next;
1104 + } else {
1105 + /* called from start, proceed to the first cset */
1106 + css_task_iter_advance_css_set(it);
1107 + }
1108 +
1109 +- /* if PROCS, skip over tasks which aren't group leaders */
1110 +- if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
1111 +- !thread_group_leader(list_entry(it->task_pos, struct task_struct,
1112 +- cg_list)))
1113 +- goto repeat;
1114 ++ if (!it->task_pos)
1115 ++ return;
1116 ++
1117 ++ task = list_entry(it->task_pos, struct task_struct, cg_list);
1118 ++
1119 ++ if (it->flags & CSS_TASK_ITER_PROCS) {
1120 ++ /* if PROCS, skip over tasks which aren't group leaders */
1121 ++ if (!thread_group_leader(task))
1122 ++ goto repeat;
1123 ++
1124 ++ /* and dying leaders w/o live member threads */
1125 ++ if (!atomic_read(&task->signal->live))
1126 ++ goto repeat;
1127 ++ } else {
1128 ++ /* skip all dying ones */
1129 ++ if (task->flags & PF_EXITING)
1130 ++ goto repeat;
1131 ++ }
1132 + }
1133 +
1134 + /**
1135 +@@ -4163,6 +4197,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
1136 +
1137 + spin_lock_irq(&css_set_lock);
1138 +
1139 ++ /* @it may be half-advanced by skips, finish advancing */
1140 ++ if (it->flags & CSS_TASK_ITER_SKIPPED)
1141 ++ css_task_iter_advance(it);
1142 ++
1143 + if (it->task_pos) {
1144 + it->cur_task = list_entry(it->task_pos, struct task_struct,
1145 + cg_list);
1146 +@@ -5540,6 +5578,7 @@ void cgroup_exit(struct task_struct *tsk)
1147 + if (!list_empty(&tsk->cg_list)) {
1148 + spin_lock_irq(&css_set_lock);
1149 + css_set_move_task(tsk, cset, NULL, false);
1150 ++ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
1151 + cset->nr_tasks--;
1152 + spin_unlock_irq(&css_set_lock);
1153 + } else {
1154 +@@ -5560,6 +5599,13 @@ void cgroup_release(struct task_struct *task)
1155 + do_each_subsys_mask(ss, ssid, have_release_callback) {
1156 + ss->release(task);
1157 + } while_each_subsys_mask();
1158 ++
1159 ++ if (use_task_css_set_links) {
1160 ++ spin_lock_irq(&css_set_lock);
1161 ++ css_set_skip_task_iters(task_css_set(task), task);
1162 ++ list_del_init(&task->cg_list);
1163 ++ spin_unlock_irq(&css_set_lock);
1164 ++ }
1165 + }
1166 +
1167 + void cgroup_free(struct task_struct *task)
1168 +diff --git a/kernel/exit.c b/kernel/exit.c
1169 +index 95ce231ff5e2..15437cfdcd70 100644
1170 +--- a/kernel/exit.c
1171 ++++ b/kernel/exit.c
1172 +@@ -193,6 +193,7 @@ repeat:
1173 + rcu_read_unlock();
1174 +
1175 + proc_flush_task(p);
1176 ++ cgroup_release(p);
1177 +
1178 + write_lock_irq(&tasklist_lock);
1179 + ptrace_release_task(p);
1180 +@@ -218,7 +219,6 @@ repeat:
1181 + }
1182 +
1183 + write_unlock_irq(&tasklist_lock);
1184 +- cgroup_release(p);
1185 + release_thread(p);
1186 + call_rcu(&p->rcu, delayed_put_task_struct);
1187 +
1188 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1189 +index 5cd83145c7d8..b24782d53474 100644
1190 +--- a/net/bridge/br_multicast.c
1191 ++++ b/net/bridge/br_multicast.c
1192 +@@ -1593,6 +1593,9 @@ br_multicast_leave_group(struct net_bridge *br,
1193 + if (!br_port_group_equal(p, port, src))
1194 + continue;
1195 +
1196 ++ if (p->flags & MDB_PG_FLAGS_PERMANENT)
1197 ++ break;
1198 ++
1199 + rcu_assign_pointer(*pp, p->next);
1200 + hlist_del_init(&p->mglist);
1201 + del_timer(&p->timer);
1202 +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
1203 +index 9b8a53568b0f..e24a74884768 100644
1204 +--- a/net/bridge/br_vlan.c
1205 ++++ b/net/bridge/br_vlan.c
1206 +@@ -636,6 +636,11 @@ void br_vlan_flush(struct net_bridge *br)
1207 +
1208 + ASSERT_RTNL();
1209 +
1210 ++ /* delete auto-added default pvid local fdb before flushing vlans
1211 ++ * otherwise it will be leaked on bridge device init failure
1212 ++ */
1213 ++ br_fdb_delete_by_port(br, NULL, 0, 1);
1214 ++
1215 + vg = br_vlan_group(br);
1216 + __vlan_flush(vg);
1217 + RCU_INIT_POINTER(br->vlgrp, NULL);
1218 +diff --git a/net/core/dev.c b/net/core/dev.c
1219 +index 08c0e7613ef6..f79b513e80dc 100644
1220 +--- a/net/core/dev.c
1221 ++++ b/net/core/dev.c
1222 +@@ -8652,6 +8652,8 @@ static void __net_exit default_device_exit(struct net *net)
1223 +
1224 + /* Push remaining network devices to init_net */
1225 + snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
1226 ++ if (__dev_get_by_name(&init_net, fb_name))
1227 ++ snprintf(fb_name, IFNAMSIZ, "dev%%d");
1228 + err = dev_change_net_namespace(dev, &init_net, fb_name);
1229 + if (err) {
1230 + pr_emerg("%s: failed to move %s to init_net: %d\n",
1231 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1232 +index a5960b9b6741..a99086bf26ea 100644
1233 +--- a/net/ipv4/tcp_output.c
1234 ++++ b/net/ipv4/tcp_output.c
1235 +@@ -1264,6 +1264,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1236 + struct tcp_sock *tp = tcp_sk(sk);
1237 + struct sk_buff *buff;
1238 + int nsize, old_factor;
1239 ++ long limit;
1240 + int nlen;
1241 + u8 flags;
1242 +
1243 +@@ -1274,7 +1275,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1244 + if (nsize < 0)
1245 + nsize = 0;
1246 +
1247 +- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
1248 ++ /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1249 ++ * We need some allowance to not penalize applications setting small
1250 ++ * SO_SNDBUF values.
1251 ++ * Also allow first and last skb in retransmit queue to be split.
1252 ++ */
1253 ++ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1254 ++ if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1255 ++ skb != tcp_rtx_queue_head(sk) &&
1256 ++ skb != tcp_rtx_queue_tail(sk))) {
1257 + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1258 + return -ENOMEM;
1259 + }
1260 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1261 +index f71c7915ff0e..067fc78cc529 100644
1262 +--- a/net/ipv6/ip6_tunnel.c
1263 ++++ b/net/ipv6/ip6_tunnel.c
1264 +@@ -1280,12 +1280,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1265 + }
1266 +
1267 + fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1268 ++ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1269 +
1270 + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1271 + return -1;
1272 +
1273 +- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1274 +-
1275 + skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1276 +
1277 + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1278 +@@ -1371,12 +1370,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1279 + }
1280 +
1281 + fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1282 ++ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1283 +
1284 + if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1285 + return -1;
1286 +
1287 +- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1288 +-
1289 + skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1290 +
1291 + err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1292 +diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1293 +index 8bef35aa8786..a7fcf48e9087 100644
1294 +--- a/net/l2tp/l2tp_ppp.c
1295 ++++ b/net/l2tp/l2tp_ppp.c
1296 +@@ -1793,6 +1793,9 @@ static const struct proto_ops pppol2tp_ops = {
1297 + .recvmsg = pppol2tp_recvmsg,
1298 + .mmap = sock_no_mmap,
1299 + .ioctl = pppox_ioctl,
1300 ++#ifdef CONFIG_COMPAT
1301 ++ .compat_ioctl = pppox_compat_ioctl,
1302 ++#endif
1303 + };
1304 +
1305 + static const struct pppox_proto pppol2tp_proto = {
1306 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1307 +index 31de26c99023..16a403d17f44 100644
1308 +--- a/net/sched/act_ife.c
1309 ++++ b/net/sched/act_ife.c
1310 +@@ -459,6 +459,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1311 + int ret = 0;
1312 + int err;
1313 +
1314 ++ if (!nla)
1315 ++ return -EINVAL;
1316 ++
1317 + err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
1318 + if (err < 0)
1319 + return err;
1320 +diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
1321 +index c518a1efcb9d..b22e5cde6059 100644
1322 +--- a/net/sched/sch_codel.c
1323 ++++ b/net/sched/sch_codel.c
1324 +@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
1325 + struct Qdisc *sch = ctx;
1326 + struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
1327 +
1328 +- if (skb)
1329 ++ if (skb) {
1330 + sch->qstats.backlog -= qdisc_pkt_len(skb);
1331 +-
1332 +- prefetch(&skb->end); /* we'll need skb_shinfo() */
1333 ++ prefetch(&skb->end); /* we'll need skb_shinfo() */
1334 ++ }
1335 + return skb;
1336 + }
1337 +
1338 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1339 +index 41954ed7ff51..ad4dcc663c6d 100644
1340 +--- a/net/tipc/netlink_compat.c
1341 ++++ b/net/tipc/netlink_compat.c
1342 +@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
1343 + int rep_type;
1344 + int rep_size;
1345 + int req_type;
1346 ++ int req_size;
1347 + struct net *net;
1348 + struct sk_buff *rep;
1349 + struct tlv_desc *req;
1350 +@@ -252,7 +253,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
1351 + int err;
1352 + struct sk_buff *arg;
1353 +
1354 +- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
1355 ++ if (msg->req_type && (!msg->req_size ||
1356 ++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
1357 + return -EINVAL;
1358 +
1359 + msg->rep = tipc_tlv_alloc(msg->rep_size);
1360 +@@ -345,7 +347,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
1361 + {
1362 + int err;
1363 +
1364 +- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
1365 ++ if (msg->req_type && (!msg->req_size ||
1366 ++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
1367 + return -EINVAL;
1368 +
1369 + err = __tipc_nl_compat_doit(cmd, msg);
1370 +@@ -1267,8 +1270,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1371 + goto send;
1372 + }
1373 +
1374 +- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1375 +- if (!len || !TLV_OK(msg.req, len)) {
1376 ++ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1377 ++ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
1378 + msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1379 + err = -EOPNOTSUPP;
1380 + goto send;
1381 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
1382 +index 7d748e272572..5422543faff8 100644
1383 +--- a/tools/objtool/check.c
1384 ++++ b/tools/objtool/check.c
1385 +@@ -165,6 +165,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
1386 + "__reiserfs_panic",
1387 + "lbug_with_loc",
1388 + "fortify_panic",
1389 ++ "machine_real_restart",
1390 ++ "rewind_stack_do_exit",
1391 + };
1392 +
1393 + if (func->bind == STB_WEAK)