Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.13 commit in: /
Date: Fri, 24 Nov 2017 09:42:01
Message-Id: 1511516453.64881833b8854f3c3a82270811c619b49e013da7.alicef@gentoo
commit: 64881833b8854f3c3a82270811c619b49e013da7
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Nov 24 09:40:53 2017 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Nov 24 09:40:53 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=64881833

linux kernel 4.13.16

0000_README | 4 +
1015_linux-4.13.16.patch | 946 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 950 insertions(+)

diff --git a/0000_README b/0000_README
index 7f93bc3..846eb5a 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-4.13.15.patch
From: http://www.kernel.org
Desc: Linux 4.13.15

+Patch: 1015_linux-4.13.16.patch
+From: http://www.kernel.org
+Desc: Linux 4.13.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-4.13.16.patch b/1015_linux-4.13.16.patch
new file mode 100644
index 0000000..4f9c8c1
--- /dev/null
+++ b/1015_linux-4.13.16.patch
@@ -0,0 +1,946 @@
+diff --git a/Makefile b/Makefile
+index 3bd5d9d148d3..bc9a897e0431 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 13
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index c55fb2cb2acc..24f749324c0f 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -811,7 +811,24 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ struct cacheinfo *this_leaf;
+ int i, sibling;
+
+- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
++ /*
++ * For L3, always use the pre-calculated cpu_llc_shared_mask
++ * to derive shared_cpu_map.
++ */
++ if (index == 3) {
++ for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
++ this_cpu_ci = get_cpu_cacheinfo(i);
++ if (!this_cpu_ci->info_list)
++ continue;
++ this_leaf = this_cpu_ci->info_list + index;
++ for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
++ if (!cpu_online(sibling))
++ continue;
++ cpumask_set_cpu(sibling,
++ &this_leaf->shared_cpu_map);
++ }
++ }
++ } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ unsigned int apicid, nshared, first, last;
+
+ this_leaf = this_cpu_ci->info_list + index;
+@@ -839,19 +856,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
+ &this_leaf->shared_cpu_map);
+ }
+ }
+- } else if (index == 3) {
+- for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
+- this_cpu_ci = get_cpu_cacheinfo(i);
+- if (!this_cpu_ci->info_list)
+- continue;
+- this_leaf = this_cpu_ci->info_list + index;
+- for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
+- if (!cpu_online(sibling))
+- continue;
+- cpumask_set_cpu(sibling,
+- &this_leaf->shared_cpu_map);
+- }
+- }
+ } else
+ return 0;
+
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 810b138f5897..c82d9fd2f05a 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4030,7 +4030,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+ }
+
+ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+- struct list_head *timeouts, long timeout_period,
++ struct list_head *timeouts,
++ unsigned long timeout_period,
+ int slot, unsigned long *flags,
+ unsigned int *waiting_msgs)
+ {
+@@ -4043,8 +4044,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ if (!ent->inuse)
+ return;
+
+- ent->timeout -= timeout_period;
+- if (ent->timeout > 0) {
++ if (timeout_period < ent->timeout) {
++ ent->timeout -= timeout_period;
+ (*waiting_msgs)++;
+ return;
+ }
+@@ -4110,7 +4111,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ }
+ }
+
+-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
++static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
++ unsigned long timeout_period)
+ {
+ struct list_head timeouts;
+ struct ipmi_recv_msg *msg, *msg2;
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 610638a80383..461bf0b8a094 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ return -EFAULT;
+ }
+
++ if (in_size < 6 ||
++ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
++ mutex_unlock(&priv->buffer_mutex);
++ return -EINVAL;
++ }
++
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c99dc59d729b..76e8054bfc4e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3253,7 +3253,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+
+- return hash;
++ return hash >> 1;
+ }
+
+ /*-------------------------- Device entry points ----------------------------*/
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index c28fa5a8734c..ba15eeadfe21 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1743,15 +1743,17 @@ static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+
+ static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+ {
+- u32 __maybe_unused reg;
++ u32 reg;
+
+- /* Include Broadcom tag in pad extension */
++ reg = gib_readl(priv, GIB_CONTROL);
++ /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
+ if (netdev_uses_dsa(priv->netdev)) {
+- reg = gib_readl(priv, GIB_CONTROL);
+ reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+ reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+- gib_writel(priv, reg, GIB_CONTROL);
+ }
++ reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
++ reg |= 12 << GIB_IPG_LEN_SHIFT;
++ gib_writel(priv, reg, GIB_CONTROL);
+ }
+
+ static int bcm_sysport_open(struct net_device *dev)
+diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
+index e92859dab7ae..e191c4ebeaf4 100644
+--- a/drivers/net/ethernet/fealnx.c
++++ b/drivers/net/ethernet/fealnx.c
+@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
+ RXFSD = 0x00000800, /* first descriptor */
+ RXLSD = 0x00000400, /* last descriptor */
+ ErrorSummary = 0x80, /* error summary */
+- RUNT = 0x40, /* runt packet received */
+- LONG = 0x20, /* long packet received */
++ RUNTPKT = 0x40, /* runt packet received */
++ LONGPKT = 0x20, /* long packet received */
+ FAE = 0x10, /* frame align error */
+ CRC = 0x08, /* crc error */
+ RXER = 0x04, /* receive error */
+@@ -1632,7 +1632,7 @@ static int netdev_rx(struct net_device *dev)
+ dev->name, rx_status);
+
+ dev->stats.rx_errors++; /* end of a packet. */
+- if (rx_status & (LONG | RUNT))
++ if (rx_status & (LONGPKT | RUNTPKT))
+ dev->stats.rx_length_errors++;
+ if (rx_status & RXER)
+ dev->stats.rx_frame_errors++;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 7344433259fc..1c513dc0105e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -213,22 +213,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+ {
+- struct page *page;
+-
+ if (mlx5e_rx_cache_get(rq, dma_info))
+ return 0;
+
+- page = dev_alloc_pages(rq->buff.page_order);
+- if (unlikely(!page))
++ dma_info->page = dev_alloc_pages(rq->buff.page_order);
++ if (unlikely(!dma_info->page))
+ return -ENOMEM;
+
+- dma_info->addr = dma_map_page(rq->pdev, page, 0,
++ dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+- put_page(page);
++ put_page(dma_info->page);
++ dma_info->page = NULL;
+ return -ENOMEM;
+ }
+- dma_info->page = page;
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 16885827367b..553bc230d70d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1545,9 +1545,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
+ return -EAGAIN;
+ }
+
++ /* Panic tear down fw command will stop the PCI bus communication
++ * with the HCA, so the health poll is no longer needed.
++ */
++ mlx5_drain_health_wq(dev);
++ mlx5_stop_health_poll(dev);
++
+ ret = mlx5_cmd_force_teardown_hca(dev);
+ if (ret) {
+ mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
++ mlx5_start_health_poll(dev);
+ return ret;
+ }
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index b2ff88e69a81..3d4f7959dabb 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct asix_common_private *priv = dev->driver_priv;
+
+- if (priv->suspend)
++ if (priv && priv->suspend)
+ priv->suspend(dev);
+
+ return usbnet_suspend(intf, message);
+@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct asix_common_private *priv = dev->driver_priv;
+
+- if (priv->resume)
++ if (priv && priv->resume)
+ priv->resume(dev);
+
+ return usbnet_resume(intf);
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 8ab281b478f2..4f88f64cccb4 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -221,7 +221,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+ goto bad_desc;
+ }
+
+- if (header.usb_cdc_ether_desc) {
++ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
+ dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
+ /* because of Zaurus, we may be ignoring the host
+ * side link address we were given.
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 9c80e80c5493..8d5e97251efe 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ int err;
+ u8 iface_no;
+ struct usb_cdc_parsed_header hdr;
+- u16 curr_ntb_format;
++ __le16 curr_ntb_format;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+@@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
+ goto error2;
+ }
+
+- if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
++ if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
+ dev_info(&intf->dev, "resetting NTB format to 16-bit");
+ err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
+ USB_TYPE_CLASS | USB_DIR_OUT
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 8c3733608271..8d4a6f7cba61 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ return 1;
+ }
+ if (rawip) {
++ skb_reset_mac_header(skb);
+ skb->dev = dev->net; /* normally set by eth_type_trans */
+ skb->protocol = proto;
+ return 1;
+@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
+ }
+
+ /* errors aren't fatal - we can live with the dynamic address */
+- if (cdc_ether) {
++ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
+ dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
+ usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
+ }
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 8a1eaf3c302a..e91ef5e236cc 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1271,7 +1271,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+ frh->family = family;
+ frh->action = FR_ACT_TO_TBL;
+
+- if (nla_put_u32(skb, FRA_L3MDEV, 1))
++ if (nla_put_u8(skb, FRA_L3MDEV, 1))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index e17baac70f43..436154720bf8 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1632,26 +1632,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+- struct nd_msg *msg;
+- const struct ipv6hdr *iphdr;
+ const struct in6_addr *daddr;
+- struct neighbour *n;
++ const struct ipv6hdr *iphdr;
+ struct inet6_dev *in6_dev;
++ struct neighbour *n;
++ struct nd_msg *msg;
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
+
+- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+- goto out;
+-
+ iphdr = ipv6_hdr(skb);
+ daddr = &iphdr->daddr;
+-
+ msg = (struct nd_msg *)(iphdr + 1);
+- if (msg->icmph.icmp6_code != 0 ||
+- msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+- goto out;
+
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+@@ -2258,11 +2251,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
++ struct vxlan_rdst *rdst, *fdst = NULL;
+ const struct ip_tunnel_info *info;
+- struct ethhdr *eth;
+ bool did_rsc = false;
+- struct vxlan_rdst *rdst, *fdst = NULL;
+ struct vxlan_fdb *f;
++ struct ethhdr *eth;
+ __be32 vni = 0;
+
+ info = skb_tunnel_info(skb);
+@@ -2287,12 +2280,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb, vni);
+ #if IS_ENABLED(CONFIG_IPV6)
+- else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+- struct ipv6hdr *hdr, _hdr;
+- if ((hdr = skb_header_pointer(skb,
+- skb_network_offset(skb),
+- sizeof(_hdr), &_hdr)) &&
+- hdr->nexthdr == IPPROTO_ICMPV6)
++ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
++ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
++ sizeof(struct nd_msg)) &&
++ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
++ struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
++
++ if (m->icmph.icmp6_code == 0 &&
++ m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ return neigh_reduce(dev, skb, vni);
+ }
+ #endif
+diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
+index e500f7dd2470..4bd376c08b59 100644
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -118,6 +118,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key)
+ if (!request_muxed_region(base_port, 2, "8250_fintek"))
+ return -EBUSY;
+
++ /* Force to deactivate all SuperIO in this base_port */
++ outb(EXIT_KEY, base_port + ADDR_PORT);
++
+ outb(key, base_port + ADDR_PORT);
+ outb(key, base_port + ADDR_PORT);
+ return 0;
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 1ea05ac57aa7..670f7e334f93 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ up->efr |= UART_EFR_RTS;
+ else
+- up->efr &= UART_EFR_RTS;
++ up->efr &= ~UART_EFR_RTS;
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, lcr);
+
+diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
+index e82357c89979..8cf16d8c5261 100644
+--- a/fs/coda/upcall.c
++++ b/fs/coda/upcall.c
+@@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
+ UPARG(CODA_FSYNC);
+
+ inp->coda_fsync.VFid = *fid;
+- error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
+- &outsize, inp);
++ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+
+ CODA_FREE(inp, insize);
+ return error;
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 74407c6dd592..ec8f75813beb 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2419,6 +2419,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ dlm_lockres_put(res);
+ continue;
+ }
++ dlm_move_lockres_to_recovery_list(dlm, res);
+ } else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index bfeb647459d9..2fc8e65c07c5 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1168,6 +1168,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ }
+ size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
+ if (size_change) {
++ /*
++ * Here we should wait for dio to finish before taking the
++ * inode lock, to avoid a deadlock between ocfs2_setattr() and
++ * ocfs2_dio_end_io_write()
++ */
++ inode_dio_wait(inode);
++
+ status = ocfs2_rw_lock(inode, 1);
+ if (status < 0) {
+ mlog_errno(status);
+@@ -1207,8 +1214,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ if (status)
+ goto bail_unlock;
+
+- inode_dio_wait(inode);
+-
+ if (i_size_read(inode) >= attr->ia_size) {
+ if (ocfs2_should_order_data(inode)) {
+ status = ocfs2_begin_ordered_truncate(inode,
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index fc14b8b3f6ce..1d86e09f17c1 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -691,7 +691,8 @@ typedef struct pglist_data {
+ * is the first PFN that needs to be initialised.
+ */
+ unsigned long first_deferred_pfn;
+- unsigned long static_init_size;
++ /* Number of non-deferred pages */
++ unsigned long static_init_pgcnt;
+ #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 63df75ae70ee..baf2dd102686 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3655,6 +3655,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
+ #endif
+ }
+
++static inline void ipvs_reset(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_IP_VS)
++ skb->ipvs_property = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ bool copy)
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 908b309d60d7..b8f51dffeae9 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -1493,7 +1493,7 @@ static void rcu_prepare_for_idle(void)
+ rdtp->last_accelerate = jiffies;
+ for_each_rcu_flavor(rsp) {
+ rdp = this_cpu_ptr(rsp->rda);
+- if (rcu_segcblist_pend_cbs(&rdp->cblist))
++ if (!rcu_segcblist_pend_cbs(&rdp->cblist))
+ continue;
+ rnp = rdp->mynode;
+ raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 1423da8dd16f..3bd0999c266f 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -289,28 +289,37 @@ EXPORT_SYMBOL(nr_online_nodes);
+ int page_group_by_mobility_disabled __read_mostly;
+
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
++
++/*
++ * Determine how many pages need to be initialized during early boot
++ * (non-deferred initialization).
++ * The value of first_deferred_pfn will be set later, once non-deferred pages
++ * are initialized, but for now set it to ULONG_MAX.
++ */
+ static inline void reset_deferred_meminit(pg_data_t *pgdat)
+ {
+- unsigned long max_initialise;
+- unsigned long reserved_lowmem;
++ phys_addr_t start_addr, end_addr;
++ unsigned long max_pgcnt;
++ unsigned long reserved;
+
+ /*
+ * Initialise at least 2G of a node but also take into account that
+ * two large system hashes can take up 1GB for 0.25TB/node.
+ */
+- max_initialise = max(2UL << (30 - PAGE_SHIFT),
+- (pgdat->node_spanned_pages >> 8));
++ max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
++ (pgdat->node_spanned_pages >> 8));
+
+ /*
+ * Compensate for all the memblock reservations (e.g. crash kernel)
+ * from the initial estimation to make sure we will initialize enough
+ * memory to boot.
+ */
+- reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+- pgdat->node_start_pfn + max_initialise);
+- max_initialise += reserved_lowmem;
++ start_addr = PFN_PHYS(pgdat->node_start_pfn);
++ end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
++ reserved = memblock_reserved_memory_within(start_addr, end_addr);
++ max_pgcnt += PHYS_PFN(reserved);
+
+- pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
++ pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
+ pgdat->first_deferred_pfn = ULONG_MAX;
+ }
+
+@@ -337,7 +346,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
+ if (zone_end < pgdat_end_pfn(pgdat))
+ return true;
+ (*nr_initialised)++;
+- if ((*nr_initialised > pgdat->static_init_size) &&
++ if ((*nr_initialised > pgdat->static_init_pgcnt) &&
+ (pfn & (PAGES_PER_SECTION - 1)) == 0) {
+ pgdat->first_deferred_pfn = pfn;
+ return false;
+diff --git a/mm/page_ext.c b/mm/page_ext.c
+index 88ccc044b09a..9dbabbfc4557 100644
+--- a/mm/page_ext.c
++++ b/mm/page_ext.c
+@@ -124,7 +124,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ struct page_ext *base;
+
+ base = NODE_DATA(page_to_nid(page))->node_page_ext;
+-#if defined(CONFIG_DEBUG_VM)
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+@@ -133,7 +132,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ */
+ if (unlikely(!base))
+ return NULL;
+-#endif
+ index = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ MAX_ORDER_NR_PAGES);
+ return get_entry(base, index);
+@@ -198,7 +196,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ {
+ unsigned long pfn = page_to_pfn(page);
+ struct mem_section *section = __pfn_to_section(pfn);
+-#if defined(CONFIG_DEBUG_VM)
+ /*
+ * The sanity checks the page allocator does upon freeing a
+ * page can reach here before the page_ext arrays are
+@@ -207,7 +204,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ */
+ if (!section->page_ext)
+ return NULL;
+-#endif
+ return get_entry(section->page_ext, pfn);
+ }
+
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 1a4197965415..7d973f63088c 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -187,8 +187,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
+ do {
+ next = hugetlb_entry_end(h, addr, end);
+ pte = huge_pte_offset(walk->mm, addr & hmask, sz);
+- if (pte && walk->hugetlb_entry)
++
++ if (pte)
+ err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
++ else if (walk->pte_hole)
++ err = walk->pte_hole(addr, next, walk);
++
+ if (err)
+ break;
+ } while (addr = next, addr != end);
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 9649579b5b9f..4a72ee4e2ae9 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ dev->name);
+ vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+ }
++ if (event == NETDEV_DOWN &&
++ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ struct net_device *tmp;
+ LIST_HEAD(close_list);
+
+- if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+-
+ /* Put all VLANs for this dev in the down state too. */
+ vlan_group_for_each_dev(grp, i, vlandev) {
+ flgs = vlandev->flags;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 72eb23d2426f..a0155578e951 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4476,6 +4476,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ if (!xnet)
+ return;
+
++ ipvs_reset(skb);
+ skb_orphan(skb);
+ skb->mark = 0;
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e92e5dbcb3d6..ffe96de8a079 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2613,7 +2613,6 @@ void tcp_simple_retransmit(struct sock *sk)
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ unsigned int mss = tcp_current_mss(sk);
+- u32 prior_lost = tp->lost_out;
+
+ tcp_for_write_queue(skb, sk) {
+ if (skb == tcp_send_head(sk))
+@@ -2630,7 +2629,7 @@ void tcp_simple_retransmit(struct sock *sk)
+
+ tcp_clear_retrans_hints_partial(tp);
+
+- if (prior_lost == tp->lost_out)
++ if (!tp->lost_out)
+ return;
+
+ if (tcp_is_reno(tp))
+diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
+index 6d650ed3cb59..5c871666c561 100644
+--- a/net/ipv4/tcp_nv.c
++++ b/net/ipv4/tcp_nv.c
+@@ -263,7 +263,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
+
+ /* rate in 100's bits per second */
+ rate64 = ((u64)sample->in_flight) * 8000000;
+- rate = (u32)div64_u64(rate64, (u64)(avg_rtt * 100));
++ rate = (u32)div64_u64(rate64, (u64)(avg_rtt ?: 1) * 100);
+
+ /* Remember the maximum rate seen during this RTT
+ * Note: It may be more than one RTT. This function should be
+diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
+index 11f69bbf9307..b6a2aa1dcf56 100644
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -149,11 +149,19 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ * is freed by GSO engine
+ */
+ if (copy_destructor) {
++ int delta;
++
+ swap(gso_skb->sk, skb->sk);
+ swap(gso_skb->destructor, skb->destructor);
+ sum_truesize += skb->truesize;
+- refcount_add(sum_truesize - gso_skb->truesize,
+- &skb->sk->sk_wmem_alloc);
++ delta = sum_truesize - gso_skb->truesize;
++ /* In some pathological cases, delta can be negative.
++ * We need to either use refcount_add() or refcount_sub_and_test()
++ */
++ if (likely(delta >= 0))
++ refcount_add(delta, &skb->sk->sk_wmem_alloc);
++ else
++ WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
+ }
+
+ delta = htonl(oldlen + (skb_tail_pointer(skb) -
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 58587b0e2b5d..e359840f46c0 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3207,13 +3207,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ th->source = htons(ireq->ir_num);
+ th->dest = ireq->ir_rmt_port;
+ skb->mark = ireq->ir_mark;
+- /* Setting of flags are superfluous here for callers (and ECE is
+- * not even correctly set)
+- */
+- tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
+- TCPHDR_SYN | TCPHDR_ACK);
+-
+- th->seq = htonl(TCP_SKB_CB(skb)->seq);
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ th->seq = htonl(tcp_rsk(req)->snt_isn);
+ /* XXX data is queued and acked as is. No buffer/window check */
+ th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
+
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 4d322c1b7233..e4280b6568b4 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -123,6 +123,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
++ struct iphdr *iph;
+ int length;
+
+ if (!pskb_may_pull(skb, 4))
+@@ -178,24 +179,17 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+- tunnel = l2tp_tunnel_find(net, tunnel_id);
+- if (tunnel) {
+- sk = tunnel->sock;
+- sock_hold(sk);
+- } else {
+- struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
+-
+- read_lock_bh(&l2tp_ip_lock);
+- sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+- inet_iif(skb), tunnel_id);
+- if (!sk) {
+- read_unlock_bh(&l2tp_ip_lock);
+- goto discard;
+- }
++ iph = (struct iphdr *)skb_network_header(skb);
+
+- sock_hold(sk);
++ read_lock_bh(&l2tp_ip_lock);
++ sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
++ tunnel_id);
++ if (!sk) {
+ read_unlock_bh(&l2tp_ip_lock);
++ goto discard;
+ }
++ sock_hold(sk);
++ read_unlock_bh(&l2tp_ip_lock);
+
+ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 88b397c30d86..8bcaa975b432 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -136,6 +136,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ unsigned char *ptr, *optr;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel = NULL;
++ struct ipv6hdr *iph;
+ int length;
+
+ if (!pskb_may_pull(skb, 4))
+@@ -192,24 +193,17 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ goto discard;
+
+ tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+- tunnel = l2tp_tunnel_find(net, tunnel_id);
+- if (tunnel) {
+- sk = tunnel->sock;
+- sock_hold(sk);
+- } else {
+- struct ipv6hdr *iph = ipv6_hdr(skb);
+-
+- read_lock_bh(&l2tp_ip6_lock);
+- sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+- inet6_iif(skb), tunnel_id);
+- if (!sk) {
+- read_unlock_bh(&l2tp_ip6_lock);
+- goto discard;
+- }
++ iph = ipv6_hdr(skb);
+
+- sock_hold(sk);
++ read_lock_bh(&l2tp_ip6_lock);
++ sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
++ inet6_iif(skb), tunnel_id);
++ if (!sk) {
+ read_unlock_bh(&l2tp_ip6_lock);
++ goto discard;
+ }
++ sock_hold(sk);
++ read_unlock_bh(&l2tp_ip6_lock);
+
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ goto discard_put;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 09c8dbbd2d70..2939a6b87c27 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2128,7 +2128,7 @@ static int netlink_dump(struct sock *sk)
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ struct module *module;
+- int len, err = -ENOBUFS;
++ int err = -ENOBUFS;
+ int alloc_min_size;
+ int alloc_size;
+
+@@ -2175,9 +2175,11 @@ static int netlink_dump(struct sock *sk)
+ skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+ netlink_skb_set_owner_r(skb, sk);
+
+- len = cb->dump(skb, cb);
++ if (nlk->dump_done_errno > 0)
++ nlk->dump_done_errno = cb->dump(skb, cb);
+
+- if (len > 0) {
++ if (nlk->dump_done_errno > 0 ||
++ skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
+ mutex_unlock(nlk->cb_mutex);
+
+ if (sk_filter(sk, skb))
+@@ -2187,13 +2189,15 @@ static int netlink_dump(struct sock *sk)
+ return 0;
+ }
+
+- nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
+- if (!nlh)
++ nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
++ sizeof(nlk->dump_done_errno), NLM_F_MULTI);
++ if (WARN_ON(!nlh))
+ goto errout_skb;
+
+ nl_dump_check_consistent(cb, nlh);
+
+- memcpy(nlmsg_data(nlh), &len, sizeof(len));
++ memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
++ sizeof(nlk->dump_done_errno));
+
+ if (sk_filter(sk, skb))
+ kfree_skb(skb);
+@@ -2265,6 +2269,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ }
+
+ nlk->cb_running = true;
++ nlk->dump_done_errno = INT_MAX;
+
+ mutex_unlock(nlk->cb_mutex);
+
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 3490f2430532..8908fc2d3de0 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -33,6 +33,7 @@ struct netlink_sock {
+ wait_queue_head_t wait;
+ bool bound;
+ bool cb_running;
++ int dump_done_errno;
+ struct netlink_callback cb;
+ struct mutex *cb_mutex;
+ struct mutex cb_def_mutex;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 1344e3a411ae..edb462b0b73b 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -807,9 +807,10 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_port = sh->source;
+ addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+- if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
++ if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb);
+- }
++ else
++ addr->v6.sin6_scope_id = 0;
+ }
+
+ *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 3d79085eb4e0..083da13e1af4 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4924,6 +4924,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ struct socket *sock;
+ int err = 0;
+
++ /* Do not peel off from one netns to another one. */
++ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
++ return -EINVAL;
++
+ if (!asoc)
+ return -EINVAL;
+
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 809ba70fbbbf..7d769b948de8 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -320,6 +320,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+ if (iint->flags & IMA_DIGSIG)
+ return;
+
++ if (iint->ima_file_status != INTEGRITY_PASS)
++ return;
++
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
+ if (rc < 0)
+ return;