Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 24 Nov 2017 09:46:59
Message-Id: 1511516751.8bae67a5ce4b1defc43fc3875433ed2687c3583e.alicef@gentoo
1 commit: 8bae67a5ce4b1defc43fc3875433ed2687c3583e
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Fri Nov 24 09:45:51 2017 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Fri Nov 24 09:45:51 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8bae67a5
7
8 linux kernel 4.4.101
9
10 0000_README | 4 +
11 1100_linux-4.4.101.patch | 718 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 722 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index fb4d48b..c2f7291 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -443,6 +443,10 @@ Patch: 1099_linux-4.4.100.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.100
21
22 +Patch: 1100_linux-4.4.101.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.101
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1100_linux-4.4.101.patch b/1100_linux-4.4.101.patch
31 new file mode 100644
32 index 0000000..2fa1df0
33 --- /dev/null
34 +++ b/1100_linux-4.4.101.patch
35 @@ -0,0 +1,718 @@
36 +diff --git a/Makefile b/Makefile
37 +index 91dd7832f499..0d7b050427ed 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 100
44 ++SUBLEVEL = 101
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
49 +index 210826d5bba5..9119722eb347 100644
50 +--- a/arch/arm64/kernel/traps.c
51 ++++ b/arch/arm64/kernel/traps.c
52 +@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
53 +
54 + /*
55 + * We need to switch to kernel mode so that we can use __get_user
56 +- * to safely read from kernel space. Note that we now dump the
57 +- * code first, just in case the backtrace kills us.
58 ++ * to safely read from kernel space.
59 + */
60 + fs = get_fs();
61 + set_fs(KERNEL_DS);
62 +@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
63 + print_ip_sym(where);
64 + }
65 +
66 +-static void dump_instr(const char *lvl, struct pt_regs *regs)
67 ++static void __dump_instr(const char *lvl, struct pt_regs *regs)
68 + {
69 + unsigned long addr = instruction_pointer(regs);
70 +- mm_segment_t fs;
71 + char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
72 + int i;
73 +
74 +- /*
75 +- * We need to switch to kernel mode so that we can use __get_user
76 +- * to safely read from kernel space. Note that we now dump the
77 +- * code first, just in case the backtrace kills us.
78 +- */
79 +- fs = get_fs();
80 +- set_fs(KERNEL_DS);
81 +-
82 + for (i = -4; i < 1; i++) {
83 + unsigned int val, bad;
84 +
85 +@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
86 + }
87 + }
88 + printk("%sCode: %s\n", lvl, str);
89 ++}
90 +
91 +- set_fs(fs);
92 ++static void dump_instr(const char *lvl, struct pt_regs *regs)
93 ++{
94 ++ if (!user_mode(regs)) {
95 ++ mm_segment_t fs = get_fs();
96 ++ set_fs(KERNEL_DS);
97 ++ __dump_instr(lvl, regs);
98 ++ set_fs(fs);
99 ++ } else {
100 ++ __dump_instr(lvl, regs);
101 ++ }
102 + }
103 +
104 + static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
105 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
106 +index 25372dc381d4..5cb5e8ff0224 100644
107 +--- a/drivers/char/ipmi/ipmi_msghandler.c
108 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
109 +@@ -4029,7 +4029,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
110 + }
111 +
112 + static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
113 +- struct list_head *timeouts, long timeout_period,
114 ++ struct list_head *timeouts,
115 ++ unsigned long timeout_period,
116 + int slot, unsigned long *flags,
117 + unsigned int *waiting_msgs)
118 + {
119 +@@ -4042,8 +4043,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
120 + if (!ent->inuse)
121 + return;
122 +
123 +- ent->timeout -= timeout_period;
124 +- if (ent->timeout > 0) {
125 ++ if (timeout_period < ent->timeout) {
126 ++ ent->timeout -= timeout_period;
127 + (*waiting_msgs)++;
128 + return;
129 + }
130 +@@ -4109,7 +4110,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
131 + }
132 + }
133 +
134 +-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
135 ++static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
136 ++ unsigned long timeout_period)
137 + {
138 + struct list_head timeouts;
139 + struct ipmi_recv_msg *msg, *msg2;
140 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
141 +index 5dca77e0ffed..2cb34b0f3856 100644
142 +--- a/drivers/net/bonding/bond_main.c
143 ++++ b/drivers/net/bonding/bond_main.c
144 +@@ -3166,7 +3166,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
145 + hash ^= (hash >> 16);
146 + hash ^= (hash >> 8);
147 +
148 +- return hash;
149 ++ return hash >> 1;
150 + }
151 +
152 + /*-------------------------- Device entry points ----------------------------*/
153 +diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
154 +index b1b9ebafb354..a3b2e23921bf 100644
155 +--- a/drivers/net/ethernet/fealnx.c
156 ++++ b/drivers/net/ethernet/fealnx.c
157 +@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
158 + RXFSD = 0x00000800, /* first descriptor */
159 + RXLSD = 0x00000400, /* last descriptor */
160 + ErrorSummary = 0x80, /* error summary */
161 +- RUNT = 0x40, /* runt packet received */
162 +- LONG = 0x20, /* long packet received */
163 ++ RUNTPKT = 0x40, /* runt packet received */
164 ++ LONGPKT = 0x20, /* long packet received */
165 + FAE = 0x10, /* frame align error */
166 + CRC = 0x08, /* crc error */
167 + RXER = 0x04, /* receive error */
168 +@@ -1633,7 +1633,7 @@ static int netdev_rx(struct net_device *dev)
169 + dev->name, rx_status);
170 +
171 + dev->stats.rx_errors++; /* end of a packet. */
172 +- if (rx_status & (LONG | RUNT))
173 ++ if (rx_status & (LONGPKT | RUNTPKT))
174 + dev->stats.rx_length_errors++;
175 + if (rx_status & RXER)
176 + dev->stats.rx_frame_errors++;
177 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
178 +index 669edbd47602..d6ceb8b91cd6 100644
179 +--- a/drivers/nvme/host/pci.c
180 ++++ b/drivers/nvme/host/pci.c
181 +@@ -350,8 +350,8 @@ static void async_completion(struct nvme_queue *nvmeq, void *ctx,
182 + struct async_cmd_info *cmdinfo = ctx;
183 + cmdinfo->result = le32_to_cpup(&cqe->result);
184 + cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
185 +- queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
186 + blk_mq_free_request(cmdinfo->req);
187 ++ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
188 + }
189 +
190 + static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq,
191 +diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
192 +index de1c143b475f..21fc9b3a27cf 100644
193 +--- a/drivers/tty/serial/omap-serial.c
194 ++++ b/drivers/tty/serial/omap-serial.c
195 +@@ -693,7 +693,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
196 + if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
197 + up->efr |= UART_EFR_RTS;
198 + else
199 +- up->efr &= UART_EFR_RTS;
200 ++ up->efr &= ~UART_EFR_RTS;
201 + serial_out(up, UART_EFR, up->efr);
202 + serial_out(up, UART_LCR, lcr);
203 +
204 +diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
205 +index f6c6c8adbc01..7289f0a7670b 100644
206 +--- a/fs/coda/upcall.c
207 ++++ b/fs/coda/upcall.c
208 +@@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
209 + UPARG(CODA_FSYNC);
210 +
211 + inp->coda_fsync.VFid = *fid;
212 +- error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
213 +- &outsize, inp);
214 ++ error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
215 +
216 + CODA_FREE(inp, insize);
217 + return error;
218 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
219 +index 1d738723a41a..501ecc4a1ac4 100644
220 +--- a/fs/ocfs2/file.c
221 ++++ b/fs/ocfs2/file.c
222 +@@ -1166,6 +1166,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
223 + }
224 + size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
225 + if (size_change) {
226 ++ /*
227 ++ * Here we should wait dio to finish before inode lock
228 ++ * to avoid a deadlock between ocfs2_setattr() and
229 ++ * ocfs2_dio_end_io_write()
230 ++ */
231 ++ inode_dio_wait(inode);
232 ++
233 + status = ocfs2_rw_lock(inode, 1);
234 + if (status < 0) {
235 + mlog_errno(status);
236 +@@ -1186,8 +1193,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
237 + if (status)
238 + goto bail_unlock;
239 +
240 +- inode_dio_wait(inode);
241 +-
242 + if (i_size_read(inode) >= attr->ia_size) {
243 + if (ocfs2_should_order_data(inode)) {
244 + status = ocfs2_begin_ordered_truncate(inode,
245 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
246 +index 5b609a3ce3d7..ff88d6189411 100644
247 +--- a/include/linux/mmzone.h
248 ++++ b/include/linux/mmzone.h
249 +@@ -688,7 +688,8 @@ typedef struct pglist_data {
250 + * is the first PFN that needs to be initialised.
251 + */
252 + unsigned long first_deferred_pfn;
253 +- unsigned long static_init_size;
254 ++ /* Number of non-deferred pages */
255 ++ unsigned long static_init_pgcnt;
256 + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
257 + } pg_data_t;
258 +
259 +diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
260 +index bf268fa92c5b..fec40271339f 100644
261 +--- a/include/linux/page_idle.h
262 ++++ b/include/linux/page_idle.h
263 +@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
264 +
265 + static inline bool page_is_young(struct page *page)
266 + {
267 +- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
268 ++ struct page_ext *page_ext = lookup_page_ext(page);
269 ++
270 ++ if (unlikely(!page_ext))
271 ++ return false;
272 ++
273 ++ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
274 + }
275 +
276 + static inline void set_page_young(struct page *page)
277 + {
278 +- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
279 ++ struct page_ext *page_ext = lookup_page_ext(page);
280 ++
281 ++ if (unlikely(!page_ext))
282 ++ return;
283 ++
284 ++ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
285 + }
286 +
287 + static inline bool test_and_clear_page_young(struct page *page)
288 + {
289 +- return test_and_clear_bit(PAGE_EXT_YOUNG,
290 +- &lookup_page_ext(page)->flags);
291 ++ struct page_ext *page_ext = lookup_page_ext(page);
292 ++
293 ++ if (unlikely(!page_ext))
294 ++ return false;
295 ++
296 ++ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
297 + }
298 +
299 + static inline bool page_is_idle(struct page *page)
300 + {
301 +- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
302 ++ struct page_ext *page_ext = lookup_page_ext(page);
303 ++
304 ++ if (unlikely(!page_ext))
305 ++ return false;
306 ++
307 ++ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
308 + }
309 +
310 + static inline void set_page_idle(struct page *page)
311 + {
312 +- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
313 ++ struct page_ext *page_ext = lookup_page_ext(page);
314 ++
315 ++ if (unlikely(!page_ext))
316 ++ return;
317 ++
318 ++ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
319 + }
320 +
321 + static inline void clear_page_idle(struct page *page)
322 + {
323 +- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
324 ++ struct page_ext *page_ext = lookup_page_ext(page);
325 ++
326 ++ if (unlikely(!page_ext))
327 ++ return;
328 ++
329 ++ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
330 + }
331 + #endif /* CONFIG_64BIT */
332 +
333 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
334 +index 3f61c647fc5c..b5421f6f155a 100644
335 +--- a/include/linux/skbuff.h
336 ++++ b/include/linux/skbuff.h
337 +@@ -3400,6 +3400,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
338 + #endif
339 + }
340 +
341 ++static inline void ipvs_reset(struct sk_buff *skb)
342 ++{
343 ++#if IS_ENABLED(CONFIG_IP_VS)
344 ++ skb->ipvs_property = 0;
345 ++#endif
346 ++}
347 ++
348 + /* Note: This doesn't put any conntrack and bridge info in dst. */
349 + static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
350 + bool copy)
351 +diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
352 +index 5bf5906ce13b..fe1c61f7cf26 100644
353 +--- a/mm/debug-pagealloc.c
354 ++++ b/mm/debug-pagealloc.c
355 +@@ -34,6 +34,8 @@ static inline void set_page_poison(struct page *page)
356 + struct page_ext *page_ext;
357 +
358 + page_ext = lookup_page_ext(page);
359 + if (!page_ext)
360 + return;
361 + __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
362 + }
363 +
364 +@@ -42,6 +44,8 @@ static inline void clear_page_poison(struct page *page)
365 + struct page_ext *page_ext;
366 +
367 + page_ext = lookup_page_ext(page);
368 + if (!page_ext)
369 + return;
370 + __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
371 + }
372 +
373 +@@ -50,6 +54,8 @@ static inline bool page_poison(struct page *page)
374 + struct page_ext *page_ext;
375 +
376 + page_ext = lookup_page_ext(page);
377 + if (!page_ext)
378 + return false;
379 + return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
380 + }
381 +
382 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
383 +index 6b5421ae86c6..3c70f03d91ec 100644
384 +--- a/mm/page_alloc.c
385 ++++ b/mm/page_alloc.c
386 +@@ -267,28 +267,37 @@ EXPORT_SYMBOL(nr_online_nodes);
387 + int page_group_by_mobility_disabled __read_mostly;
388 +
389 + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
390 ++
391 ++/*
392 ++ * Determine how many pages need to be initialized during early boot
393 ++ * (non-deferred initialization).
394 ++ * The value of first_deferred_pfn will be set later, once non-deferred pages
395 ++ * are initialized, but for now set it ULONG_MAX.
396 ++ */
397 + static inline void reset_deferred_meminit(pg_data_t *pgdat)
398 + {
399 +- unsigned long max_initialise;
400 +- unsigned long reserved_lowmem;
401 ++ phys_addr_t start_addr, end_addr;
402 ++ unsigned long max_pgcnt;
403 ++ unsigned long reserved;
404 +
405 + /*
406 + * Initialise at least 2G of a node but also take into account that
407 + * two large system hashes that can take up 1GB for 0.25TB/node.
408 + */
409 +- max_initialise = max(2UL << (30 - PAGE_SHIFT),
410 +- (pgdat->node_spanned_pages >> 8));
411 ++ max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
412 ++ (pgdat->node_spanned_pages >> 8));
413 +
414 + /*
415 + * Compensate the all the memblock reservations (e.g. crash kernel)
416 + * from the initial estimation to make sure we will initialize enough
417 + * memory to boot.
418 + */
419 +- reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
420 +- pgdat->node_start_pfn + max_initialise);
421 +- max_initialise += reserved_lowmem;
422 ++ start_addr = PFN_PHYS(pgdat->node_start_pfn);
423 ++ end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
424 ++ reserved = memblock_reserved_memory_within(start_addr, end_addr);
425 ++ max_pgcnt += PHYS_PFN(reserved);
426 +
427 +- pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
428 ++ pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
429 + pgdat->first_deferred_pfn = ULONG_MAX;
430 + }
431 +
432 +@@ -324,7 +333,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
433 + return true;
434 + /* Initialise at least 2G of the highest zone */
435 + (*nr_initialised)++;
436 +- if ((*nr_initialised > pgdat->static_init_size) &&
437 ++ if ((*nr_initialised > pgdat->static_init_pgcnt) &&
438 + (pfn & (PAGES_PER_SECTION - 1)) == 0) {
439 + pgdat->first_deferred_pfn = pfn;
440 + return false;
441 +@@ -560,6 +569,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
442 + return;
443 +
444 + page_ext = lookup_page_ext(page);
445 ++ if (unlikely(!page_ext))
446 ++ return;
447 ++
448 + __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
449 +
450 + INIT_LIST_HEAD(&page->lru);
451 +@@ -577,6 +589,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
452 + return;
453 +
454 + page_ext = lookup_page_ext(page);
455 ++ if (unlikely(!page_ext))
456 ++ return;
457 ++
458 + __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
459 +
460 + set_page_private(page, 0);
461 +diff --git a/mm/page_ext.c b/mm/page_ext.c
462 +index 292ca7b8debd..4d1eac0d4fc5 100644
463 +--- a/mm/page_ext.c
464 ++++ b/mm/page_ext.c
465 +@@ -106,7 +106,6 @@ struct page_ext *lookup_page_ext(struct page *page)
466 + struct page_ext *base;
467 +
468 + base = NODE_DATA(page_to_nid(page))->node_page_ext;
469 +-#ifdef CONFIG_DEBUG_VM
470 + /*
471 + * The sanity checks the page allocator does upon freeing a
472 + * page can reach here before the page_ext arrays are
473 +@@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct page *page)
474 + */
475 + if (unlikely(!base))
476 + return NULL;
477 +-#endif
478 + offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
479 + MAX_ORDER_NR_PAGES);
480 + return base + offset;
481 +@@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct page *page)
482 + {
483 + unsigned long pfn = page_to_pfn(page);
484 + struct mem_section *section = __pfn_to_section(pfn);
485 +-#ifdef CONFIG_DEBUG_VM
486 + /*
487 + * The sanity checks the page allocator does upon freeing a
488 + * page can reach here before the page_ext arrays are
489 +@@ -189,7 +186,6 @@ struct page_ext *lookup_page_ext(struct page *page)
490 + */
491 + if (!section->page_ext)
492 + return NULL;
493 +-#endif
494 + return section->page_ext + pfn;
495 + }
496 +
497 +diff --git a/mm/page_owner.c b/mm/page_owner.c
498 +index 983c3a10fa07..dd6b9cebf981 100644
499 +--- a/mm/page_owner.c
500 ++++ b/mm/page_owner.c
501 +@@ -53,6 +53,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
502 +
503 + for (i = 0; i < (1 << order); i++) {
504 + page_ext = lookup_page_ext(page + i);
505 ++ if (unlikely(!page_ext))
506 ++ continue;
507 + __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
508 + }
509 + }
510 +@@ -60,6 +62,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
511 + void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
512 + {
513 + struct page_ext *page_ext = lookup_page_ext(page);
514 ++
515 + struct stack_trace trace = {
516 + .nr_entries = 0,
517 + .max_entries = ARRAY_SIZE(page_ext->trace_entries),
518 +@@ -67,6 +70,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
519 + .skip = 3,
520 + };
521 +
522 ++ if (unlikely(!page_ext))
523 ++ return;
524 ++
525 + save_stack_trace(&trace);
526 +
527 + page_ext->order = order;
528 +@@ -79,6 +85,12 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
529 + gfp_t __get_page_owner_gfp(struct page *page)
530 + {
531 + struct page_ext *page_ext = lookup_page_ext(page);
532 ++ if (unlikely(!page_ext))
533 ++ /*
534 ++ * The caller just returns 0 if no valid gfp
535 ++ * So return 0 here too.
536 ++ */
537 ++ return 0;
538 +
539 + return page_ext->gfp_mask;
540 + }
541 +@@ -194,6 +206,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
542 + }
543 +
544 + page_ext = lookup_page_ext(page);
545 ++ if (unlikely(!page_ext))
546 ++ continue;
547 +
548 + /*
549 + * Some pages could be missed by concurrent allocation or free,
550 +@@ -257,6 +271,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
551 + continue;
552 +
553 + page_ext = lookup_page_ext(page);
554 ++ if (unlikely(!page_ext))
555 ++ continue;
556 +
557 + /* Maybe overraping zone */
558 + if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
559 +diff --git a/mm/pagewalk.c b/mm/pagewalk.c
560 +index 29f2f8b853ae..c2cbd2620169 100644
561 +--- a/mm/pagewalk.c
562 ++++ b/mm/pagewalk.c
563 +@@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
564 + do {
565 + next = hugetlb_entry_end(h, addr, end);
566 + pte = huge_pte_offset(walk->mm, addr & hmask);
567 +- if (pte && walk->hugetlb_entry)
568 ++
569 ++ if (pte)
570 + err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
571 ++ else if (walk->pte_hole)
572 ++ err = walk->pte_hole(addr, next, walk);
573 ++
574 + if (err)
575 + break;
576 + } while (addr = next, addr != end);
577 +diff --git a/mm/vmstat.c b/mm/vmstat.c
578 +index c54fd2924f25..c344e3609c53 100644
579 +--- a/mm/vmstat.c
580 ++++ b/mm/vmstat.c
581 +@@ -1091,6 +1091,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
582 + continue;
583 +
584 + page_ext = lookup_page_ext(page);
585 ++ if (unlikely(!page_ext))
586 ++ continue;
587 +
588 + if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
589 + continue;
590 +diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
591 +index 5e4199d5a388..01abb6431fd9 100644
592 +--- a/net/8021q/vlan.c
593 ++++ b/net/8021q/vlan.c
594 +@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
595 + dev->name);
596 + vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
597 + }
598 ++ if (event == NETDEV_DOWN &&
599 ++ (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
600 ++ vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
601 +
602 + vlan_info = rtnl_dereference(dev->vlan_info);
603 + if (!vlan_info)
604 +@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
605 + struct net_device *tmp;
606 + LIST_HEAD(close_list);
607 +
608 +- if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
609 +- vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
610 +-
611 + /* Put all VLANs for this dev in the down state too. */
612 + vlan_group_for_each_dev(grp, i, vlandev) {
613 + flgs = vlandev->flags;
614 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
615 +index 73dfd7729bc9..d33609c2f276 100644
616 +--- a/net/core/skbuff.c
617 ++++ b/net/core/skbuff.c
618 +@@ -4229,6 +4229,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
619 + if (!xnet)
620 + return;
621 +
622 ++ ipvs_reset(skb);
623 + skb_orphan(skb);
624 + skb->mark = 0;
625 + }
626 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
627 +index 64c7ce847584..39c2919fe0d3 100644
628 +--- a/net/ipv4/tcp_output.c
629 ++++ b/net/ipv4/tcp_output.c
630 +@@ -3018,13 +3018,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
631 + tcp_ecn_make_synack(req, th);
632 + th->source = htons(ireq->ir_num);
633 + th->dest = ireq->ir_rmt_port;
634 +- /* Setting of flags are superfluous here for callers (and ECE is
635 +- * not even correctly set)
636 +- */
637 +- tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
638 +- TCPHDR_SYN | TCPHDR_ACK);
639 +-
640 +- th->seq = htonl(TCP_SKB_CB(skb)->seq);
641 ++ skb->ip_summed = CHECKSUM_PARTIAL;
642 ++ th->seq = htonl(tcp_rsk(req)->snt_isn);
643 + /* XXX data is queued and acked as is. No buffer/window check */
644 + th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
645 +
646 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
647 +index acfb16fdcd55..9ecdd61c6463 100644
648 +--- a/net/netlink/af_netlink.c
649 ++++ b/net/netlink/af_netlink.c
650 +@@ -2077,7 +2077,7 @@ static int netlink_dump(struct sock *sk)
651 + struct sk_buff *skb = NULL;
652 + struct nlmsghdr *nlh;
653 + struct module *module;
654 +- int len, err = -ENOBUFS;
655 ++ int err = -ENOBUFS;
656 + int alloc_min_size;
657 + int alloc_size;
658 +
659 +@@ -2125,9 +2125,11 @@ static int netlink_dump(struct sock *sk)
660 + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
661 + netlink_skb_set_owner_r(skb, sk);
662 +
663 +- len = cb->dump(skb, cb);
664 ++ if (nlk->dump_done_errno > 0)
665 ++ nlk->dump_done_errno = cb->dump(skb, cb);
666 +
667 +- if (len > 0) {
668 ++ if (nlk->dump_done_errno > 0 ||
669 ++ skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
670 + mutex_unlock(nlk->cb_mutex);
671 +
672 + if (sk_filter(sk, skb))
673 +@@ -2137,13 +2139,15 @@ static int netlink_dump(struct sock *sk)
674 + return 0;
675 + }
676 +
677 +- nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
678 +- if (!nlh)
679 ++ nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
680 ++ sizeof(nlk->dump_done_errno), NLM_F_MULTI);
681 ++ if (WARN_ON(!nlh))
682 + goto errout_skb;
683 +
684 + nl_dump_check_consistent(cb, nlh);
685 +
686 +- memcpy(nlmsg_data(nlh), &len, sizeof(len));
687 ++ memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
688 ++ sizeof(nlk->dump_done_errno));
689 +
690 + if (sk_filter(sk, skb))
691 + kfree_skb(skb);
692 +@@ -2208,6 +2212,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
693 + cb->skb = skb;
694 +
695 + nlk->cb_running = true;
696 ++ nlk->dump_done_errno = INT_MAX;
697 +
698 + mutex_unlock(nlk->cb_mutex);
699 +
700 +diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
701 +index ea4600aea6b0..d987696c0eb4 100644
702 +--- a/net/netlink/af_netlink.h
703 ++++ b/net/netlink/af_netlink.h
704 +@@ -38,6 +38,7 @@ struct netlink_sock {
705 + wait_queue_head_t wait;
706 + bool bound;
707 + bool cb_running;
708 ++ int dump_done_errno;
709 + struct netlink_callback cb;
710 + struct mutex *cb_mutex;
711 + struct mutex cb_def_mutex;
712 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
713 +index e33e9bd4ed5a..8a61ccc37e12 100644
714 +--- a/net/sctp/ipv6.c
715 ++++ b/net/sctp/ipv6.c
716 +@@ -806,6 +806,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
717 + if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
718 + struct sctp_ulpevent *ev = sctp_skb2event(skb);
719 + addr->v6.sin6_scope_id = ev->iif;
720 ++ } else {
721 ++ addr->v6.sin6_scope_id = 0;
722 + }
723 + }
724 +
725 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
726 +index 7f0f689b8d2b..272edd7748a0 100644
727 +--- a/net/sctp/socket.c
728 ++++ b/net/sctp/socket.c
729 +@@ -4453,6 +4453,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
730 + struct socket *sock;
731 + int err = 0;
732 +
733 ++ /* Do not peel off from one netns to another one. */
734 ++ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
735 ++ return -EINVAL;
736 ++
737 + /* Do not peel off from one netns to another one. */
738 + if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
739 + return -EINVAL;
740 +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
741 +index 9ce9d5003dcc..19014293f927 100644
742 +--- a/security/integrity/ima/ima_appraise.c
743 ++++ b/security/integrity/ima/ima_appraise.c
744 +@@ -297,6 +297,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
745 + if (iint->flags & IMA_DIGSIG)
746 + return;
747 +
748 ++ if (iint->ima_file_status != INTEGRITY_PASS)
749 ++ return;
750 ++
751 + rc = ima_collect_measurement(iint, file, NULL, NULL);
752 + if (rc < 0)
753 + return;