Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Tue, 23 Feb 2021 13:46:23
Message-Id: 1614087966.5a353c5e6247e4d48b8679c204cbf331b072fa44.mpagano@gentoo
commit: 5a353c5e6247e4d48b8679c204cbf331b072fa44
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 23 13:46:06 2021 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 23 13:46:06 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a353c5e

Linux patch 4.4.258

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1257_linux-4.4.258.patch | 1470 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1474 insertions(+)

diff --git a/0000_README b/0000_README
index 269cc08..c846a29 100644
--- a/0000_README
+++ b/0000_README
@@ -1071,6 +1071,10 @@ Patch: 1256_linux-4.4.257.patch
From: http://www.kernel.org
Desc: Linux 4.4.257

+Patch: 1257_linux-4.4.258.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.258
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1257_linux-4.4.258.patch b/1257_linux-4.4.258.patch
new file mode 100644
index 0000000..53e2b33
--- /dev/null
+++ b/1257_linux-4.4.258.patch
@@ -0,0 +1,1470 @@
+diff --git a/Makefile b/Makefile
+index 8de8f9ac32795..abf7b5aa99bbf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 257
++SUBLEVEL = 258
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+@@ -760,6 +760,13 @@ ifdef CONFIG_FUNCTION_TRACER
+ ifndef CC_FLAGS_FTRACE
+ CC_FLAGS_FTRACE := -pg
+ endif
++ifdef CONFIG_FTRACE_MCOUNT_RECORD
++ # gcc 5 supports generating the mcount tables directly
++ ifeq ($(call cc-option-yn,-mrecord-mcount),y)
++ CC_FLAGS_FTRACE += -mrecord-mcount
++ export CC_USING_RECORD_MCOUNT := 1
++ endif
++endif
+ export CC_FLAGS_FTRACE
+ ifdef CONFIG_HAVE_FENTRY
+ CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
+diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
+index 0ed01f2d5ee4b..02579e6569f0c 100644
+--- a/arch/arm/xen/p2m.c
++++ b/arch/arm/xen/p2m.c
+@@ -93,8 +93,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ for (i = 0; i < count; i++) {
+ if (map_ops[i].status)
+ continue;
+- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
+- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
++ if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
++ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
++ return -ENOMEM;
++ }
+ }
+
+ return 0;
+diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
+index dc2d16ce8a0d5..3e33a9844d99a 100644
+--- a/arch/h8300/kernel/asm-offsets.c
++++ b/arch/h8300/kernel/asm-offsets.c
+@@ -62,6 +62,9 @@ int main(void)
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE, thread_info, preempt_count);
++#ifdef CONFIG_PREEMPTION
++ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
++#endif
+
+ return 0;
+ }
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index e59dc138b24ea..5fece9334f12b 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -61,6 +61,9 @@ endif
+ KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
+ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+
++# Intel CET isn't enabled in the kernel
++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
++
+ ifeq ($(CONFIG_X86_32),y)
+ BITS := 32
+ UTS_MACHINE := i386
+@@ -137,9 +140,6 @@ else
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
+
+- # Intel CET isn't enabled in the kernel
+- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-
+ # -funit-at-a-time shrinks the kernel .text considerably
+ # unfortunately it makes reading oopses harder.
+ KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index cab9f766bb06b..8c7c5bb94257e 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -725,7 +725,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ unsigned long mfn, pfn;
+
+ /* Do not add to override if the map failed. */
+- if (map_ops[i].status)
++ if (map_ops[i].status != GNTST_okay ||
++ (kmap_ops && kmap_ops[i].status != GNTST_okay))
+ continue;
+
+ if (map_ops[i].flags & GNTMAP_contains_pte) {
+@@ -763,17 +764,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
+ unsigned long pfn = page_to_pfn(pages[i]);
+
+- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
++ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ else
+ ret = -EINVAL;
+- goto out;
+- }
+-
+- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
+ if (kunmap_ops)
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+- kunmap_ops, count);
+-out:
++ kunmap_ops, count) ?: ret;
++
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 8dbdd156e0d3e..f9dfcd8872af0 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -825,8 +825,11 @@ again:
+ pages[i]->page = persistent_gnt->page;
+ pages[i]->persistent_gnt = persistent_gnt;
+ } else {
+- if (get_free_page(blkif, &pages[i]->page))
+- goto out_of_memory;
++ if (get_free_page(blkif, &pages[i]->page)) {
++ put_free_pages(blkif, pages_to_gnt, segs_to_map);
++ ret = -ENOMEM;
++ goto out;
++ }
+ addr = vaddr(pages[i]->page);
+ pages_to_gnt[segs_to_map] = pages[i]->page;
+ pages[i]->persistent_gnt = NULL;
+@@ -842,10 +845,8 @@ again:
+ break;
+ }
+
+- if (segs_to_map) {
++ if (segs_to_map)
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+- BUG_ON(ret);
+- }
+
+ /*
+ * Now swizzle the MFN in our domain with the MFN from the other domain
+@@ -860,7 +861,7 @@ again:
+ pr_debug("invalid buffer -- could not remap it\n");
+ put_free_pages(blkif, &pages[seg_idx]->page, 1);
+ pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+- ret |= 1;
++ ret |= !ret;
+ goto next;
+ }
+ pages[seg_idx]->handle = map[new_map_idx].handle;
+@@ -912,15 +913,18 @@ next:
+ }
+ segs_to_map = 0;
+ last_map = map_until;
+- if (map_until != num)
++ if (!ret && map_until != num)
+ goto again;
+
+- return ret;
++out:
++ for (i = last_map; i < num; i++) {
++ /* Don't zap current batch's valid persistent grants. */
++ if(i >= last_map + segs_to_map)
++ pages[i]->persistent_gnt = NULL;
++ pages[i]->handle = BLKBACK_INVALID_HANDLE;
++ }
+
+-out_of_memory:
+- pr_alert("%s: out of memory\n", __func__);
+- put_free_pages(blkif, pages_to_gnt, segs_to_map);
+- return -ENOMEM;
++ return ret;
+ }
+
+ static int xen_blkbk_map_seg(struct pending_req *pending_req)
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index 13c97f665ba88..bb81261de45fa 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -909,6 +909,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+ reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
++ put_device(reprobe->dev);
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+ }
+@@ -991,7 +992,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
+ module_put(THIS_MODULE);
+ return;
+ }
+- reprobe->dev = mvm->trans->dev;
++ reprobe->dev = get_device(mvm->trans->dev);
+ INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+ schedule_work(&reprobe->work);
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
+diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
+index 8dfe6b2bc7031..cb03c2855019b 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
+@@ -585,6 +585,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
++ if (!txq) {
++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
++ return;
++ }
++
+ spin_lock_bh(&txq->lock);
+ while (q->write_ptr != q->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index ee7a800c16d54..9a988ea5d797b 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1792,13 +1792,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
+ return 0;
+
+ gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
+- if (nr_mops != 0) {
++ if (nr_mops != 0)
+ ret = gnttab_map_refs(queue->tx_map_ops,
+ NULL,
+ queue->pages_to_map,
+ nr_mops);
+- BUG_ON(ret);
+- }
+
+ work_done = xenvif_tx_submit(queue);
+
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index c3e6225246045..74372aaf209de 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -871,7 +871,8 @@ qla27xx_template_checksum(void *p, ulong size)
+ static inline int
+ qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
+ {
+- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
++ return qla27xx_template_checksum(tmp,
++ le32_to_cpu(tmp->template_size)) == 0;
+ }
+
+ static inline int
+@@ -887,7 +888,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
+ ulong len;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+- len = tmp->template_size;
++ len = le32_to_cpu(tmp->template_size);
+ tmp = memcpy(vha->hw->fw_dump, tmp, len);
+ ql27xx_edit_template(vha, tmp);
+ qla27xx_walk_template(vha, tmp, tmp, &len);
+@@ -903,7 +904,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
+ ulong len = 0;
+
+ if (qla27xx_fwdt_template_valid(tmp)) {
+- len = tmp->template_size;
++ len = le32_to_cpu(tmp->template_size);
+ qla27xx_walk_template(vha, tmp, NULL, &len);
+ }
+
+@@ -915,7 +916,7 @@ qla27xx_fwdt_template_size(void *p)
+ {
+ struct qla27xx_fwdt_template *tmp = p;
+
+- return tmp->template_size;
++ return le32_to_cpu(tmp->template_size);
+ }
+
+ ulong
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
+index 141c1c5e73f42..2d3e1a8349b3b 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.h
++++ b/drivers/scsi/qla2xxx/qla_tmpl.h
+@@ -13,7 +13,7 @@
+ struct __packed qla27xx_fwdt_template {
+ uint32_t template_type;
+ uint32_t entry_offset;
+- uint32_t template_size;
++ __le32 template_size;
+ uint32_t reserved_1;
+
+ uint32_t entry_count;
+diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
+index ec004c6d76f23..44f1a496633ca 100644
+--- a/drivers/usb/dwc3/ulpi.c
++++ b/drivers/usb/dwc3/ulpi.c
+@@ -10,6 +10,8 @@
+ * published by the Free Software Foundation.
+ */
+
++#include <linux/delay.h>
++#include <linux/time64.h>
+ #include <linux/ulpi/regs.h>
+
+ #include "core.h"
+@@ -20,12 +22,22 @@
+ DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
+ DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
+
+-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
++#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
++
++static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
+ {
+- unsigned count = 1000;
++ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
++ unsigned int count = 1000;
+ u32 reg;
+
++ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
++ ns += DWC3_ULPI_BASE_DELAY;
++
++ if (read)
++ ns += DWC3_ULPI_BASE_DELAY;
++
+ while (count--) {
++ ndelay(ns);
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
+ if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ return 0;
+@@ -44,7 +56,7 @@ static int dwc3_ulpi_read(struct ulpi_ops *ops, u8 addr)
+ reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
+
+- ret = dwc3_ulpi_busyloop(dwc);
++ ret = dwc3_ulpi_busyloop(dwc, addr, true);
+ if (ret)
+ return ret;
+
+@@ -62,7 +74,7 @@ static int dwc3_ulpi_write(struct ulpi_ops *ops, u8 addr, u8 val)
+ reg |= DWC3_GUSB2PHYACC_WRITE | val;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
+
+- return dwc3_ulpi_busyloop(dwc);
++ return dwc3_ulpi_busyloop(dwc, addr, false);
+ }
+
+ static struct ulpi_ops dwc3_ulpi_ops = {
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 1865bcfa869bf..6f077ae0cf315 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -293,36 +293,47 @@ static int map_grant_pages(struct grant_map *map)
+ * to the kernel linear addresses of the struct pages.
+ * These ptes are completely different from the user ptes dealt
+ * with find_grant_ptes.
++ * Note that GNTMAP_device_map isn't needed here: The
++ * dev_bus_addr output field gets consumed only from ->map_ops,
++ * and by not requesting it when mapping we also avoid needing
++ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
++ * reference to the page in the hypervisor).
+ */
++ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
++ GNTMAP_host_map;
++
+ for (i = 0; i < map->count; i++) {
+ unsigned long address = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ BUG_ON(PageHighMem(map->pages[i]));
+
+- gnttab_set_map_op(&map->kmap_ops[i], address,
+- map->flags | GNTMAP_host_map,
++ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+- map->flags | GNTMAP_host_map, -1);
++ flags, -1);
+ }
+ }
+
+ pr_debug("map %d+%d\n", map->index, map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
+- if (err)
+- return err;
+
+ for (i = 0; i < map->count; i++) {
+- if (map->map_ops[i].status) {
++ if (map->map_ops[i].status == GNTST_okay)
++ map->unmap_ops[i].handle = map->map_ops[i].handle;
++ else if (!err)
+ err = -EINVAL;
+- continue;
+- }
+
+- map->unmap_ops[i].handle = map->map_ops[i].handle;
+- if (use_ptemod)
+- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
++ if (map->flags & GNTMAP_device_map)
++ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
++
++ if (use_ptemod) {
++ if (map->kmap_ops[i].status == GNTST_okay)
++ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
++ else if (!err)
++ err = -EINVAL;
++ }
+ }
+ return err;
+ }
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 29a1b8054a4dc..7fb5a2e7fa810 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -415,12 +415,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
+ return 0;
+
+ err = gnttab_map_refs(map, NULL, pg, cnt);
+- BUG_ON(err);
+ for (i = 0; i < cnt; i++) {
+ if (unlikely(map[i].status != GNTST_okay)) {
+ pr_err("invalid buffer -- could not remap it\n");
+ map[i].handle = SCSIBACK_INVALID_HANDLE;
+- err = -ENOMEM;
++ if (!err)
++ err = -ENOMEM;
+ } else {
+ get_page(pg[i]);
+ }
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 66a9c9dab8316..7f068330edb67 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1929,7 +1929,7 @@ void wb_workfn(struct work_struct *work)
+ struct bdi_writeback, dwork);
+ long pages_written;
+
+- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
++ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
+ current->flags |= PF_SWAPWRITE;
+
+ if (likely(!current_is_workqueue_rescuer() ||
+diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
+index 8073b6532cf04..d2a806416c3ab 100644
+--- a/fs/squashfs/export.c
++++ b/fs/squashfs/export.c
+@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
+ int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
+- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
++ u64 start;
+ __le64 ino;
+ int err;
+
+ TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+
++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
++ return -EINVAL;
++
++ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
++
+ err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
+ if (err < 0)
+ return err;
+@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ u64 lookup_table_start, u64 next_table, unsigned int inodes)
+ {
+ unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
++ int n;
+ __le64 *table;
++ u64 start, end;
+
+ TRACE("In read_inode_lookup_table, length %d\n", length);
+
+@@ -134,20 +142,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
+ if (inodes == 0)
+ return ERR_PTR(-EINVAL);
+
+- /* length bytes should not extend into the next table - this check
+- * also traps instances where lookup_table_start is incorrectly larger
+- * than the next table start
++ /*
++ * The computed size of the lookup table (length bytes) should exactly
++ * match the table start and end points
+ */
+- if (lookup_table_start + length > next_table)
++ if (length != (next_table - lookup_table_start))
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, lookup_table_start, length);
++ if (IS_ERR(table))
++ return table;
+
+ /*
+- * table[0] points to the first inode lookup table metadata block,
+- * this should be less than lookup_table_start
++ * table[0], table[1], ... table[indexes - 1] store the locations
++ * of the compressed inode lookup blocks. Each entry should be
++ * less than the next (i.e. table[0] < table[1]), and the difference
++ * between them should be SQUASHFS_METADATA_SIZE or less.
++ * table[indexes - 1] should be less than lookup_table_start, and
++ * again the difference should be SQUASHFS_METADATA_SIZE or less
+ */
+- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
++ for (n = 0; n < (indexes - 1); n++) {
++ start = le64_to_cpu(table[n]);
++ end = le64_to_cpu(table[n + 1]);
++
++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++ kfree(table);
++ return ERR_PTR(-EINVAL);
++ }
++ }
++
++ start = le64_to_cpu(table[indexes - 1]);
++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
+index d38ea3dab9515..8ccc0e3f6ea5a 100644
+--- a/fs/squashfs/id.c
++++ b/fs/squashfs/id.c
+@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int block = SQUASHFS_ID_BLOCK(index);
+ int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
+- u64 start_block = le64_to_cpu(msblk->id_table[block]);
++ u64 start_block;
+ __le32 disk_id;
+ int err;
+
++ if (index >= msblk->ids)
++ return -EINVAL;
++
++ start_block = le64_to_cpu(msblk->id_table[block]);
++
+ err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
+ sizeof(disk_id));
+ if (err < 0)
+@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
+ u64 id_table_start, u64 next_table, unsigned short no_ids)
+ {
+ unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
++ int n;
+ __le64 *table;
++ u64 start, end;
+
+ TRACE("In read_id_index_table, length %d\n", length);
+
+@@ -80,20 +88,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
+ return ERR_PTR(-EINVAL);
+
+ /*
+- * length bytes should not extend into the next table - this check
+- * also traps instances where id_table_start is incorrectly larger
+- * than the next table start
++ * The computed size of the index table (length bytes) should exactly
++ * match the table start and end points
+ */
+- if (id_table_start + length > next_table)
++ if (length != (next_table - id_table_start))
+ return ERR_PTR(-EINVAL);
+
+ table = squashfs_read_table(sb, id_table_start, length);
++ if (IS_ERR(table))
++ return table;
+
+ /*
+- * table[0] points to the first id lookup table metadata block, this
+- * should be less than id_table_start
++ * table[0], table[1], ... table[indexes - 1] store the locations
++ * of the compressed id blocks. Each entry should be less than
++ * the next (i.e. table[0] < table[1]), and the difference between them
++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
++ * should be less than id_table_start, and again the difference
++ * should be SQUASHFS_METADATA_SIZE or less
+ */
+- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
++ for (n = 0; n < (indexes - 1); n++) {
++ start = le64_to_cpu(table[n]);
++ end = le64_to_cpu(table[n + 1]);
++
++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++ kfree(table);
++ return ERR_PTR(-EINVAL);
++ }
++ }
++
++ start = le64_to_cpu(table[indexes - 1]);
++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
+index ef69c31947bf8..5234c19a0eabc 100644
+--- a/fs/squashfs/squashfs_fs_sb.h
++++ b/fs/squashfs/squashfs_fs_sb.h
+@@ -77,5 +77,6 @@ struct squashfs_sb_info {
+ unsigned int inodes;
+ unsigned int fragments;
+ int xattr_ids;
++ unsigned int ids;
+ };
+ #endif
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 93aa3e23c845b..44500dcf1805b 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -177,6 +177,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+ msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
+ msblk->inodes = le32_to_cpu(sblk->inodes);
+ msblk->fragments = le32_to_cpu(sblk->fragments);
++ msblk->ids = le16_to_cpu(sblk->no_ids);
+ flags = le16_to_cpu(sblk->flags);
+
+ TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
+@@ -188,7 +189,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+ TRACE("Block size %d\n", msblk->block_size);
+ TRACE("Number of inodes %d\n", msblk->inodes);
+ TRACE("Number of fragments %d\n", msblk->fragments);
+- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
++ TRACE("Number of ids %d\n", msblk->ids);
+ TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
+ TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
+ TRACE("sblk->fragment_table_start %llx\n",
+@@ -245,8 +246,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
+ allocate_id_index_table:
+ /* Allocate and read id index table */
+ msblk->id_table = squashfs_read_id_index_table(sb,
+- le64_to_cpu(sblk->id_table_start), next_table,
+- le16_to_cpu(sblk->no_ids));
++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
+ if (IS_ERR(msblk->id_table)) {
+ ERROR("unable to read id index table\n");
+ err = PTR_ERR(msblk->id_table);
+diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
+index c83f5d9ec125c..30b3aaa08b628 100644
+--- a/fs/squashfs/xattr.h
++++ b/fs/squashfs/xattr.h
+@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
+ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
+ u64 start, u64 *xattr_table_start, int *xattr_ids)
+ {
++ struct squashfs_xattr_id_table *id_table;
++
++ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
++ if (IS_ERR(id_table))
++ return (__le64 *) id_table;
++
++ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
++ kfree(id_table);
++
+ ERROR("Xattrs in filesystem, these will be ignored\n");
+- *xattr_table_start = start;
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
+index c89607d690c48..3a655d879600c 100644
+--- a/fs/squashfs/xattr_id.c
++++ b/fs/squashfs/xattr_id.c
+@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ int block = SQUASHFS_XATTR_BLOCK(index);
+ int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
+- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++ u64 start_block;
+ struct squashfs_xattr_id id;
+ int err;
+
++ if (index >= msblk->xattr_ids)
++ return -EINVAL;
++
++ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
++
+ err = squashfs_read_metadata(sb, &id, &start_block, &offset,
+ sizeof(id));
+ if (err < 0)
+@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
+ /*
+ * Read uncompressed xattr id lookup table indexes from disk into memory
+ */
+-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
+ u64 *xattr_table_start, int *xattr_ids)
+ {
+- unsigned int len;
++ struct squashfs_sb_info *msblk = sb->s_fs_info;
++ unsigned int len, indexes;
+ struct squashfs_xattr_id_table *id_table;
++ __le64 *table;
++ u64 start, end;
++ int n;
+
+- id_table = squashfs_read_table(sb, start, sizeof(*id_table));
++ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
+ if (IS_ERR(id_table))
+ return (__le64 *) id_table;
+
+@@ -83,13 +92,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+ if (*xattr_ids == 0)
+ return ERR_PTR(-EINVAL);
+
+- /* xattr_table should be less than start */
+- if (*xattr_table_start >= start)
++ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
++
++ /*
++ * The computed size of the index table (len bytes) should exactly
++ * match the table start and end points
++ */
++ start = table_start + sizeof(*id_table);
++ end = msblk->bytes_used;
++
++ if (len != (end - start))
+ return ERR_PTR(-EINVAL);
+
+- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
++ table = squashfs_read_table(sb, start, len);
++ if (IS_ERR(table))
++ return table;
++
++ /* table[0], table[1], ... table[indexes - 1] store the locations
++ * of the compressed xattr id blocks. Each entry should be less than
++ * the next (i.e. table[0] < table[1]), and the difference between them
++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
++ * should be less than table_start, and again the difference
++ * shouls be SQUASHFS_METADATA_SIZE or less.
++ *
++ * Finally xattr_table_start should be less than table[0].
++ */
++ for (n = 0; n < (indexes - 1); n++) {
++ start = le64_to_cpu(table[n]);
++ end = le64_to_cpu(table[n + 1]);
++
++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
++ kfree(table);
++ return ERR_PTR(-EINVAL);
++ }
++ }
++
++ start = le64_to_cpu(table[indexes - 1]);
++ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
++ kfree(table);
++ return ERR_PTR(-EINVAL);
++ }
+
+- TRACE("In read_xattr_index_table, length %d\n", len);
++ if (*xattr_table_start >= le64_to_cpu(table[0])) {
++ kfree(table);
++ return ERR_PTR(-EINVAL);
++ }
+
+- return squashfs_read_table(sb, start + sizeof(*id_table), len);
++ return table;
+ }
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index 361274ce5815f..883ce03191e76 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -12,6 +12,7 @@
+ #include <linux/fs.h>
+ #include <linux/sched.h>
+ #include <linux/blkdev.h>
++#include <linux/device.h>
+ #include <linux/writeback.h>
+ #include <linux/blk-cgroup.h>
+ #include <linux/backing-dev-defs.h>
+@@ -518,4 +519,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+ (1 << WB_async_congested));
+ }
+
++extern const char *bdi_unknown_name;
++
++static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
++{
++ if (!bdi || !bdi->dev)
++ return bdi_unknown_name;
++ return dev_name(bdi->dev);
++}
++
+ #endif /* _LINUX_BACKING_DEV_H */
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 60048c50404ee..0603b12180056 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -747,7 +747,9 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+ /* for init task */
+-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
++#define INIT_FTRACE_GRAPH \
++ .ret_stack = NULL, \
++ .tracing_graph_pause = ATOMIC_INIT(0),
+
+ /*
+ * Stack of return addresses for functions
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c1a42027ee0ee..401a404b64b93 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3428,6 +3428,7 @@ static inline void netif_tx_disable(struct net_device *dev)
+
+ local_bh_disable();
+ cpu = smp_processor_id();
++ spin_lock(&dev->tx_global_lock);
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+@@ -3435,6 +3436,7 @@ static inline void netif_tx_disable(struct net_device *dev)
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock(txq);
+ }
++ spin_unlock(&dev->tx_global_lock);
+ local_bh_enable();
+ }
+
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 870268d42ae7d..7da409760cf18 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -28,6 +28,10 @@ size_t strlcpy(char *, const char *, size_t);
+ #ifndef __HAVE_ARCH_STRSCPY
+ ssize_t strscpy(char *, const char *, size_t);
+ #endif
++
++/* Wraps calls to strscpy()/memset(), no arch specific code required */
++ssize_t strscpy_pad(char *dest, const char *src, size_t count);
++
+ #ifndef __HAVE_ARCH_STRCAT
+ extern char * strcat(char *, const char *);
+ #endif
+diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
+index 70c6b92e15a7c..8def5e0a491fa 100644
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -23,8 +23,7 @@
+ #define XDR_QUADLEN(l) (((l) + 3) >> 2)
+
+ /*
+- * Generic opaque `network object.' At the kernel level, this type
+- * is used only by lockd.
++ * Generic opaque `network object.'
+ */
+ #define XDR_MAX_NETOBJ 1024
+ struct xdr_netobj {
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 2609b1c3549e2..d01217407d6d8 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -65,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name,
+- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
++ strscpy_pad(__entry->name,
++ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
++ NULL), 32);
+ __entry->ino = mapping ? mapping->host->i_ino : 0;
+ __entry->index = page->index;
+ ),
+@@ -95,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
+
+ /* may be called for files on pseudo FSes w/ unregistered bdi */
+- strncpy(__entry->name,
+- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->flags = flags;
+@@ -205,8 +205,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ strscpy_pad(__entry->name,
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->sync_mode = wbc->sync_mode;
+ __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+@@ -249,8 +249,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ ),
+ TP_fast_assign(
+- strncpy(__entry->name,
+- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->nr_pages = work->nr_pages;
+ __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+ __entry->sync_mode = work->sync_mode;
+@@ -303,7 +302,7 @@ DECLARE_EVENT_CLASS(writeback_class,
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ ),
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ ),
+ TP_printk("bdi %s: cgroup=%s",
+@@ -326,7 +325,7 @@ TRACE_EVENT(writeback_bdi_register,
+ __array(char, name, 32)
+ ),
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ ),
+ TP_printk("bdi %s",
+ __entry->name
+@@ -351,7 +350,7 @@ DECLARE_EVENT_CLASS(wbc_class,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->pages_skipped = wbc->pages_skipped;
+ __entry->sync_mode = wbc->sync_mode;
+@@ -402,7 +401,7 @@ TRACE_EVENT(writeback_queue_io,
+ __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ ),
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
+ __entry->older = dirtied_before;
+ __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
+ __entry->moved = moved;
+@@ -487,7 +486,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
+ ),
+
+ TP_fast_assign(
+- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
+ __entry->write_bw = KBps(wb->write_bandwidth);
+ __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
+ __entry->dirty_rate = KBps(dirty_rate);
+@@ -552,7 +551,7 @@ TRACE_EVENT(balance_dirty_pages,
+
+ TP_fast_assign(
+ unsigned long freerun = (thresh + bg_thresh) / 2;
+- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
+
+ __entry->limit = global_wb_domain.dirty_limit;
+ __entry->setpoint = (global_wb_domain.dirty_limit +
+@@ -613,8 +612,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ strscpy_pad(__entry->name,
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+@@ -687,8 +686,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
+ ),
+
+ TP_fast_assign(
+- strncpy(__entry->name,
+- dev_name(inode_to_bdi(inode)->dev), 32);
++ strscpy_pad(__entry->name,
++ bdi_dev_name(inode_to_bdi(inode)), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
+index 34b1379f9777d..f9d8aac170fbc 100644
+--- a/include/xen/grant_table.h
++++ b/include/xen/grant_table.h
+@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
+ map->flags = flags;
+ map->ref = ref;
+ map->dom = domid;
++ map->status = 1; /* arbitrary positive value */
+ }
+
+ static inline void
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 89ed01911a9a2..b56b1daa0a59a 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5708,7 +5708,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+ }
+
+ if (t->ret_stack == NULL) {
+- atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->curr_ret_stack = -1;
+ /* Make sure the tasks see the -1 first: */
+@@ -5920,7 +5919,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+ static void
+ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+ {
+- atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+ /* make curr_ret_stack visible before we add the ret_stack */
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 9d6e755d17546..b89e00c748f18 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1083,7 +1083,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ mutex_lock(&event_mutex);
+ list_for_each_entry(file, &tr->events, list) {
+ call = file->event_call;
+- if (!trace_event_name(call) || !call->class || !call->class->reg)
++ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
++ !trace_event_name(call) || !call->class || !call->class->reg)
+ continue;
+
+ if (system && strcmp(call->class->system, system->name) != 0)
+diff --git a/lib/string.c b/lib/string.c
+index 7f4baad6fb193..4351ec43cd6b8 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -157,11 +157,9 @@ EXPORT_SYMBOL(strlcpy);
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+- * Copy the string, or as much of it as fits, into the dest buffer.
+- * The routine returns the number of characters copied (not including
+- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
+- * The behavior is undefined if the string buffers overlap.
+- * The destination buffer is always NUL terminated, unless it's zero-sized.
++ * Copy the string, or as much of it as fits, into the dest buffer. The
++ * behavior is undefined if the string buffers overlap. The destination
++ * buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the src string beyond the specified "count" bytes, and since
+@@ -171,8 +169,10 @@ EXPORT_SYMBOL(strlcpy);
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
+- * with an overflow test, then just memset() the tail of the dest buffer.
++ * zeroed. If zeroing is desired please use strscpy_pad().
++ *
++ * Return: The number of characters copied (not including the trailing
++ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
+ */
+ ssize_t strscpy(char *dest, const char *src, size_t count)
+ {
+@@ -259,6 +259,39 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
+ }
+ EXPORT_SYMBOL(stpcpy);
+
++/**
++ * strscpy_pad() - Copy a C-string into a sized buffer
++ * @dest: Where to copy the string to
++ * @src: Where to copy the string from
++ * @count: Size of destination buffer
++ *
++ * Copy the string, or as much of it as fits, into the dest buffer. The
++ * behavior is undefined if the string buffers overlap. The destination
++ * buffer is always %NUL terminated, unless it's zero-sized.
++ *
++ * If the source string is shorter than the destination buffer, zeros
++ * the tail of the destination buffer.
++ *
++ * For full explanation of why you may want to consider using the
++ * 'strscpy' functions please see the function docstring for strscpy().
++ *
++ * Return: The number of characters copied (not including the trailing
++ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
++ */
++ssize_t strscpy_pad(char *dest, const char *src, size_t count)
++{
++ ssize_t written;
++
++ written = strscpy(dest, src, count);
++ if (written < 0 || written == count - 1)
++ return written;
++
++ memset(dest + written + 1, 0, count - written - 1);
++
++ return written;
++}
++EXPORT_SYMBOL(strscpy_pad);
++
+ #ifndef __HAVE_ARCH_STRCAT
+ /**
+ * strcat - Append one %NUL-terminated string to another
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 07e3b3b8e8469..f705c58b320b8 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
+ EXPORT_SYMBOL_GPL(noop_backing_dev_info);
+
+ static struct class *bdi_class;
++const char *bdi_unknown_name = "(unknown)";
+
+ /*
+ * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
+diff --git a/mm/memblock.c b/mm/memblock.c
+index f8fab45bfdb75..ff51a37eb86be 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -189,14 +189,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
+ *
+ * Find @size free area aligned to @align in the specified range and node.
+ *
+- * When allocation direction is bottom-up, the @start should be greater
+- * than the end of the kernel image. Otherwise, it will be trimmed. The
+- * reason is that we want the bottom-up allocation just near the kernel
+- * image so it is highly likely that the allocated memory and the kernel
+- * will reside in the same node.
+- *
+- * If bottom-up allocation failed, will try to allocate memory top-down.
+- *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+@@ -204,8 +196,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
+ phys_addr_t align, phys_addr_t start,
+ phys_addr_t end, int nid, ulong flags)
+ {
+- phys_addr_t kernel_end, ret;
+-
+ /* pump up @end */
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+ end = memblock.current_limit;
+
+ /* avoid allocating the first page */
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
+ end = max(start, end);
+- kernel_end = __pa_symbol(_end);
+-
+- /*
+- * try bottom-up allocation only when bottom-up mode
+- * is set and @end is above the kernel image.
+- */
+- if (memblock_bottom_up() && end > kernel_end) {
+- phys_addr_t bottom_up_start;
+-
+- /* make sure we will allocate above the kernel */
+- bottom_up_start = max(start, kernel_end);
+
+- /* ok, try bottom-up allocation first */
+- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
+- size, align, nid, flags);
+- if (ret)
+- return ret;
+-
+- /*
+- * we always limit bottom-up allocation above the kernel,
+- * but top-down allocation doesn't have the limit, so
+- * retrying top-down allocation may succeed when bottom-up
+- * allocation failed.
+- *
+- * bottom-up allocation is expected to be fail very rarely,
+- * so we use WARN_ONCE() here to see the stack trace if
+- * fail happens.
+- */
+- WARN_ONCE(1, "memblock: bottom-up allocation failed, "
+- "memory hotunplug may be affected\n");
+- }
+-
+- return __memblock_find_range_top_down(start, end, size, align, nid,
+- flags);
++ if (memblock_bottom_up())
++ return __memblock_find_range_bottom_up(start, end, size, align,
++ nid, flags);
++ else
++ return __memblock_find_range_top_down(start, end, size, align,
++ nid, flags);
+ }
+
+ /**
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 76a008b1cbe5f..adc93329e6aac 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2933,7 +2933,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
+ break;
+ if (!aalg->pfkey_supported)
+ continue;
+- if (aalg_tmpl_set(t, aalg) && aalg->available)
++ if (aalg_tmpl_set(t, aalg))
+ sz += sizeof(struct sadb_comb);
+ }
+ return sz + sizeof(struct sadb_prop);
+@@ -2951,7 +2951,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!ealg->pfkey_supported)
+ continue;
+
+- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
++ if (!(ealg_tmpl_set(t, ealg)))
+ continue;
+
+ for (k = 1; ; k++) {
+@@ -2962,7 +2962,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!aalg->pfkey_supported)
+ continue;
+
+- if (aalg_tmpl_set(t, aalg) && aalg->available)
++ if (aalg_tmpl_set(t, aalg))
+ sz += sizeof(struct sadb_comb);
+ }
+ }
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index cd53b861a15c1..ffe673c6a2485 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -156,7 +156,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
+ /*
+ * Drop entries with timestamps older then 'time'.
+ */
+-static void recent_entry_reap(struct recent_table *t, unsigned long time)
++static void recent_entry_reap(struct recent_table *t, unsigned long time,
++ struct recent_entry *working, bool update)
+ {
+ struct recent_entry *e;
+
+@@ -165,6 +166,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
+ */
+ e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+
++ /*
++ * Do not reap the entry which are going to be updated.
++ */
++ if (e == working && update)
++ return;
++
+ /*
+ * The last time stamp is the most recent.
+ */
+@@ -307,7 +314,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
+
+ /* info->seconds must be non-zero */
+ if (info->check_set & XT_RECENT_REAP)
+- recent_entry_reap(t, time);
++ recent_entry_reap(t, time, e,
++ info->check_set & XT_RECENT_UPDATE && ret);
+ }
+
+ if (info->check_set & XT_RECENT_SET ||
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 62fca77bf3c70..7bde2976307ed 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -53,6 +53,7 @@
+ #include <asm/uaccess.h>
+ #include <linux/hashtable.h>
+
++#include "auth_gss_internal.h"
+ #include "../netns.h"
+
+ static const struct rpc_authops authgss_ops;
+@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
+ clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
+ }
+
+-static const void *
+-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+-{
+- const void *q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- memcpy(res, p, len);
+- return q;
+-}
+-
+-static inline const void *
+-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+-{
+- const void *q;
+- unsigned int len;
+-
+- p = simple_get_bytes(p, end, &len, sizeof(len));
+- if (IS_ERR(p))
+- return p;
+- q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- dest->data = kmemdup(p, len, GFP_NOFS);
+- if (unlikely(dest->data == NULL))
+- return ERR_PTR(-ENOMEM);
+- dest->len = len;
+- return q;
+-}
+-
+ static struct gss_cl_ctx *
+ gss_cred_get_ctx(struct rpc_cred *cred)
+ {
+diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
+new file mode 100644
+index 0000000000000..f6d9631bd9d00
+--- /dev/null
++++ b/net/sunrpc/auth_gss/auth_gss_internal.h
+@@ -0,0 +1,45 @@
++// SPDX-License-Identifier: BSD-3-Clause
++/*
++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
++ *
++ * Internal definitions for RPCSEC_GSS client authentication
++ *
++ * Copyright (c) 2000 The Regents of the University of Michigan.
++ * All rights reserved.
++ *
++ */
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/sunrpc/xdr.h>
++
++static inline const void *
++simple_get_bytes(const void *p, const void *end, void *res, size_t len)
++{
++ const void *q = (const void *)((const char *)p + len);
++ if (unlikely(q > end || q < p))
++ return ERR_PTR(-EFAULT);
++ memcpy(res, p, len);
++ return q;
++}
++
++static inline const void *
++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
++{
++ const void *q;
++ unsigned int len;
++
++ p = simple_get_bytes(p, end, &len, sizeof(len));
++ if (IS_ERR(p))
++ return p;
++ q = (const void *)((const char *)p + len);
++ if (unlikely(q > end || q < p))
++ return ERR_PTR(-EFAULT);
++ if (len) {
++ dest->data = kmemdup(p, len, GFP_NOFS);
++ if (unlikely(dest->data == NULL))
++ return ERR_PTR(-ENOMEM);
++ } else
++ dest->data = NULL;
++ dest->len = len;
++ return q;
++}
+diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
+index 28db442a0034a..89e616da161fd 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
+@@ -45,6 +45,8 @@
+ #include <linux/crypto.h>
+ #include <linux/sunrpc/gss_krb5_enctypes.h>
+
++#include "auth_gss_internal.h"
++
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ # define RPCDBG_FACILITY RPCDBG_AUTH
+ #endif
+@@ -186,35 +188,6 @@ get_gss_krb5_enctype(int etype)
+ return NULL;
+ }
+
+-static const void *
+-simple_get_bytes(const void *p, const void *end, void *res, int len)
+-{
+- const void *q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- memcpy(res, p, len);
+- return q;
+-}
+-
+-static const void *
+-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
+-{
+- const void *q;
+- unsigned int len;
+-
+- p = simple_get_bytes(p, end, &len, sizeof(len));
+- if (IS_ERR(p))
+- return p;
+- q = (const void *)((const char *)p + len);
+- if (unlikely(q > end || q < p))
+- return ERR_PTR(-EFAULT);
+- res->data = kmemdup(p, len, GFP_NOFS);
+- if (unlikely(res->data == NULL))
+- return ERR_PTR(-ENOMEM);
+- res->len = len;
+- return q;
+-}
+-
+ static inline const void *
+ get_key(const void *p, const void *end,
+ struct krb5_ctx *ctx, struct crypto_blkcipher **res)
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 07b1a2775210b..cdd91a60b89aa 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -818,10 +818,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ */
+
+ sk = sock->sk;
++
++ lock_sock(sk);
+ if (sock->state == SS_UNCONNECTED) {
+ err = -ENOTCONN;
+ if (sk->sk_type == SOCK_STREAM)
+- return err;
++ goto out;
+ } else {
+ sock->state = SS_DISCONNECTING;
+ err = 0;
+@@ -830,10 +832,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ /* Receive and send shutdowns are treated alike. */
+ mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
+ if (mode) {
+- lock_sock(sk);
+ sk->sk_shutdown |= mode;
+ sk->sk_state_change(sk);
+- release_sock(sk);
+
+ if (sk->sk_type == SOCK_STREAM) {
+ sock_reset_flag(sk, SOCK_DONE);
+@@ -841,6 +841,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
+ }
+ }
+
++out:
++ release_sock(sk);
+ return err;
+ }
+
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 42aef001dfdd7..6de137a28a127 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -221,6 +221,8 @@ cmd_modversions_c = \
+ endif
+
+ ifdef CONFIG_FTRACE_MCOUNT_RECORD
++ifndef CC_USING_RECORD_MCOUNT
++# compiler will not generate __mcount_loc use recordmcount or recordmcount.pl
+ ifdef BUILD_C_RECORDMCOUNT
+ ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
+ RECORDMCOUNT_FLAGS = -w
+@@ -249,6 +251,7 @@ cmd_record_mcount = \
+ "$(CC_FLAGS_FTRACE)" ]; then \
+ $(sub_cmd_record_mcount) \
+ fi;
++endif # CC_USING_RECORD_MCOUNT
+ endif
+
+ define rule_cc_o_c
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ba8e8840b94b2..b0326734a980b 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -346,9 +346,8 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ */
+ kvm->mmu_notifier_count++;
+ need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
+- need_tlb_flush |= kvm->tlbs_dirty;
+ /* we've to flush the tlb before the pages can be freed */
+- if (need_tlb_flush)
++ if (need_tlb_flush || kvm->tlbs_dirty)
+ kvm_flush_remote_tlbs(kvm);
+
+ spin_unlock(&kvm->mmu_lock);