Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 23 Feb 2021 13:51:51
Message-Id: 1614088293.bff1ff3be99885afe5dd5970747ee0e3dd77f575.alicef@gentoo
1 commit: bff1ff3be99885afe5dd5970747ee0e3dd77f575
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Tue Feb 23 13:51:16 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Tue Feb 23 13:51:33 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bff1ff3b
7
8 Linux patch 4.14.222
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1221_linux-4.14.222.patch | 2063 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2067 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index a3da73a..6772d7c 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -927,6 +927,10 @@ Patch: 1220_linux-4.14.221.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.221
23
24 +Patch: 1221_linux-4.14.222.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.222
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1221_linux-4.14.222.patch b/1221_linux-4.14.222.patch
33 new file mode 100644
34 index 0000000..ba019a0
35 --- /dev/null
36 +++ b/1221_linux-4.14.222.patch
37 @@ -0,0 +1,2063 @@
38 +diff --git a/Makefile b/Makefile
39 +index b25ce26c1cd71..101b789e7c2ba 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 221
47 ++SUBLEVEL = 222
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +@@ -760,6 +760,13 @@ ifdef CONFIG_FUNCTION_TRACER
52 + ifndef CC_FLAGS_FTRACE
53 + CC_FLAGS_FTRACE := -pg
54 + endif
55 ++ifdef CONFIG_FTRACE_MCOUNT_RECORD
56 ++ # gcc 5 supports generating the mcount tables directly
57 ++ ifeq ($(call cc-option-yn,-mrecord-mcount),y)
58 ++ CC_FLAGS_FTRACE += -mrecord-mcount
59 ++ export CC_USING_RECORD_MCOUNT := 1
60 ++ endif
61 ++endif
62 + export CC_FLAGS_FTRACE
63 + ifdef CONFIG_HAVE_FENTRY
64 + CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY)
65 +diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
66 +index c5b119ddb70b8..7f2b73cbd2280 100644
67 +--- a/arch/arm/boot/dts/lpc32xx.dtsi
68 ++++ b/arch/arm/boot/dts/lpc32xx.dtsi
69 +@@ -323,9 +323,6 @@
70 +
71 + clocks = <&xtal_32k>, <&xtal>;
72 + clock-names = "xtal_32k", "xtal";
73 +-
74 +- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
75 +- assigned-clock-rates = <208000000>;
76 + };
77 + };
78 +
79 +diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
80 +index 02e6b6dfffa7e..19e4ff507209b 100644
81 +--- a/arch/arm/kernel/signal.c
82 ++++ b/arch/arm/kernel/signal.c
83 +@@ -667,18 +667,20 @@ struct page *get_signal_page(void)
84 +
85 + addr = page_address(page);
86 +
87 ++ /* Poison the entire page */
88 ++ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
89 ++ PAGE_SIZE / sizeof(u32));
90 ++
91 + /* Give the signal return code some randomness */
92 + offset = 0x200 + (get_random_int() & 0x7fc);
93 + signal_return_offset = offset;
94 +
95 +- /*
96 +- * Copy signal return handlers into the vector page, and
97 +- * set sigreturn to be a pointer to these.
98 +- */
99 ++ /* Copy signal return handlers into the page */
100 + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
101 +
102 +- ptr = (unsigned long)addr + offset;
103 +- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
104 ++ /* Flush out all instructions in this page */
105 ++ ptr = (unsigned long)addr;
106 ++ flush_icache_range(ptr, ptr + PAGE_SIZE);
107 +
108 + return page;
109 + }
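
[Editor's note, not part of the commit: the poison value 0xe7fddef1 sits in the ARM permanently-undefined instruction space, so after this change a stray jump anywhere into the signal page faults instead of executing leftover page contents, and one icache flush of the whole page covers the trampoline wherever the randomized offset lands. Condensed, the new get_signal_page() sequence is (constants from the patch):]

    /* Sketch of the rewritten body; not a verbatim copy of the patch. */
    u32 *addr = page_address(page);
    unsigned long offset = 0x200 + (get_random_int() & 0x7fc);

    memset32(addr, __opcode_to_mem_arm(0xe7fddef1),  /* poison: undefined insn */
             PAGE_SIZE / sizeof(u32));
    memcpy((char *)addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
    flush_icache_range((unsigned long)addr,
                       (unsigned long)addr + PAGE_SIZE); /* flush whole page */
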
110 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
111 +index e8e637c4f354d..32aa108b2b7cd 100644
112 +--- a/arch/arm/xen/enlighten.c
113 ++++ b/arch/arm/xen/enlighten.c
114 +@@ -392,8 +392,6 @@ static int __init xen_guest_init(void)
115 + return -ENOMEM;
116 + }
117 + gnttab_init();
118 +- if (!xen_initial_domain())
119 +- xenbus_probe();
120 +
121 + /*
122 + * Making sure board specific code will not set up ops for
123 +diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
124 +index 0641ba54ab62a..ce538c51fa3fb 100644
125 +--- a/arch/arm/xen/p2m.c
126 ++++ b/arch/arm/xen/p2m.c
127 +@@ -93,8 +93,10 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
128 + for (i = 0; i < count; i++) {
129 + if (map_ops[i].status)
130 + continue;
131 +- set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
132 +- map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT);
133 ++ if (unlikely(!set_phys_to_machine(map_ops[i].host_addr >> XEN_PAGE_SHIFT,
134 ++ map_ops[i].dev_bus_addr >> XEN_PAGE_SHIFT))) {
135 ++ return -ENOMEM;
136 ++ }
137 + }
138 +
139 + return 0;
140 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
141 +index 82747048381fa..721f4b6b262f1 100644
142 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
143 ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
144 +@@ -231,6 +231,7 @@
145 + reg = <0x0 0xf8000000 0x0 0x2000000>,
146 + <0x0 0xfd000000 0x0 0x1000000>;
147 + reg-names = "axi-base", "apb-base";
148 ++ device_type = "pci";
149 + #address-cells = <3>;
150 + #size-cells = <2>;
151 + #interrupt-cells = <1>;
152 +@@ -249,7 +250,6 @@
153 + <0 0 0 2 &pcie0_intc 1>,
154 + <0 0 0 3 &pcie0_intc 2>,
155 + <0 0 0 4 &pcie0_intc 3>;
156 +- linux,pci-domain = <0>;
157 + max-link-speed = <1>;
158 + msi-map = <0x0 &its 0x0 0x1000>;
159 + phys = <&pcie_phy 0>, <&pcie_phy 1>,
160 +diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
161 +index 85e60509f0a83..d4b53af657c84 100644
162 +--- a/arch/h8300/kernel/asm-offsets.c
163 ++++ b/arch/h8300/kernel/asm-offsets.c
164 +@@ -63,6 +63,9 @@ int main(void)
165 + OFFSET(TI_FLAGS, thread_info, flags);
166 + OFFSET(TI_CPU, thread_info, cpu);
167 + OFFSET(TI_PRE, thread_info, preempt_count);
168 ++#ifdef CONFIG_PREEMPTION
169 ++ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
170 ++#endif
171 +
172 + return 0;
173 + }
174 +diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
175 +index 3018582794efc..d16e6654a6555 100644
176 +--- a/arch/mips/kernel/smp-bmips.c
177 ++++ b/arch/mips/kernel/smp-bmips.c
178 +@@ -574,7 +574,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
179 + */
180 + }
181 +
182 +-void __init bmips_cpu_setup(void)
183 ++void bmips_cpu_setup(void)
184 + {
185 + void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
186 + u32 __maybe_unused cfg;
187 +diff --git a/arch/x86/Makefile b/arch/x86/Makefile
188 +index 4c8e9f12b0c4d..9f33a69b56051 100644
189 +--- a/arch/x86/Makefile
190 ++++ b/arch/x86/Makefile
191 +@@ -62,6 +62,9 @@ endif
192 + KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
193 + KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
194 +
195 ++# Intel CET isn't enabled in the kernel
196 ++KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
197 ++
198 + ifeq ($(CONFIG_X86_32),y)
199 + BITS := 32
200 + UTS_MACHINE := i386
201 +@@ -138,9 +141,6 @@ else
202 + KBUILD_CFLAGS += -mno-red-zone
203 + KBUILD_CFLAGS += -mcmodel=kernel
204 +
205 +- # Intel CET isn't enabled in the kernel
206 +- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
207 +-
208 + # -funit-at-a-time shrinks the kernel .text considerably
209 + # unfortunately it makes reading oopses harder.
210 + KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
211 +diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
212 +index 15812e553b95e..30295d2ebd924 100644
213 +--- a/arch/x86/xen/p2m.c
214 ++++ b/arch/x86/xen/p2m.c
215 +@@ -708,7 +708,8 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
216 + unsigned long mfn, pfn;
217 +
218 + /* Do not add to override if the map failed. */
219 +- if (map_ops[i].status)
220 ++ if (map_ops[i].status != GNTST_okay ||
221 ++ (kmap_ops && kmap_ops[i].status != GNTST_okay))
222 + continue;
223 +
224 + if (map_ops[i].flags & GNTMAP_contains_pte) {
225 +@@ -746,17 +747,15 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
226 + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
227 + unsigned long pfn = page_to_pfn(pages[i]);
228 +
229 +- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
230 ++ if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
231 ++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
232 ++ else
233 + ret = -EINVAL;
234 +- goto out;
235 +- }
236 +-
237 +- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
238 + }
239 + if (kunmap_ops)
240 + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
241 +- kunmap_ops, count);
242 +-out:
243 ++ kunmap_ops, count) ?: ret;
244 ++
245 + return ret;
246 + }
247 + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
248 +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
249 +index 04ae2474e3344..a703f365b5b19 100644
250 +--- a/drivers/block/xen-blkback/blkback.c
251 ++++ b/drivers/block/xen-blkback/blkback.c
252 +@@ -843,8 +843,11 @@ again:
253 + pages[i]->page = persistent_gnt->page;
254 + pages[i]->persistent_gnt = persistent_gnt;
255 + } else {
256 +- if (get_free_page(ring, &pages[i]->page))
257 +- goto out_of_memory;
258 ++ if (get_free_page(ring, &pages[i]->page)) {
259 ++ put_free_pages(ring, pages_to_gnt, segs_to_map);
260 ++ ret = -ENOMEM;
261 ++ goto out;
262 ++ }
263 + addr = vaddr(pages[i]->page);
264 + pages_to_gnt[segs_to_map] = pages[i]->page;
265 + pages[i]->persistent_gnt = NULL;
266 +@@ -860,10 +863,8 @@ again:
267 + break;
268 + }
269 +
270 +- if (segs_to_map) {
271 ++ if (segs_to_map)
272 + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
273 +- BUG_ON(ret);
274 +- }
275 +
276 + /*
277 + * Now swizzle the MFN in our domain with the MFN from the other domain
278 +@@ -878,7 +879,7 @@ again:
279 + pr_debug("invalid buffer -- could not remap it\n");
280 + put_free_pages(ring, &pages[seg_idx]->page, 1);
281 + pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
282 +- ret |= 1;
283 ++ ret |= !ret;
284 + goto next;
285 + }
286 + pages[seg_idx]->handle = map[new_map_idx].handle;
287 +@@ -930,17 +931,18 @@ next:
288 + }
289 + segs_to_map = 0;
290 + last_map = map_until;
291 +- if (map_until != num)
292 ++ if (!ret && map_until != num)
293 + goto again;
294 +
295 +- return ret;
296 +-
297 +-out_of_memory:
298 +- pr_alert("%s: out of memory\n", __func__);
299 +- put_free_pages(ring, pages_to_gnt, segs_to_map);
300 +- for (i = last_map; i < num; i++)
301 ++out:
302 ++ for (i = last_map; i < num; i++) {
303 ++ /* Don't zap current batch's valid persistent grants. */
304 ++ if (i >= last_map + segs_to_map)
305 ++ pages[i]->persistent_gnt = NULL;
306 + pages[i]->handle = BLKBACK_INVALID_HANDLE;
307 +- return -ENOMEM;
308 ++ }
309 ++
310 ++ return ret;
311 + }
312 +
313 + static int xen_blkbk_map_seg(struct pending_req *pending_req)
314 +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
315 +index 14f60751729e7..9768921a164c0 100644
316 +--- a/drivers/i2c/busses/i2c-stm32f7.c
317 ++++ b/drivers/i2c/busses/i2c-stm32f7.c
318 +@@ -42,6 +42,8 @@
319 +
320 + /* STM32F7 I2C control 1 */
321 + #define STM32F7_I2C_CR1_ANFOFF BIT(12)
322 ++#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
323 ++#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
324 + #define STM32F7_I2C_CR1_ERRIE BIT(7)
325 + #define STM32F7_I2C_CR1_TCIE BIT(6)
326 + #define STM32F7_I2C_CR1_STOPIE BIT(5)
327 +@@ -95,7 +97,7 @@
328 + #define STM32F7_I2C_MAX_LEN 0xff
329 +
330 + #define STM32F7_I2C_DNF_DEFAULT 0
331 +-#define STM32F7_I2C_DNF_MAX 16
332 ++#define STM32F7_I2C_DNF_MAX 15
333 +
334 + #define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
335 + #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
336 +@@ -543,6 +545,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
337 + else
338 + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
339 + STM32F7_I2C_CR1_ANFOFF);
340 ++
341 ++ /* Program the Digital Filter */
342 ++ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
343 ++ STM32F7_I2C_CR1_DNF_MASK);
344 ++ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
345 ++ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
346 ++
347 + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
348 + STM32F7_I2C_CR1_PE);
349 + }
350 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
351 +index 71a01df96f8b0..6db51abb8f4a3 100644
352 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
353 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
354 +@@ -518,7 +518,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
355 + const size_t bufsz = sizeof(buf);
356 + int pos = 0;
357 +
358 ++ mutex_lock(&mvm->mutex);
359 + iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
360 ++ mutex_unlock(&mvm->mutex);
361 ++
362 + do_div(curr_os, NSEC_PER_USEC);
363 + diff = curr_os - curr_gp2;
364 + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
365 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
366 +index 54f411b83beae..dc0bc57767390 100644
367 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
368 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
369 +@@ -1169,6 +1169,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
370 + reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
371 + if (device_reprobe(reprobe->dev))
372 + dev_err(reprobe->dev, "reprobe failed!\n");
373 ++ put_device(reprobe->dev);
374 + kfree(reprobe);
375 + module_put(THIS_MODULE);
376 + }
377 +@@ -1219,7 +1220,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
378 + module_put(THIS_MODULE);
379 + return;
380 + }
381 +- reprobe->dev = mvm->trans->dev;
382 ++ reprobe->dev = get_device(mvm->trans->dev);
383 + INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
384 + schedule_work(&reprobe->work);
385 + } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
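
[Editor's note: the ops.c hunk is the classic fix for a use-after-free window — the reprobe work may run after the caller's reference to the device is gone, so the device must be pinned when the work is queued and released when it completes. A minimal sketch of the pattern with hypothetical names; the iwlwifi code differs in detail:]

    struct reprobe_work {
        struct device *dev;
        struct work_struct work;
    };

    static void reprobe_fn(struct work_struct *wk)
    {
        struct reprobe_work *rw = container_of(wk, struct reprobe_work, work);

        if (device_reprobe(rw->dev))
            dev_err(rw->dev, "reprobe failed\n");
        put_device(rw->dev);  /* drop the reference taken at schedule time */
        kfree(rw);
    }

    static void schedule_reprobe(struct device *dev)
    {
        struct reprobe_work *rw = kzalloc(sizeof(*rw), GFP_KERNEL);

        if (!rw)
            return;
        rw->dev = get_device(dev);  /* pin the device until the work runs */
        INIT_WORK(&rw->work, reprobe_fn);
        schedule_work(&rw->work);
    }
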
386 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
387 +index c3a2e6b6da65b..e1fb0258c9168 100644
388 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
389 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
390 +@@ -622,6 +622,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
391 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
392 + struct iwl_txq *txq = trans_pcie->txq[txq_id];
393 +
394 ++ if (!txq) {
395 ++ IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
396 ++ return;
397 ++ }
398 ++
399 + spin_lock_bh(&txq->lock);
400 + while (txq->write_ptr != txq->read_ptr) {
401 + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
402 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
403 +index b8100298017b9..fcaf4dd9d9c4c 100644
404 +--- a/drivers/net/xen-netback/netback.c
405 ++++ b/drivers/net/xen-netback/netback.c
406 +@@ -1328,13 +1328,11 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget)
407 + return 0;
408 +
409 + gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
410 +- if (nr_mops != 0) {
411 ++ if (nr_mops != 0)
412 + ret = gnttab_map_refs(queue->tx_map_ops,
413 + NULL,
414 + queue->pages_to_map,
415 + nr_mops);
416 +- BUG_ON(ret);
417 +- }
418 +
419 + work_done = xenvif_tx_submit(queue);
420 +
421 +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
422 +index f152246c7dfb7..ddfb1cfa2dd94 100644
423 +--- a/drivers/net/xen-netback/rx.c
424 ++++ b/drivers/net/xen-netback/rx.c
425 +@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
426 + RING_IDX prod, cons;
427 + struct sk_buff *skb;
428 + int needed;
429 ++ unsigned long flags;
430 ++
431 ++ spin_lock_irqsave(&queue->rx_queue.lock, flags);
432 +
433 + skb = skb_peek(&queue->rx_queue);
434 +- if (!skb)
435 ++ if (!skb) {
436 ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
437 + return false;
438 ++ }
439 +
440 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
441 + if (skb_is_gso(skb))
442 +@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
443 + if (skb->sw_hash)
444 + needed++;
445 +
446 ++ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
447 ++
448 + do {
449 + prod = queue->rx.sring->req_prod;
450 + cons = queue->rx.req_cons;
451 +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
452 +index 952544ca0d84d..93fadd4abf14d 100644
453 +--- a/drivers/platform/x86/hp-wmi.c
454 ++++ b/drivers/platform/x86/hp-wmi.c
455 +@@ -45,6 +45,10 @@ MODULE_LICENSE("GPL");
456 + MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
457 + MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
458 +
459 ++static int enable_tablet_mode_sw = -1;
460 ++module_param(enable_tablet_mode_sw, int, 0444);
461 ++MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
462 ++
463 + #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
464 + #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
465 +
466 +@@ -656,10 +660,12 @@ static int __init hp_wmi_input_setup(void)
467 + }
468 +
469 + /* Tablet mode */
470 +- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
471 +- if (!(val < 0)) {
472 +- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
473 +- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
474 ++ if (enable_tablet_mode_sw > 0) {
475 ++ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
476 ++ if (val >= 0) {
477 ++ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
478 ++ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
479 ++ }
480 + }
481 +
482 + err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
483 +diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
484 +index 81ec9b6805fcd..965f85a49ba0b 100644
485 +--- a/drivers/remoteproc/qcom_q6v5_pil.c
486 ++++ b/drivers/remoteproc/qcom_q6v5_pil.c
487 +@@ -293,6 +293,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
488 + {
489 + struct q6v5 *qproc = rproc->priv;
490 +
491 ++ /* MBA is restricted to a maximum size of 1M */
492 ++ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
493 ++ dev_err(qproc->dev, "MBA firmware load failed\n");
494 ++ return -EINVAL;
495 ++ }
496 ++
497 + memcpy(qproc->mba_region, fw->data, fw->size);
498 +
499 + return 0;
500 +@@ -560,14 +566,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
501 +
502 + if (phdr->p_filesz) {
503 + snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
504 +- ret = request_firmware(&seg_fw, seg_name, qproc->dev);
505 ++ ret = request_firmware_into_buf(&seg_fw, seg_name, qproc->dev,
506 ++ ptr, phdr->p_filesz);
507 + if (ret) {
508 + dev_err(qproc->dev, "failed to load %s\n", seg_name);
509 + goto release_firmware;
510 + }
511 +
512 +- memcpy(ptr, seg_fw->data, seg_fw->size);
513 +-
514 + release_firmware(seg_fw);
515 + }
516 +
517 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
518 +index 733e8dcccf5c3..0b50871957a6d 100644
519 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
520 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
521 +@@ -897,7 +897,8 @@ qla27xx_template_checksum(void *p, ulong size)
522 + static inline int
523 + qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
524 + {
525 +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
526 ++ return qla27xx_template_checksum(tmp,
527 ++ le32_to_cpu(tmp->template_size)) == 0;
528 + }
529 +
530 + static inline int
531 +@@ -913,7 +914,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
532 + ulong len;
533 +
534 + if (qla27xx_fwdt_template_valid(tmp)) {
535 +- len = tmp->template_size;
536 ++ len = le32_to_cpu(tmp->template_size);
537 + tmp = memcpy(vha->hw->fw_dump, tmp, len);
538 + ql27xx_edit_template(vha, tmp);
539 + qla27xx_walk_template(vha, tmp, tmp, &len);
540 +@@ -929,7 +930,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
541 + ulong len = 0;
542 +
543 + if (qla27xx_fwdt_template_valid(tmp)) {
544 +- len = tmp->template_size;
545 ++ len = le32_to_cpu(tmp->template_size);
546 + qla27xx_walk_template(vha, tmp, NULL, &len);
547 + }
548 +
549 +@@ -941,7 +942,7 @@ qla27xx_fwdt_template_size(void *p)
550 + {
551 + struct qla27xx_fwdt_template *tmp = p;
552 +
553 +- return tmp->template_size;
554 ++ return le32_to_cpu(tmp->template_size);
555 + }
556 +
557 + ulong
558 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
559 +index 141c1c5e73f42..2d3e1a8349b3b 100644
560 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h
561 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h
562 +@@ -13,7 +13,7 @@
563 + struct __packed qla27xx_fwdt_template {
564 + uint32_t template_type;
565 + uint32_t entry_offset;
566 +- uint32_t template_size;
567 ++ __le32 template_size;
568 + uint32_t reserved_1;
569 +
570 + uint32_t entry_count;
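
[Editor's note: the qla2xxx hunks are a sparse-driven endianness cleanup — template_size arrives little-endian from firmware, so the field becomes __le32 and every reader converts with le32_to_cpu(), a no-op on little-endian CPUs and a byte swap on big-endian ones. The convention, sketched with a hypothetical firmware header:]

    /* Hypothetical on-disk header; fields are little-endian on any CPU. */
    struct fw_header {
        __le32 magic;
        __le32 size;
    };

    static inline u32 fw_header_size(const struct fw_header *h)
    {
        /* Annotated type + accessor: sparse flags any unconverted use. */
        return le32_to_cpu(h->size);
    }
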
571 +diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
572 +index d3b68e97096e7..bc2dd9499ea03 100644
573 +--- a/drivers/usb/dwc3/ulpi.c
574 ++++ b/drivers/usb/dwc3/ulpi.c
575 +@@ -10,6 +10,8 @@
576 + * published by the Free Software Foundation.
577 + */
578 +
579 ++#include <linux/delay.h>
580 ++#include <linux/time64.h>
581 + #include <linux/ulpi/regs.h>
582 +
583 + #include "core.h"
584 +@@ -20,12 +22,22 @@
585 + DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
586 + DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
587 +
588 +-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
589 ++#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
590 ++
591 ++static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
592 + {
593 +- unsigned count = 1000;
594 ++ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
595 ++ unsigned int count = 1000;
596 + u32 reg;
597 +
598 ++ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
599 ++ ns += DWC3_ULPI_BASE_DELAY;
600 ++
601 ++ if (read)
602 ++ ns += DWC3_ULPI_BASE_DELAY;
603 ++
604 + while (count--) {
605 ++ ndelay(ns);
606 + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
607 + if (reg & DWC3_GUSB2PHYACC_DONE)
608 + return 0;
609 +@@ -50,7 +62,7 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
610 + reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
611 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
612 +
613 +- ret = dwc3_ulpi_busyloop(dwc);
614 ++ ret = dwc3_ulpi_busyloop(dwc, addr, true);
615 + if (ret)
616 + return ret;
617 +
618 +@@ -74,7 +86,7 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
619 + reg |= DWC3_GUSB2PHYACC_WRITE | val;
620 + dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
621 +
622 +- return dwc3_ulpi_busyloop(dwc);
623 ++ return dwc3_ulpi_busyloop(dwc, addr, false);
624 + }
625 +
626 + static const struct ulpi_ops dwc3_ulpi_ops = {
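
[Editor's note: the new busy-loop delay is derived from the 60 MHz ULPI clock. DWC3_ULPI_BASE_DELAY is DIV_ROUND_UP(10^9 ns, 60000000) = 17 ns, one ULPI cycle rounded up; the base wait is five cycles (85 ns), plus one cycle each for an extended register address and for a read, so at most about 119 ns per poll. The same arithmetic, restated (my condensation of the patch's constants):]

    /* One ULPI clock cycle at 60 MHz: ceil(1000000000 / 60000000) = 17 ns */
    #define DWC3_ULPI_BASE_DELAY  DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)

    static unsigned long ulpi_poll_delay_ns(u8 addr, bool read)
    {
        unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;  /* 85 ns base */

        if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
            ns += DWC3_ULPI_BASE_DELAY;  /* extended address: +17 ns */
        if (read)
            ns += DWC3_ULPI_BASE_DELAY;  /* read turnaround: +17 ns */
        return ns;                       /* worst case ~119 ns */
    }
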
627 +diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
628 +index 716edd593a994..989682cc86868 100644
629 +--- a/drivers/usb/gadget/function/u_ether.c
630 ++++ b/drivers/usb/gadget/function/u_ether.c
631 +@@ -49,9 +49,10 @@
632 + #define UETH__VERSION "29-May-2008"
633 +
634 + /* Experiments show that both Linux and Windows hosts allow up to 16k
635 +- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
636 ++ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
637 + * blocks and still have efficient handling. */
638 +-#define GETHER_MAX_ETH_FRAME_LEN 15412
639 ++#define GETHER_MAX_MTU_SIZE 15412
640 ++#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
641 +
642 + struct eth_dev {
643 + /* lock is held while accessing port_usb
644 +@@ -790,7 +791,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
645 +
646 + /* MTU range: 14 - 15412 */
647 + net->min_mtu = ETH_HLEN;
648 +- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
649 ++ net->max_mtu = GETHER_MAX_MTU_SIZE;
650 +
651 + dev->gadget = g;
652 + SET_NETDEV_DEV(net, &g->dev);
653 +@@ -850,6 +851,10 @@ struct net_device *gether_setup_name_default(const char *netname)
654 + net->ethtool_ops = &ops;
655 + SET_NETDEV_DEVTYPE(net, &gadget_type);
656 +
657 ++ /* MTU range: 14 - 15412 */
658 ++ net->min_mtu = ETH_HLEN;
659 ++ net->max_mtu = GETHER_MAX_MTU_SIZE;
660 ++
661 + return net;
662 + }
663 + EXPORT_SYMBOL_GPL(gether_setup_name_default);
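
[Editor's note: the u_ether fix separates two limits the old code conflated — max_mtu is what userspace may configure (15412 bytes of payload), while the frame buffer must also hold the 14-byte Ethernet header. Splitting the defines keeps the allocation at 15412 + 14 = 15426 bytes, still under the 16 KiB boundary the comment guards. Checked at compile time, illustrative only, not in the patch:]

    #define ETH_HLEN                  14    /* per <linux/if_ether.h> */
    #define GETHER_MAX_MTU_SIZE       15412
    #define GETHER_MAX_ETH_FRAME_LEN  (GETHER_MAX_MTU_SIZE + ETH_HLEN)

    _Static_assert(GETHER_MAX_ETH_FRAME_LEN == 15426, "MTU + header");
    _Static_assert(GETHER_MAX_ETH_FRAME_LEN < 16 * 1024,
                   "frame must not round up to a 32k allocation");
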
664 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
665 +index bd56653b9bbc2..7b4ac5505f532 100644
666 +--- a/drivers/xen/gntdev.c
667 ++++ b/drivers/xen/gntdev.c
668 +@@ -295,36 +295,47 @@ static int map_grant_pages(struct grant_map *map)
669 + * to the kernel linear addresses of the struct pages.
670 + * These ptes are completely different from the user ptes dealt
671 + * with find_grant_ptes.
672 ++ * Note that GNTMAP_device_map isn't needed here: The
673 ++ * dev_bus_addr output field gets consumed only from ->map_ops,
674 ++ * and by not requesting it when mapping we also avoid needing
675 ++ * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
676 ++ * reference to the page in the hypervisor).
677 + */
678 ++ unsigned int flags = (map->flags & ~GNTMAP_device_map) |
679 ++ GNTMAP_host_map;
680 ++
681 + for (i = 0; i < map->count; i++) {
682 + unsigned long address = (unsigned long)
683 + pfn_to_kaddr(page_to_pfn(map->pages[i]));
684 + BUG_ON(PageHighMem(map->pages[i]));
685 +
686 +- gnttab_set_map_op(&map->kmap_ops[i], address,
687 +- map->flags | GNTMAP_host_map,
688 ++ gnttab_set_map_op(&map->kmap_ops[i], address, flags,
689 + map->grants[i].ref,
690 + map->grants[i].domid);
691 + gnttab_set_unmap_op(&map->kunmap_ops[i], address,
692 +- map->flags | GNTMAP_host_map, -1);
693 ++ flags, -1);
694 + }
695 + }
696 +
697 + pr_debug("map %d+%d\n", map->index, map->count);
698 + err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
699 + map->pages, map->count);
700 +- if (err)
701 +- return err;
702 +
703 + for (i = 0; i < map->count; i++) {
704 +- if (map->map_ops[i].status) {
705 ++ if (map->map_ops[i].status == GNTST_okay)
706 ++ map->unmap_ops[i].handle = map->map_ops[i].handle;
707 ++ else if (!err)
708 + err = -EINVAL;
709 +- continue;
710 +- }
711 +
712 +- map->unmap_ops[i].handle = map->map_ops[i].handle;
713 +- if (use_ptemod)
714 +- map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
715 ++ if (map->flags & GNTMAP_device_map)
716 ++ map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
717 ++
718 ++ if (use_ptemod) {
719 ++ if (map->kmap_ops[i].status == GNTST_okay)
720 ++ map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
721 ++ else if (!err)
722 ++ err = -EINVAL;
723 ++ }
724 + }
725 + return err;
726 + }
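
[Editor's note: the common thread in this patch's Xen changes — gntdev above, plus blkback, netback, and scsiback elsewhere in it — is that gnttab_map_refs() may succeed for some entries and fail for others, so the old BUG_ON(ret) calls give way to per-entry checks where only GNTST_okay counts as mapped. A condensed sketch of the pattern with a hypothetical wrapper:]

    /* Sketch: record a handle only for ops the hypervisor actually mapped. */
    static int map_batch(struct gnttab_map_grant_ref *ops,
                         grant_handle_t *handles, struct page **pages, int count)
    {
        int i, err = 0;

        gnttab_map_refs(ops, NULL, pages, count);

        for (i = 0; i < count; i++) {
            if (ops[i].status == GNTST_okay)
                handles[i] = ops[i].handle;
            else if (!err)
                err = -EINVAL;  /* keep the first failure, keep scanning */
        }
        return err;  /* caller must unmap only the GNTST_okay entries */
    }
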
727 +diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
728 +index fd32c3459b668..6d5eaea3373ba 100644
729 +--- a/drivers/xen/xen-scsiback.c
730 ++++ b/drivers/xen/xen-scsiback.c
731 +@@ -422,12 +422,12 @@ static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
732 + return 0;
733 +
734 + err = gnttab_map_refs(map, NULL, pg, cnt);
735 +- BUG_ON(err);
736 + for (i = 0; i < cnt; i++) {
737 + if (unlikely(map[i].status != GNTST_okay)) {
738 + pr_err("invalid buffer -- could not remap it\n");
739 + map[i].handle = SCSIBACK_INVALID_HANDLE;
740 +- err = -ENOMEM;
741 ++ if (!err)
742 ++ err = -ENOMEM;
743 + } else {
744 + get_page(pg[i]);
745 + }
746 +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
747 +index e6a8d02d35254..139539b0ab20d 100644
748 +--- a/drivers/xen/xenbus/xenbus.h
749 ++++ b/drivers/xen/xenbus/xenbus.h
750 +@@ -114,7 +114,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
751 + const char *type,
752 + const char *nodename);
753 + int xenbus_probe_devices(struct xen_bus_type *bus);
754 +-void xenbus_probe(void);
755 +
756 + void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
757 +
758 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
759 +index 9cac938361a01..08f1ccdbe343f 100644
760 +--- a/drivers/xen/xenbus/xenbus_probe.c
761 ++++ b/drivers/xen/xenbus/xenbus_probe.c
762 +@@ -674,7 +674,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
763 + }
764 + EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
765 +
766 +-void xenbus_probe(void)
767 ++static void xenbus_probe(void)
768 + {
769 + xenstored_ready = 1;
770 +
771 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
772 +index 384f95e1936dd..fde277be26420 100644
773 +--- a/fs/fs-writeback.c
774 ++++ b/fs/fs-writeback.c
775 +@@ -1965,7 +1965,7 @@ void wb_workfn(struct work_struct *work)
776 + struct bdi_writeback, dwork);
777 + long pages_written;
778 +
779 +- set_worker_desc("flush-%s", dev_name(wb->bdi->dev));
780 ++ set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
781 + current->flags |= PF_SWAPWRITE;
782 +
783 + if (likely(!current_is_workqueue_rescuer() ||
784 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
785 +index 8e2e3d3b7b253..0737f193fc532 100644
786 +--- a/fs/nfs/pnfs.c
787 ++++ b/fs/nfs/pnfs.c
788 +@@ -1973,7 +1973,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
789 + * We got an entirely new state ID. Mark all segments for the
790 + * inode invalid, and retry the layoutget
791 + */
792 +- pnfs_mark_layout_stateid_invalid(lo, &free_me);
793 ++ struct pnfs_layout_range range = {
794 ++ .iomode = IOMODE_ANY,
795 ++ .length = NFS4_MAX_UINT64,
796 ++ };
797 ++ pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
798 ++ pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
799 ++ &range, 0);
800 + goto out_forget;
801 + }
802 +
803 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
804 +index b97fc1df62128..f3ed80e2966c3 100644
805 +--- a/fs/overlayfs/copy_up.c
806 ++++ b/fs/overlayfs/copy_up.c
807 +@@ -95,6 +95,14 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
808 +
809 + if (ovl_is_private_xattr(name))
810 + continue;
811 ++
812 ++ error = security_inode_copy_up_xattr(name);
813 ++ if (error < 0 && error != -EOPNOTSUPP)
814 ++ break;
815 ++ if (error == 1) {
816 ++ error = 0;
817 ++ continue; /* Discard */
818 ++ }
819 + retry:
820 + size = vfs_getxattr(old, name, value, value_size);
821 + if (size == -ERANGE)
822 +@@ -118,13 +126,6 @@ retry:
823 + goto retry;
824 + }
825 +
826 +- error = security_inode_copy_up_xattr(name);
827 +- if (error < 0 && error != -EOPNOTSUPP)
828 +- break;
829 +- if (error == 1) {
830 +- error = 0;
831 +- continue; /* Discard */
832 +- }
833 + error = vfs_setxattr(new, name, value, size, 0);
834 + if (error)
835 + break;
836 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
837 +index 30a1c7fc8c75c..ac6efac119fb9 100644
838 +--- a/fs/overlayfs/inode.c
839 ++++ b/fs/overlayfs/inode.c
840 +@@ -216,7 +216,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
841 + goto out;
842 +
843 + if (!value && !upperdentry) {
844 ++ old_cred = ovl_override_creds(dentry->d_sb);
845 + err = vfs_getxattr(realdentry, name, NULL, 0);
846 ++ revert_creds(old_cred);
847 + if (err < 0)
848 + goto out_drop_write;
849 + }
850 +diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
851 +index 8073b6532cf04..d2a806416c3ab 100644
852 +--- a/fs/squashfs/export.c
853 ++++ b/fs/squashfs/export.c
854 +@@ -54,12 +54,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
855 + struct squashfs_sb_info *msblk = sb->s_fs_info;
856 + int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
857 + int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
858 +- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
859 ++ u64 start;
860 + __le64 ino;
861 + int err;
862 +
863 + TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
864 +
865 ++ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
866 ++ return -EINVAL;
867 ++
868 ++ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
869 ++
870 + err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
871 + if (err < 0)
872 + return err;
873 +@@ -124,7 +129,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
874 + u64 lookup_table_start, u64 next_table, unsigned int inodes)
875 + {
876 + unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
877 ++ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
878 ++ int n;
879 + __le64 *table;
880 ++ u64 start, end;
881 +
882 + TRACE("In read_inode_lookup_table, length %d\n", length);
883 +
884 +@@ -134,20 +142,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
885 + if (inodes == 0)
886 + return ERR_PTR(-EINVAL);
887 +
888 +- /* length bytes should not extend into the next table - this check
889 +- * also traps instances where lookup_table_start is incorrectly larger
890 +- * than the next table start
891 ++ /*
892 ++ * The computed size of the lookup table (length bytes) should exactly
893 ++ * match the table start and end points
894 + */
895 +- if (lookup_table_start + length > next_table)
896 ++ if (length != (next_table - lookup_table_start))
897 + return ERR_PTR(-EINVAL);
898 +
899 + table = squashfs_read_table(sb, lookup_table_start, length);
900 ++ if (IS_ERR(table))
901 ++ return table;
902 +
903 + /*
904 +- * table[0] points to the first inode lookup table metadata block,
905 +- * this should be less than lookup_table_start
906 ++ * table[0], table[1], ... table[indexes - 1] store the locations
907 ++ * of the compressed inode lookup blocks. Each entry should be
908 ++ * less than the next (i.e. table[0] < table[1]), and the difference
909 ++ * between them should be SQUASHFS_METADATA_SIZE or less.
910 ++ * table[indexes - 1] should be less than lookup_table_start, and
911 ++ * again the difference should be SQUASHFS_METADATA_SIZE or less
912 + */
913 +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
914 ++ for (n = 0; n < (indexes - 1); n++) {
915 ++ start = le64_to_cpu(table[n]);
916 ++ end = le64_to_cpu(table[n + 1]);
917 ++
918 ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
919 ++ kfree(table);
920 ++ return ERR_PTR(-EINVAL);
921 ++ }
922 ++ }
923 ++
924 ++ start = le64_to_cpu(table[indexes - 1]);
925 ++ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
926 + kfree(table);
927 + return ERR_PTR(-EINVAL);
928 + }
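
[Editor's note: the replacement check encodes three invariants for an index table read from an untrusted image — entries strictly increase, adjacent entries are at most one metadata block (SQUASHFS_METADATA_SIZE) apart, and the last entry sits just below the table itself. The id and xattr-id table readers later in this patch apply the same rule. Factored out, the check looks like this (hypothetical helper, not code from the patch):]

    /* Sketch: validate a squashfs block-index table read from disk. */
    static int check_index_table(const __le64 *table, int indexes,
                                 u64 table_start)
    {
        u64 start, end;
        int n;

        for (n = 0; n < indexes - 1; n++) {
            start = le64_to_cpu(table[n]);
            end = le64_to_cpu(table[n + 1]);
            if (start >= end || end - start > SQUASHFS_METADATA_SIZE)
                return -EINVAL;  /* corrupted or crafted image */
        }

        start = le64_to_cpu(table[indexes - 1]);
        if (start >= table_start ||
            table_start - start > SQUASHFS_METADATA_SIZE)
            return -EINVAL;
        return 0;
    }
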
929 +diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
930 +index d38ea3dab9515..8ccc0e3f6ea5a 100644
931 +--- a/fs/squashfs/id.c
932 ++++ b/fs/squashfs/id.c
933 +@@ -48,10 +48,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
934 + struct squashfs_sb_info *msblk = sb->s_fs_info;
935 + int block = SQUASHFS_ID_BLOCK(index);
936 + int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
937 +- u64 start_block = le64_to_cpu(msblk->id_table[block]);
938 ++ u64 start_block;
939 + __le32 disk_id;
940 + int err;
941 +
942 ++ if (index >= msblk->ids)
943 ++ return -EINVAL;
944 ++
945 ++ start_block = le64_to_cpu(msblk->id_table[block]);
946 ++
947 + err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
948 + sizeof(disk_id));
949 + if (err < 0)
950 +@@ -69,7 +74,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
951 + u64 id_table_start, u64 next_table, unsigned short no_ids)
952 + {
953 + unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
954 ++ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
955 ++ int n;
956 + __le64 *table;
957 ++ u64 start, end;
958 +
959 + TRACE("In read_id_index_table, length %d\n", length);
960 +
961 +@@ -80,20 +88,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
962 + return ERR_PTR(-EINVAL);
963 +
964 + /*
965 +- * length bytes should not extend into the next table - this check
966 +- * also traps instances where id_table_start is incorrectly larger
967 +- * than the next table start
968 ++ * The computed size of the index table (length bytes) should exactly
969 ++ * match the table start and end points
970 + */
971 +- if (id_table_start + length > next_table)
972 ++ if (length != (next_table - id_table_start))
973 + return ERR_PTR(-EINVAL);
974 +
975 + table = squashfs_read_table(sb, id_table_start, length);
976 ++ if (IS_ERR(table))
977 ++ return table;
978 +
979 + /*
980 +- * table[0] points to the first id lookup table metadata block, this
981 +- * should be less than id_table_start
982 ++ * table[0], table[1], ... table[indexes - 1] store the locations
983 ++ * of the compressed id blocks. Each entry should be less than
984 ++ * the next (i.e. table[0] < table[1]), and the difference between them
985 ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
986 ++ * should be less than id_table_start, and again the difference
987 ++ * should be SQUASHFS_METADATA_SIZE or less
988 + */
989 +- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
990 ++ for (n = 0; n < (indexes - 1); n++) {
991 ++ start = le64_to_cpu(table[n]);
992 ++ end = le64_to_cpu(table[n + 1]);
993 ++
994 ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
995 ++ kfree(table);
996 ++ return ERR_PTR(-EINVAL);
997 ++ }
998 ++ }
999 ++
1000 ++ start = le64_to_cpu(table[indexes - 1]);
1001 ++ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
1002 + kfree(table);
1003 + return ERR_PTR(-EINVAL);
1004 + }
1005 +diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
1006 +index ef69c31947bf8..5234c19a0eabc 100644
1007 +--- a/fs/squashfs/squashfs_fs_sb.h
1008 ++++ b/fs/squashfs/squashfs_fs_sb.h
1009 +@@ -77,5 +77,6 @@ struct squashfs_sb_info {
1010 + unsigned int inodes;
1011 + unsigned int fragments;
1012 + int xattr_ids;
1013 ++ unsigned int ids;
1014 + };
1015 + #endif
1016 +diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
1017 +index 1516bb779b8d4..5abc9d03397c1 100644
1018 +--- a/fs/squashfs/super.c
1019 ++++ b/fs/squashfs/super.c
1020 +@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
1021 + msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
1022 + msblk->inodes = le32_to_cpu(sblk->inodes);
1023 + msblk->fragments = le32_to_cpu(sblk->fragments);
1024 ++ msblk->ids = le16_to_cpu(sblk->no_ids);
1025 + flags = le16_to_cpu(sblk->flags);
1026 +
1027 + TRACE("Found valid superblock on %pg\n", sb->s_bdev);
1028 +@@ -187,7 +188,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
1029 + TRACE("Block size %d\n", msblk->block_size);
1030 + TRACE("Number of inodes %d\n", msblk->inodes);
1031 + TRACE("Number of fragments %d\n", msblk->fragments);
1032 +- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
1033 ++ TRACE("Number of ids %d\n", msblk->ids);
1034 + TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
1035 + TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
1036 + TRACE("sblk->fragment_table_start %llx\n",
1037 +@@ -244,8 +245,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
1038 + allocate_id_index_table:
1039 + /* Allocate and read id index table */
1040 + msblk->id_table = squashfs_read_id_index_table(sb,
1041 +- le64_to_cpu(sblk->id_table_start), next_table,
1042 +- le16_to_cpu(sblk->no_ids));
1043 ++ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
1044 + if (IS_ERR(msblk->id_table)) {
1045 + ERROR("unable to read id index table\n");
1046 + err = PTR_ERR(msblk->id_table);
1047 +diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
1048 +index afe70f815e3de..86b0a0073e51f 100644
1049 +--- a/fs/squashfs/xattr.h
1050 ++++ b/fs/squashfs/xattr.h
1051 +@@ -30,8 +30,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
1052 + static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
1053 + u64 start, u64 *xattr_table_start, int *xattr_ids)
1054 + {
1055 ++ struct squashfs_xattr_id_table *id_table;
1056 ++
1057 ++ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
1058 ++ if (IS_ERR(id_table))
1059 ++ return (__le64 *) id_table;
1060 ++
1061 ++ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
1062 ++ kfree(id_table);
1063 ++
1064 + ERROR("Xattrs in filesystem, these will be ignored\n");
1065 +- *xattr_table_start = start;
1066 + return ERR_PTR(-ENOTSUPP);
1067 + }
1068 +
1069 +diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
1070 +index c89607d690c48..3a655d879600c 100644
1071 +--- a/fs/squashfs/xattr_id.c
1072 ++++ b/fs/squashfs/xattr_id.c
1073 +@@ -44,10 +44,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
1074 + struct squashfs_sb_info *msblk = sb->s_fs_info;
1075 + int block = SQUASHFS_XATTR_BLOCK(index);
1076 + int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
1077 +- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
1078 ++ u64 start_block;
1079 + struct squashfs_xattr_id id;
1080 + int err;
1081 +
1082 ++ if (index >= msblk->xattr_ids)
1083 ++ return -EINVAL;
1084 ++
1085 ++ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
1086 ++
1087 + err = squashfs_read_metadata(sb, &id, &start_block, &offset,
1088 + sizeof(id));
1089 + if (err < 0)
1090 +@@ -63,13 +68,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
1091 + /*
1092 + * Read uncompressed xattr id lookup table indexes from disk into memory
1093 + */
1094 +-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
1095 ++__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
1096 + u64 *xattr_table_start, int *xattr_ids)
1097 + {
1098 +- unsigned int len;
1099 ++ struct squashfs_sb_info *msblk = sb->s_fs_info;
1100 ++ unsigned int len, indexes;
1101 + struct squashfs_xattr_id_table *id_table;
1102 ++ __le64 *table;
1103 ++ u64 start, end;
1104 ++ int n;
1105 +
1106 +- id_table = squashfs_read_table(sb, start, sizeof(*id_table));
1107 ++ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
1108 + if (IS_ERR(id_table))
1109 + return (__le64 *) id_table;
1110 +
1111 +@@ -83,13 +92,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
1112 + if (*xattr_ids == 0)
1113 + return ERR_PTR(-EINVAL);
1114 +
1115 +- /* xattr_table should be less than start */
1116 +- if (*xattr_table_start >= start)
1117 ++ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
1118 ++ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
1119 ++
1120 ++ /*
1121 ++ * The computed size of the index table (len bytes) should exactly
1122 ++ * match the table start and end points
1123 ++ */
1124 ++ start = table_start + sizeof(*id_table);
1125 ++ end = msblk->bytes_used;
1126 ++
1127 ++ if (len != (end - start))
1128 + return ERR_PTR(-EINVAL);
1129 +
1130 +- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
1131 ++ table = squashfs_read_table(sb, start, len);
1132 ++ if (IS_ERR(table))
1133 ++ return table;
1134 ++
1135 ++ /* table[0], table[1], ... table[indexes - 1] store the locations
1136 ++ * of the compressed xattr id blocks. Each entry should be less than
1137 ++ * the next (i.e. table[0] < table[1]), and the difference between them
1138 ++ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
1139 ++ * should be less than table_start, and again the difference
1140 ++ * should be SQUASHFS_METADATA_SIZE or less.
1141 ++ *
1142 ++ * Finally xattr_table_start should be less than table[0].
1143 ++ */
1144 ++ for (n = 0; n < (indexes - 1); n++) {
1145 ++ start = le64_to_cpu(table[n]);
1146 ++ end = le64_to_cpu(table[n + 1]);
1147 ++
1148 ++ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
1149 ++ kfree(table);
1150 ++ return ERR_PTR(-EINVAL);
1151 ++ }
1152 ++ }
1153 ++
1154 ++ start = le64_to_cpu(table[indexes - 1]);
1155 ++ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
1156 ++ kfree(table);
1157 ++ return ERR_PTR(-EINVAL);
1158 ++ }
1159 +
1160 +- TRACE("In read_xattr_index_table, length %d\n", len);
1161 ++ if (*xattr_table_start >= le64_to_cpu(table[0])) {
1162 ++ kfree(table);
1163 ++ return ERR_PTR(-EINVAL);
1164 ++ }
1165 +
1166 +- return squashfs_read_table(sb, start + sizeof(*id_table), len);
1167 ++ return table;
1168 + }
1169 +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
1170 +index 012adec975433..c947b29380547 100644
1171 +--- a/include/linux/backing-dev.h
1172 ++++ b/include/linux/backing-dev.h
1173 +@@ -13,6 +13,7 @@
1174 + #include <linux/fs.h>
1175 + #include <linux/sched.h>
1176 + #include <linux/blkdev.h>
1177 ++#include <linux/device.h>
1178 + #include <linux/writeback.h>
1179 + #include <linux/blk-cgroup.h>
1180 + #include <linux/backing-dev-defs.h>
1181 +@@ -493,4 +494,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
1182 + (1 << WB_async_congested));
1183 + }
1184 +
1185 ++extern const char *bdi_unknown_name;
1186 ++
1187 ++static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
1188 ++{
1189 ++ if (!bdi || !bdi->dev)
1190 ++ return bdi_unknown_name;
1191 ++ return dev_name(bdi->dev);
1192 ++}
1193 ++
1194 + #endif /* _LINUX_BACKING_DEV_H */
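
[Editor's note: bdi_dev_name() centralizes a NULL-safe lookup — writeback tracepoints and wb_workfn() (fixed above) can run while bdi->dev is still NULL during registration or teardown, and each caller previously open-coded the dev_name()-or-"(unknown)" fallback, or skipped it and crashed. Typical use after this patch:]

    static void report_wb(struct bdi_writeback *wb)
    {
        /* Never dereferences a NULL bdi->dev; falls back to "(unknown)". */
        pr_debug("flush-%s: writeback starting\n", bdi_dev_name(wb->bdi));
    }
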
1195 +diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
1196 +index e54d257983f28..2c9f2ddd62f92 100644
1197 +--- a/include/linux/ftrace.h
1198 ++++ b/include/linux/ftrace.h
1199 +@@ -792,7 +792,9 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
1200 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1201 +
1202 + /* for init task */
1203 +-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
1204 ++#define INIT_FTRACE_GRAPH \
1205 ++ .ret_stack = NULL, \
1206 ++ .tracing_graph_pause = ATOMIC_INIT(0),
1207 +
1208 + /*
1209 + * Stack of return addresses for functions
1210 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1211 +index 3512c337a4a6b..80579577a7005 100644
1212 +--- a/include/linux/netdevice.h
1213 ++++ b/include/linux/netdevice.h
1214 +@@ -3674,6 +3674,7 @@ static inline void netif_tx_disable(struct net_device *dev)
1215 +
1216 + local_bh_disable();
1217 + cpu = smp_processor_id();
1218 ++ spin_lock(&dev->tx_global_lock);
1219 + for (i = 0; i < dev->num_tx_queues; i++) {
1220 + struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1221 +
1222 +@@ -3681,6 +3682,7 @@ static inline void netif_tx_disable(struct net_device *dev)
1223 + netif_tx_stop_queue(txq);
1224 + __netif_tx_unlock(txq);
1225 + }
1226 ++ spin_unlock(&dev->tx_global_lock);
1227 + local_bh_enable();
1228 + }
1229 +
1230 +diff --git a/include/linux/string.h b/include/linux/string.h
1231 +index 315fef3aff4e6..3b5d01e80962a 100644
1232 +--- a/include/linux/string.h
1233 ++++ b/include/linux/string.h
1234 +@@ -30,6 +30,10 @@ size_t strlcpy(char *, const char *, size_t);
1235 + #ifndef __HAVE_ARCH_STRSCPY
1236 + ssize_t strscpy(char *, const char *, size_t);
1237 + #endif
1238 ++
1239 ++/* Wraps calls to strscpy()/memset(), no arch specific code required */
1240 ++ssize_t strscpy_pad(char *dest, const char *src, size_t count);
1241 ++
1242 + #ifndef __HAVE_ARCH_STRCAT
1243 + extern char * strcat(char *, const char *);
1244 + #endif
1245 +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
1246 +index d950223c64b1c..819f63e0edc15 100644
1247 +--- a/include/linux/sunrpc/xdr.h
1248 ++++ b/include/linux/sunrpc/xdr.h
1249 +@@ -26,8 +26,7 @@ struct rpc_rqst;
1250 + #define XDR_QUADLEN(l) (((l) + 3) >> 2)
1251 +
1252 + /*
1253 +- * Generic opaque `network object.' At the kernel level, this type
1254 +- * is used only by lockd.
1255 ++ * Generic opaque `network object.'
1256 + */
1257 + #define XDR_MAX_NETOBJ 1024
1258 + struct xdr_netobj {
1259 +diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
1260 +index 627f5759b67d1..cb2a5016247af 100644
1261 +--- a/include/trace/events/writeback.h
1262 ++++ b/include/trace/events/writeback.h
1263 +@@ -65,8 +65,9 @@ TRACE_EVENT(writeback_dirty_page,
1264 + ),
1265 +
1266 + TP_fast_assign(
1267 +- strncpy(__entry->name,
1268 +- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
1269 ++ strscpy_pad(__entry->name,
1270 ++ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
1271 ++ NULL), 32);
1272 + __entry->ino = mapping ? mapping->host->i_ino : 0;
1273 + __entry->index = page->index;
1274 + ),
1275 +@@ -95,8 +96,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
1276 + struct backing_dev_info *bdi = inode_to_bdi(inode);
1277 +
1278 + /* may be called for files on pseudo FSes w/ unregistered bdi */
1279 +- strncpy(__entry->name,
1280 +- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
1281 ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
1282 + __entry->ino = inode->i_ino;
1283 + __entry->state = inode->i_state;
1284 + __entry->flags = flags;
1285 +@@ -175,8 +175,8 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
1286 + ),
1287 +
1288 + TP_fast_assign(
1289 +- strncpy(__entry->name,
1290 +- dev_name(inode_to_bdi(inode)->dev), 32);
1291 ++ strscpy_pad(__entry->name,
1292 ++ bdi_dev_name(inode_to_bdi(inode)), 32);
1293 + __entry->ino = inode->i_ino;
1294 + __entry->sync_mode = wbc->sync_mode;
1295 + __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
1296 +@@ -219,8 +219,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
1297 + __field(unsigned int, cgroup_ino)
1298 + ),
1299 + TP_fast_assign(
1300 +- strncpy(__entry->name,
1301 +- wb->bdi->dev ? dev_name(wb->bdi->dev) : "(unknown)", 32);
1302 ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
1303 + __entry->nr_pages = work->nr_pages;
1304 + __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
1305 + __entry->sync_mode = work->sync_mode;
1306 +@@ -273,7 +272,7 @@ DECLARE_EVENT_CLASS(writeback_class,
1307 + __field(unsigned int, cgroup_ino)
1308 + ),
1309 + TP_fast_assign(
1310 +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
1311 ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
1312 + __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
1313 + ),
1314 + TP_printk("bdi %s: cgroup_ino=%u",
1315 +@@ -296,7 +295,7 @@ TRACE_EVENT(writeback_bdi_register,
1316 + __array(char, name, 32)
1317 + ),
1318 + TP_fast_assign(
1319 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
1320 ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
1321 + ),
1322 + TP_printk("bdi %s",
1323 + __entry->name
1324 +@@ -321,7 +320,7 @@ DECLARE_EVENT_CLASS(wbc_class,
1325 + ),
1326 +
1327 + TP_fast_assign(
1328 +- strncpy(__entry->name, dev_name(bdi->dev), 32);
1329 ++ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
1330 + __entry->nr_to_write = wbc->nr_to_write;
1331 + __entry->pages_skipped = wbc->pages_skipped;
1332 + __entry->sync_mode = wbc->sync_mode;
1333 +@@ -372,7 +371,7 @@ TRACE_EVENT(writeback_queue_io,
1334 + __field(unsigned int, cgroup_ino)
1335 + ),
1336 + TP_fast_assign(
1337 +- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
1338 ++ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
1339 + __entry->older = dirtied_before;
1340 + __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
1341 + __entry->moved = moved;
1342 +@@ -457,7 +456,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
1343 + ),
1344 +
1345 + TP_fast_assign(
1346 +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
1347 ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
1348 + __entry->write_bw = KBps(wb->write_bandwidth);
1349 + __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
1350 + __entry->dirty_rate = KBps(dirty_rate);
1351 +@@ -522,7 +521,7 @@ TRACE_EVENT(balance_dirty_pages,
1352 +
1353 + TP_fast_assign(
1354 + unsigned long freerun = (thresh + bg_thresh) / 2;
1355 +- strlcpy(__entry->bdi, dev_name(wb->bdi->dev), 32);
1356 ++ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
1357 +
1358 + __entry->limit = global_wb_domain.dirty_limit;
1359 + __entry->setpoint = (global_wb_domain.dirty_limit +
1360 +@@ -582,8 +581,8 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
1361 + ),
1362 +
1363 + TP_fast_assign(
1364 +- strncpy(__entry->name,
1365 +- dev_name(inode_to_bdi(inode)->dev), 32);
1366 ++ strscpy_pad(__entry->name,
1367 ++ bdi_dev_name(inode_to_bdi(inode)), 32);
1368 + __entry->ino = inode->i_ino;
1369 + __entry->state = inode->i_state;
1370 + __entry->dirtied_when = inode->dirtied_when;
1371 +@@ -656,8 +655,8 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
1372 + ),
1373 +
1374 + TP_fast_assign(
1375 +- strncpy(__entry->name,
1376 +- dev_name(inode_to_bdi(inode)->dev), 32);
1377 ++ strscpy_pad(__entry->name,
1378 ++ bdi_dev_name(inode_to_bdi(inode)), 32);
1379 + __entry->ino = inode->i_ino;
1380 + __entry->state = inode->i_state;
1381 + __entry->dirtied_when = inode->dirtied_when;
1382 +diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
1383 +index 34b1379f9777d..f9d8aac170fbc 100644
1384 +--- a/include/xen/grant_table.h
1385 ++++ b/include/xen/grant_table.h
1386 +@@ -157,6 +157,7 @@ gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
1387 + map->flags = flags;
1388 + map->ref = ref;
1389 + map->dom = domid;
1390 ++ map->status = 1; /* arbitrary positive value */
1391 + }
1392 +
1393 + static inline void
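
[Editor's note: presetting map->status is subtle but deliberate — GNTST_okay is 0, so an op that is built but never actually processed by the hypervisor would otherwise read back as success, defeating the per-entry status checks added throughout this patch. Any positive value works; 1 is arbitrary. As an invariant, expressed with a hypothetical assertion not in the patch:]

    struct gnttab_map_grant_ref op;

    gnttab_set_map_op(&op, addr, GNTMAP_host_map, ref, domid);
    /* Not yet submitted: must not be mistaken for success (GNTST_okay == 0). */
    BUG_ON(op.status == GNTST_okay);
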
1394 +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
1395 +index fe9a9fa2ebc45..14d47ed4114fd 100644
1396 +--- a/include/xen/xenbus.h
1397 ++++ b/include/xen/xenbus.h
1398 +@@ -187,8 +187,6 @@ void xs_suspend_cancel(void);
1399 +
1400 + struct work_struct;
1401 +
1402 +-void xenbus_probe(void);
1403 +-
1404 + #define XENBUS_IS_ERR_READ(str) ({ \
1405 + if (!IS_ERR(str) && strlen(str) == 0) { \
1406 + kfree(str); \
1407 +diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
1408 +index 135be433e9a0f..1d4c3fba0f8cd 100644
1409 +--- a/kernel/bpf/stackmap.c
1410 ++++ b/kernel/bpf/stackmap.c
1411 +@@ -71,6 +71,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
1412 +
1413 + /* hash table size must be power of 2 */
1414 + n_buckets = roundup_pow_of_two(attr->max_entries);
1415 ++ if (!n_buckets)
1416 ++ return ERR_PTR(-E2BIG);
1417 +
1418 + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
1419 + if (cost >= U32_MAX - PAGE_SIZE)
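
[Editor's note: the stackmap guard covers integer overflow in roundup_pow_of_two() — with a 32-bit unsigned long, any max_entries above 2^31 rounds up to 2^32, which wraps to 0, and the cost and allocation below would then be computed from a zero bucket count. Rejecting n_buckets == 0 with -E2BIG closes the hole. Illustratively, assuming a 32-bit unsigned long:]

    unsigned long n = roundup_pow_of_two(0x80000001UL);

    /* On 32-bit, 1UL << 32 wraps: n is 0 here, so the map must be rejected. */
    if (!n)
        return ERR_PTR(-E2BIG);
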
1420 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1421 +index 5b200b8797654..0373d050ff0c4 100644
1422 +--- a/kernel/trace/ftrace.c
1423 ++++ b/kernel/trace/ftrace.c
1424 +@@ -6666,7 +6666,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1425 + }
1426 +
1427 + if (t->ret_stack == NULL) {
1428 +- atomic_set(&t->tracing_graph_pause, 0);
1429 + atomic_set(&t->trace_overrun, 0);
1430 + t->curr_ret_stack = -1;
1431 + /* Make sure the tasks see the -1 first: */
1432 +@@ -6878,7 +6877,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
1433 + static void
1434 + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
1435 + {
1436 +- atomic_set(&t->tracing_graph_pause, 0);
1437 + atomic_set(&t->trace_overrun, 0);
1438 + t->ftrace_timestamp = 0;
1439 + /* make curr_ret_stack visible before we add the ret_stack */
1440 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1441 +index c0dbc683322fb..3a0691c647044 100644
1442 +--- a/kernel/trace/trace.c
1443 ++++ b/kernel/trace/trace.c
1444 +@@ -2285,7 +2285,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1445 + (entry = this_cpu_read(trace_buffered_event))) {
1446 + /* Try to use the per cpu buffer first */
1447 + val = this_cpu_inc_return(trace_buffered_event_cnt);
1448 +- if (val == 1) {
1449 ++ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
1450 + trace_event_setup(entry, type, flags, pc);
1451 + entry->array[0] = len;
1452 + return entry;
1453 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
1454 +index d69c79ac97986..7b4af70d9dfd0 100644
1455 +--- a/kernel/trace/trace_events.c
1456 ++++ b/kernel/trace/trace_events.c
1457 +@@ -1114,7 +1114,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
1458 + mutex_lock(&event_mutex);
1459 + list_for_each_entry(file, &tr->events, list) {
1460 + call = file->event_call;
1461 +- if (!trace_event_name(call) || !call->class || !call->class->reg)
1462 ++ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
1463 ++ !trace_event_name(call) || !call->class || !call->class->reg)
1464 + continue;
1465 +
1466 + if (system && strcmp(call->class->system, system->name) != 0)
1467 +diff --git a/lib/string.c b/lib/string.c
1468 +index db9abc18b2165..fba43e4ad5514 100644
1469 +--- a/lib/string.c
1470 ++++ b/lib/string.c
1471 +@@ -158,11 +158,9 @@ EXPORT_SYMBOL(strlcpy);
1472 + * @src: Where to copy the string from
1473 + * @count: Size of destination buffer
1474 + *
1475 +- * Copy the string, or as much of it as fits, into the dest buffer.
1476 +- * The routine returns the number of characters copied (not including
1477 +- * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
1478 +- * The behavior is undefined if the string buffers overlap.
1479 +- * The destination buffer is always NUL terminated, unless it's zero-sized.
1480 ++ * Copy the string, or as much of it as fits, into the dest buffer. The
1481 ++ * behavior is undefined if the string buffers overlap. The destination
1482 ++ * buffer is always NUL terminated, unless it's zero-sized.
1483 + *
1484 + * Preferred to strlcpy() since the API doesn't require reading memory
1485 + * from the src string beyond the specified "count" bytes, and since
1486 +@@ -172,8 +170,10 @@ EXPORT_SYMBOL(strlcpy);
1487 + *
1488 + * Preferred to strncpy() since it always returns a valid string, and
1489 + * doesn't unnecessarily force the tail of the destination buffer to be
1490 +- * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy()
1491 +- * with an overflow test, then just memset() the tail of the dest buffer.
1492 ++ * zeroed. If zeroing is desired please use strscpy_pad().
1493 ++ *
1494 ++ * Return: The number of characters copied (not including the trailing
1495 ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
1496 + */
1497 + ssize_t strscpy(char *dest, const char *src, size_t count)
1498 + {
1499 +@@ -260,6 +260,39 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
1500 + }
1501 + EXPORT_SYMBOL(stpcpy);
1502 +
1503 ++/**
1504 ++ * strscpy_pad() - Copy a C-string into a sized buffer
1505 ++ * @dest: Where to copy the string to
1506 ++ * @src: Where to copy the string from
1507 ++ * @count: Size of destination buffer
1508 ++ *
1509 ++ * Copy the string, or as much of it as fits, into the dest buffer. The
1510 ++ * behavior is undefined if the string buffers overlap. The destination
1511 ++ * buffer is always %NUL terminated, unless it's zero-sized.
1512 ++ *
1513 ++ * If the source string is shorter than the destination buffer, the
1514 ++ * tail of the destination buffer is zeroed.
1515 ++ *
1516 ++ * For a full explanation of why you may want to consider using the
1517 ++ * 'strscpy' functions, please see the function docstring for strscpy().
1518 ++ *
1519 ++ * Return: The number of characters copied (not including the trailing
1520 ++ * %NUL) or -E2BIG if the destination buffer wasn't big enough.
1521 ++ */
1522 ++ssize_t strscpy_pad(char *dest, const char *src, size_t count)
1523 ++{
1524 ++ ssize_t written;
1525 ++
1526 ++ written = strscpy(dest, src, count);
1527 ++ if (written < 0 || written == count - 1)
1528 ++ return written;
1529 ++
1530 ++ memset(dest + written + 1, 0, count - written - 1);
1531 ++
1532 ++ return written;
1533 ++}
1534 ++EXPORT_SYMBOL(strscpy_pad);
1535 ++
1536 + #ifndef __HAVE_ARCH_STRCAT
1537 + /**
1538 + * strcat - Append one %NUL-terminated string to another
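
Since strscpy_pad() is new in this tree, a short user-space restatement of how it differs from strscpy() may help: both truncate and NUL-terminate, but the _pad variant also zeroes the tail of the buffer, which matters when the whole buffer is later copied somewhere. The my_strscpy()/my_strscpy_pad() below are sketches of the semantics, not the kernel code, and -1 stands in for -E2BIG:

#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dest, const char *src, size_t count)
{
	size_t len;

	if (count == 0)
		return -1;
	len = strnlen(src, count);
	if (len >= count) {
		memcpy(dest, src, count - 1);
		dest[count - 1] = '\0';
		return -1;	/* truncated: -E2BIG in the kernel */
	}
	memcpy(dest, src, len + 1);
	return (long)len;
}

static long my_strscpy_pad(char *dest, const char *src, size_t count)
{
	long written = my_strscpy(dest, src, count);

	/* on success, zero the remainder so no stale bytes linger */
	if (written >= 0 && (size_t)written < count - 1)
		memset(dest + written + 1, 0, count - written - 1);
	return written;
}

int main(void)
{
	char buf[8];

	memset(buf, 'X', sizeof(buf));
	my_strscpy_pad(buf, "hi", sizeof(buf));
	printf("buf[5] == %d\n", buf[5]);	/* 0: the tail was padded */
	return 0;
}
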
1539 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1540 +index 6fa31754eadd9..f5a5e9f82b221 100644
1541 +--- a/mm/backing-dev.c
1542 ++++ b/mm/backing-dev.c
1543 +@@ -19,6 +19,7 @@ struct backing_dev_info noop_backing_dev_info = {
1544 + EXPORT_SYMBOL_GPL(noop_backing_dev_info);
1545 +
1546 + static struct class *bdi_class;
1547 ++const char *bdi_unknown_name = "(unknown)";
1548 +
1549 + /*
1550 + * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
1551 +diff --git a/mm/memblock.c b/mm/memblock.c
1552 +index e81d12c544e9f..5d36b4c549292 100644
1553 +--- a/mm/memblock.c
1554 ++++ b/mm/memblock.c
1555 +@@ -174,14 +174,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
1556 + *
1557 + * Find @size free area aligned to @align in the specified range and node.
1558 + *
1559 +- * When allocation direction is bottom-up, the @start should be greater
1560 +- * than the end of the kernel image. Otherwise, it will be trimmed. The
1561 +- * reason is that we want the bottom-up allocation just near the kernel
1562 +- * image so it is highly likely that the allocated memory and the kernel
1563 +- * will reside in the same node.
1564 +- *
1565 +- * If bottom-up allocation failed, will try to allocate memory top-down.
1566 +- *
1567 + * RETURNS:
1568 + * Found address on success, 0 on failure.
1569 + */
1570 +@@ -189,8 +181,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
1571 + phys_addr_t align, phys_addr_t start,
1572 + phys_addr_t end, int nid, ulong flags)
1573 + {
1574 +- phys_addr_t kernel_end, ret;
1575 +-
1576 + /* pump up @end */
1577 + if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
1578 + end = memblock.current_limit;
1579 +@@ -198,39 +188,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
1580 + /* avoid allocating the first page */
1581 + start = max_t(phys_addr_t, start, PAGE_SIZE);
1582 + end = max(start, end);
1583 +- kernel_end = __pa_symbol(_end);
1584 +-
1585 +- /*
1586 +- * try bottom-up allocation only when bottom-up mode
1587 +- * is set and @end is above the kernel image.
1588 +- */
1589 +- if (memblock_bottom_up() && end > kernel_end) {
1590 +- phys_addr_t bottom_up_start;
1591 +-
1592 +- /* make sure we will allocate above the kernel */
1593 +- bottom_up_start = max(start, kernel_end);
1594 +
1595 +- /* ok, try bottom-up allocation first */
1596 +- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
1597 +- size, align, nid, flags);
1598 +- if (ret)
1599 +- return ret;
1600 +-
1601 +- /*
1602 +- * we always limit bottom-up allocation above the kernel,
1603 +- * but top-down allocation doesn't have the limit, so
1604 +- * retrying top-down allocation may succeed when bottom-up
1605 +- * allocation failed.
1606 +- *
1607 +- * bottom-up allocation is expected to be fail very rarely,
1608 +- * so we use WARN_ONCE() here to see the stack trace if
1609 +- * fail happens.
1610 +- */
1611 +- WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
1612 +- }
1613 +-
1614 +- return __memblock_find_range_top_down(start, end, size, align, nid,
1615 +- flags);
1616 ++ if (memblock_bottom_up())
1617 ++ return __memblock_find_range_bottom_up(start, end, size, align,
1618 ++ nid, flags);
1619 ++ else
1620 ++ return __memblock_find_range_top_down(start, end, size, align,
1621 ++ nid, flags);
1622 + }
1623 +
1624 + /**
1625 +diff --git a/net/key/af_key.c b/net/key/af_key.c
1626 +index 0747747fffe58..a10336cd7f974 100644
1627 +--- a/net/key/af_key.c
1628 ++++ b/net/key/af_key.c
1629 +@@ -2906,7 +2906,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
1630 + break;
1631 + if (!aalg->pfkey_supported)
1632 + continue;
1633 +- if (aalg_tmpl_set(t, aalg) && aalg->available)
1634 ++ if (aalg_tmpl_set(t, aalg))
1635 + sz += sizeof(struct sadb_comb);
1636 + }
1637 + return sz + sizeof(struct sadb_prop);
1638 +@@ -2924,7 +2924,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1639 + if (!ealg->pfkey_supported)
1640 + continue;
1641 +
1642 +- if (!(ealg_tmpl_set(t, ealg) && ealg->available))
1643 ++ if (!(ealg_tmpl_set(t, ealg)))
1644 + continue;
1645 +
1646 + for (k = 1; ; k++) {
1647 +@@ -2935,7 +2935,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
1648 + if (!aalg->pfkey_supported)
1649 + continue;
1650 +
1651 +- if (aalg_tmpl_set(t, aalg) && aalg->available)
1652 ++ if (aalg_tmpl_set(t, aalg))
1653 + sz += sizeof(struct sadb_comb);
1654 + }
1655 + }
1656 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
1657 +index 8064d769c953c..ede0ab5dc400a 100644
1658 +--- a/net/netfilter/nf_conntrack_core.c
1659 ++++ b/net/netfilter/nf_conntrack_core.c
1660 +@@ -939,7 +939,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1661 + * Let nf_ct_resolve_clash() deal with this later.
1662 + */
1663 + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1664 +- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1665 ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
1666 ++ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
1667 + continue;
1668 +
1669 + NF_CT_STAT_INC_ATOMIC(net, found);
1670 +diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
1671 +index cf96d230e5a3c..cafbddf844d62 100644
1672 +--- a/net/netfilter/xt_recent.c
1673 ++++ b/net/netfilter/xt_recent.c
1674 +@@ -155,7 +155,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
1675 + /*
1676 + * Drop entries with timestamps older than 'time'.
1677 + */
1678 +-static void recent_entry_reap(struct recent_table *t, unsigned long time)
1679 ++static void recent_entry_reap(struct recent_table *t, unsigned long time,
1680 ++ struct recent_entry *working, bool update)
1681 + {
1682 + struct recent_entry *e;
1683 +
1684 +@@ -164,6 +165,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
1685 + */
1686 + e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
1687 +
1688 ++ /*
1689 ++ * Do not reap the entry which is about to be updated.
1690 ++ */
1691 ++ if (e == working && update)
1692 ++ return;
1693 ++
1694 + /*
1695 + * The last time stamp is the most recent.
1696 + */
1697 +@@ -306,7 +313,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
1698 +
1699 + /* info->seconds must be non-zero */
1700 + if (info->check_set & XT_RECENT_REAP)
1701 +- recent_entry_reap(t, time);
1702 ++ recent_entry_reap(t, time, e,
1703 ++ info->check_set & XT_RECENT_UPDATE && ret);
1704 + }
1705 +
1706 + if (info->check_set & XT_RECENT_SET ||
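
The xt_recent change threads the entry being matched into the reaper so the LRU head is spared when it is exactly the entry an XT_RECENT_UPDATE rule is about to refresh. A reduced model of that skip (struct entry and reap() are illustrative stand-ins, not the module's types):

#include <stdio.h>

struct entry {
	struct entry *next;	/* toward newer entries */
	unsigned long stamp;
};

/* Spare the oldest entry when it is the one being updated;
 * otherwise drop it if its timestamp is stale, as before. */
static struct entry *reap(struct entry *oldest, unsigned long cutoff,
			  struct entry *working, int update)
{
	if (oldest == working && update)
		return oldest;		/* keep: about to be refreshed */
	if (oldest->stamp < cutoff) {
		puts("reaping stale entry");
		return oldest->next;
	}
	return oldest;
}

int main(void)
{
	struct entry e = { NULL, 10 };

	/* stale (10 < 100), but it is the working entry of a pending
	 * update, so it survives the reap */
	return reap(&e, 100, &e, 1) == &e ? 0 : 1;
}
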
1707 +diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
1708 +index 1281b967dbf96..dc1eae4c206ba 100644
1709 +--- a/net/sunrpc/auth_gss/auth_gss.c
1710 ++++ b/net/sunrpc/auth_gss/auth_gss.c
1711 +@@ -53,6 +53,7 @@
1712 + #include <linux/uaccess.h>
1713 + #include <linux/hashtable.h>
1714 +
1715 ++#include "auth_gss_internal.h"
1716 + #include "../netns.h"
1717 +
1718 + static const struct rpc_authops authgss_ops;
1719 +@@ -147,35 +148,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
1720 + clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
1721 + }
1722 +
1723 +-static const void *
1724 +-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
1725 +-{
1726 +- const void *q = (const void *)((const char *)p + len);
1727 +- if (unlikely(q > end || q < p))
1728 +- return ERR_PTR(-EFAULT);
1729 +- memcpy(res, p, len);
1730 +- return q;
1731 +-}
1732 +-
1733 +-static inline const void *
1734 +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
1735 +-{
1736 +- const void *q;
1737 +- unsigned int len;
1738 +-
1739 +- p = simple_get_bytes(p, end, &len, sizeof(len));
1740 +- if (IS_ERR(p))
1741 +- return p;
1742 +- q = (const void *)((const char *)p + len);
1743 +- if (unlikely(q > end || q < p))
1744 +- return ERR_PTR(-EFAULT);
1745 +- dest->data = kmemdup(p, len, GFP_NOFS);
1746 +- if (unlikely(dest->data == NULL))
1747 +- return ERR_PTR(-ENOMEM);
1748 +- dest->len = len;
1749 +- return q;
1750 +-}
1751 +-
1752 + static struct gss_cl_ctx *
1753 + gss_cred_get_ctx(struct rpc_cred *cred)
1754 + {
1755 +diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
1756 +new file mode 100644
1757 +index 0000000000000..f6d9631bd9d00
1758 +--- /dev/null
1759 ++++ b/net/sunrpc/auth_gss/auth_gss_internal.h
1760 +@@ -0,0 +1,45 @@
1761 ++// SPDX-License-Identifier: BSD-3-Clause
1762 ++/*
1763 ++ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
1764 ++ *
1765 ++ * Internal definitions for RPCSEC_GSS client authentication
1766 ++ *
1767 ++ * Copyright (c) 2000 The Regents of the University of Michigan.
1768 ++ * All rights reserved.
1769 ++ *
1770 ++ */
1771 ++#include <linux/err.h>
1772 ++#include <linux/string.h>
1773 ++#include <linux/sunrpc/xdr.h>
1774 ++
1775 ++static inline const void *
1776 ++simple_get_bytes(const void *p, const void *end, void *res, size_t len)
1777 ++{
1778 ++ const void *q = (const void *)((const char *)p + len);
1779 ++ if (unlikely(q > end || q < p))
1780 ++ return ERR_PTR(-EFAULT);
1781 ++ memcpy(res, p, len);
1782 ++ return q;
1783 ++}
1784 ++
1785 ++static inline const void *
1786 ++simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
1787 ++{
1788 ++ const void *q;
1789 ++ unsigned int len;
1790 ++
1791 ++ p = simple_get_bytes(p, end, &len, sizeof(len));
1792 ++ if (IS_ERR(p))
1793 ++ return p;
1794 ++ q = (const void *)((const char *)p + len);
1795 ++ if (unlikely(q > end || q < p))
1796 ++ return ERR_PTR(-EFAULT);
1797 ++ if (len) {
1798 ++ dest->data = kmemdup(p, len, GFP_NOFS);
1799 ++ if (unlikely(dest->data == NULL))
1800 ++ return ERR_PTR(-ENOMEM);
1801 ++ } else
1802 ++ dest->data = NULL;
1803 ++ dest->len = len;
1804 ++ return q;
1805 ++}
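
Besides deduplicating the two local copies in auth_gss.c and gss_krb5_mech.c, the shared simple_get_netobj() changes zero-length handling: duplicating zero bytes does not yield a usable buffer, so the helper now stores NULL and lets callers test dest->data. A reduced user-space model of the fixed path (malloc() standing in for kmemdup(), -1 for -ENOMEM):

#include <stdlib.h>
#include <string.h>

struct netobj_sketch {
	unsigned int len;
	void *data;
};

/* zero-length objects get data == NULL instead of a zero-byte
 * duplicate, so callers can safely check obj.data first */
static int get_netobj(struct netobj_sketch *dest,
		      const void *p, unsigned int len)
{
	if (len) {
		dest->data = malloc(len);
		if (!dest->data)
			return -1;
		memcpy(dest->data, p, len);
	} else {
		dest->data = NULL;
	}
	dest->len = len;
	return 0;
}

int main(void)
{
	struct netobj_sketch obj;

	get_netobj(&obj, "", 0);
	return obj.data == NULL ? 0 : 1;
}
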
1806 +diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
1807 +index 7bb2514aadd9d..14f2823ad6c20 100644
1808 +--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
1809 ++++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
1810 +@@ -46,6 +46,8 @@
1811 + #include <linux/sunrpc/xdr.h>
1812 + #include <linux/sunrpc/gss_krb5_enctypes.h>
1813 +
1814 ++#include "auth_gss_internal.h"
1815 ++
1816 + #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
1817 + # define RPCDBG_FACILITY RPCDBG_AUTH
1818 + #endif
1819 +@@ -187,35 +189,6 @@ get_gss_krb5_enctype(int etype)
1820 + return NULL;
1821 + }
1822 +
1823 +-static const void *
1824 +-simple_get_bytes(const void *p, const void *end, void *res, int len)
1825 +-{
1826 +- const void *q = (const void *)((const char *)p + len);
1827 +- if (unlikely(q > end || q < p))
1828 +- return ERR_PTR(-EFAULT);
1829 +- memcpy(res, p, len);
1830 +- return q;
1831 +-}
1832 +-
1833 +-static const void *
1834 +-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
1835 +-{
1836 +- const void *q;
1837 +- unsigned int len;
1838 +-
1839 +- p = simple_get_bytes(p, end, &len, sizeof(len));
1840 +- if (IS_ERR(p))
1841 +- return p;
1842 +- q = (const void *)((const char *)p + len);
1843 +- if (unlikely(q > end || q < p))
1844 +- return ERR_PTR(-EFAULT);
1845 +- res->data = kmemdup(p, len, GFP_NOFS);
1846 +- if (unlikely(res->data == NULL))
1847 +- return ERR_PTR(-ENOMEM);
1848 +- res->len = len;
1849 +- return q;
1850 +-}
1851 +-
1852 + static inline const void *
1853 + get_key(const void *p, const void *end,
1854 + struct krb5_ctx *ctx, struct crypto_skcipher **res)
1855 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
1856 +index 29f7491acb354..eafcc75f289ac 100644
1857 +--- a/net/vmw_vsock/af_vsock.c
1858 ++++ b/net/vmw_vsock/af_vsock.c
1859 +@@ -823,10 +823,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
1860 + */
1861 +
1862 + sk = sock->sk;
1863 ++
1864 ++ lock_sock(sk);
1865 + if (sock->state == SS_UNCONNECTED) {
1866 + err = -ENOTCONN;
1867 + if (sk->sk_type == SOCK_STREAM)
1868 +- return err;
1869 ++ goto out;
1870 + } else {
1871 + sock->state = SS_DISCONNECTING;
1872 + err = 0;
1873 +@@ -835,10 +837,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
1874 + /* Receive and send shutdowns are treated alike. */
1875 + mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
1876 + if (mode) {
1877 +- lock_sock(sk);
1878 + sk->sk_shutdown |= mode;
1879 + sk->sk_state_change(sk);
1880 +- release_sock(sk);
1881 +
1882 + if (sk->sk_type == SOCK_STREAM) {
1883 + sock_reset_flag(sk, SOCK_DONE);
1884 +@@ -846,6 +846,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
1885 + }
1886 + }
1887 +
1888 ++out:
1889 ++ release_sock(sk);
1890 + return err;
1891 + }
1892 +
1893 +@@ -1114,7 +1116,6 @@ static void vsock_connect_timeout(struct work_struct *work)
1894 + {
1895 + struct sock *sk;
1896 + struct vsock_sock *vsk;
1897 +- int cancel = 0;
1898 +
1899 + vsk = container_of(work, struct vsock_sock, connect_work.work);
1900 + sk = sk_vsock(vsk);
1901 +@@ -1125,11 +1126,9 @@ static void vsock_connect_timeout(struct work_struct *work)
1902 + sk->sk_state = TCP_CLOSE;
1903 + sk->sk_err = ETIMEDOUT;
1904 + sk->sk_error_report(sk);
1905 +- cancel = 1;
1906 ++ vsock_transport_cancel_pkt(vsk);
1907 + }
1908 + release_sock(sk);
1909 +- if (cancel)
1910 +- vsock_transport_cancel_pkt(vsk);
1911 +
1912 + sock_put(sk);
1913 + }
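
The af_vsock change widens the socket lock so the SS_UNCONNECTED check and the sk_shutdown update happen in one critical section, exiting through a single unlock path; previously the state could change between the unlocked check and the later locked update. A pthread sketch of the same check-then-act shape (the state values and -1 error are stand-ins for SS_* and -ENOTCONN):

#include <pthread.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static int state;		/* 0 = unconnected, 1 = connected */
static int shutdown_flags;

/* take the lock once, check and mutate under it, and leave through
 * a single unlock path (the role of the new "out:" label) */
static int do_shutdown(int mode)
{
	int err = 0;

	pthread_mutex_lock(&lk);
	if (state == 0) {
		err = -1;
		goto out;
	}
	shutdown_flags |= mode;
out:
	pthread_mutex_unlock(&lk);
	return err;
}

int main(void)
{
	state = 1;
	return do_shutdown(1) == 0 ? 0 : 1;
}
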
1914 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
1915 +index 736b76ec8cf01..ea350a99cfc38 100644
1916 +--- a/net/vmw_vsock/hyperv_transport.c
1917 ++++ b/net/vmw_vsock/hyperv_transport.c
1918 +@@ -444,14 +444,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
1919 +
1920 + static int hvs_shutdown(struct vsock_sock *vsk, int mode)
1921 + {
1922 +- struct sock *sk = sk_vsock(vsk);
1923 +-
1924 + if (!(mode & SEND_SHUTDOWN))
1925 + return 0;
1926 +
1927 +- lock_sock(sk);
1928 + hvs_shutdown_lock_held(vsk->trans, mode);
1929 +- release_sock(sk);
1930 + return 0;
1931 + }
1932 +
1933 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1934 +index 8e4c13cc61ba8..349311f6d1958 100644
1935 +--- a/net/vmw_vsock/virtio_transport_common.c
1936 ++++ b/net/vmw_vsock/virtio_transport_common.c
1937 +@@ -1029,10 +1029,10 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
1938 +
1939 + vsk = vsock_sk(sk);
1940 +
1941 +- space_available = virtio_transport_space_update(sk, pkt);
1942 +-
1943 + lock_sock(sk);
1944 +
1945 ++ space_available = virtio_transport_space_update(sk, pkt);
1946 ++
1947 + /* Update CID in case it has changed after a transport reset event */
1948 + vsk->local_addr.svm_cid = dst.svm_cid;
1949 +
1950 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
1951 +index 3edc9c04cb468..f4b752cb17516 100644
1952 +--- a/scripts/Makefile.build
1953 ++++ b/scripts/Makefile.build
1954 +@@ -224,6 +224,8 @@ cmd_modversions_c = \
1955 + endif
1956 +
1957 + ifdef CONFIG_FTRACE_MCOUNT_RECORD
1958 ++ifndef CC_USING_RECORD_MCOUNT
1959 +# compiler will not generate __mcount_loc; use recordmcount or recordmcount.pl
1960 + ifdef BUILD_C_RECORDMCOUNT
1961 + ifeq ("$(origin RECORDMCOUNT_WARN)", "command line")
1962 + RECORDMCOUNT_FLAGS = -w
1963 +@@ -252,6 +254,7 @@ cmd_record_mcount = \
1964 + "$(CC_FLAGS_FTRACE)" ]; then \
1965 + $(sub_cmd_record_mcount) \
1966 + fi;
1967 ++endif # CC_USING_RECORD_MCOUNT
1968 + endif # CONFIG_FTRACE_MCOUNT_RECORD
1969 +
1970 + ifdef CONFIG_STACK_VALIDATION
1971 +diff --git a/security/commoncap.c b/security/commoncap.c
1972 +index ac031fa391908..bf689d61b293c 100644
1973 +--- a/security/commoncap.c
1974 ++++ b/security/commoncap.c
1975 +@@ -378,10 +378,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
1976 + {
1977 + int size, ret;
1978 + kuid_t kroot;
1979 ++ u32 nsmagic, magic;
1980 + uid_t root, mappedroot;
1981 + char *tmpbuf = NULL;
1982 + struct vfs_cap_data *cap;
1983 +- struct vfs_ns_cap_data *nscap;
1984 ++ struct vfs_ns_cap_data *nscap = NULL;
1985 + struct dentry *dentry;
1986 + struct user_namespace *fs_ns;
1987 +
1988 +@@ -403,46 +404,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
1989 + fs_ns = inode->i_sb->s_user_ns;
1990 + cap = (struct vfs_cap_data *) tmpbuf;
1991 + if (is_v2header((size_t) ret, cap)) {
1992 +- /* If this is sizeof(vfs_cap_data) then we're ok with the
1993 +- * on-disk value, so return that. */
1994 +- if (alloc)
1995 +- *buffer = tmpbuf;
1996 +- else
1997 +- kfree(tmpbuf);
1998 +- return ret;
1999 +- } else if (!is_v3header((size_t) ret, cap)) {
2000 +- kfree(tmpbuf);
2001 +- return -EINVAL;
2002 ++ root = 0;
2003 ++ } else if (is_v3header((size_t) ret, cap)) {
2004 ++ nscap = (struct vfs_ns_cap_data *) tmpbuf;
2005 ++ root = le32_to_cpu(nscap->rootid);
2006 ++ } else {
2007 ++ size = -EINVAL;
2008 ++ goto out_free;
2009 + }
2010 +
2011 +- nscap = (struct vfs_ns_cap_data *) tmpbuf;
2012 +- root = le32_to_cpu(nscap->rootid);
2013 + kroot = make_kuid(fs_ns, root);
2014 +
2015 + /* If the root kuid maps to a valid uid in current ns, then return
2016 + * this as a nscap. */
2017 + mappedroot = from_kuid(current_user_ns(), kroot);
2018 + if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
2019 ++ size = sizeof(struct vfs_ns_cap_data);
2020 + if (alloc) {
2021 +- *buffer = tmpbuf;
2022 ++ if (!nscap) {
2023 ++ /* v2 -> v3 conversion */
2024 ++ nscap = kzalloc(size, GFP_ATOMIC);
2025 ++ if (!nscap) {
2026 ++ size = -ENOMEM;
2027 ++ goto out_free;
2028 ++ }
2029 ++ nsmagic = VFS_CAP_REVISION_3;
2030 ++ magic = le32_to_cpu(cap->magic_etc);
2031 ++ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
2032 ++ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
2033 ++ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
2034 ++ nscap->magic_etc = cpu_to_le32(nsmagic);
2035 ++ } else {
2036 ++ /* use allocated v3 buffer */
2037 ++ tmpbuf = NULL;
2038 ++ }
2039 + nscap->rootid = cpu_to_le32(mappedroot);
2040 +- } else
2041 +- kfree(tmpbuf);
2042 +- return size;
2043 ++ *buffer = nscap;
2044 ++ }
2045 ++ goto out_free;
2046 + }
2047 +
2048 + if (!rootid_owns_currentns(kroot)) {
2049 +- kfree(tmpbuf);
2050 +- return -EOPNOTSUPP;
2051 ++ size = -EOVERFLOW;
2052 ++ goto out_free;
2053 + }
2054 +
2055 + /* This comes from a parent namespace. Return as a v2 capability */
2056 + size = sizeof(struct vfs_cap_data);
2057 + if (alloc) {
2058 +- *buffer = kmalloc(size, GFP_ATOMIC);
2059 +- if (*buffer) {
2060 +- struct vfs_cap_data *cap = *buffer;
2061 +- __le32 nsmagic, magic;
2062 ++ if (nscap) {
2063 ++ /* v3 -> v2 conversion */
2064 ++ cap = kzalloc(size, GFP_ATOMIC);
2065 ++ if (!cap) {
2066 ++ size = -ENOMEM;
2067 ++ goto out_free;
2068 ++ }
2069 + magic = VFS_CAP_REVISION_2;
2070 + nsmagic = le32_to_cpu(nscap->magic_etc);
2071 + if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
2072 +@@ -450,9 +466,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
2073 + memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
2074 + cap->magic_etc = cpu_to_le32(magic);
2075 + } else {
2076 +- size = -ENOMEM;
2077 ++ /* use unconverted v2 */
2078 ++ tmpbuf = NULL;
2079 + }
2080 ++ *buffer = cap;
2081 + }
2082 ++out_free:
2083 + kfree(tmpbuf);
2084 + return size;
2085 + }
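
The commoncap.c rework converts between the v2 and v3 xattr layouts instead of returning the raw on-disk value or -EINVAL: the v2 -> v3 direction copies the capability words, carries the effective bit over, and stamps the mapped root uid. A structural sketch of that conversion (local struct and constant definitions reduced from the uapi ones; the real on-disk fields are little-endian and go through le32 conversions):

#include <stdint.h>
#include <string.h>

#define VFS_CAP_U32		2
#define VFS_CAP_REVISION_3	0x03000000u
#define VFS_CAP_FLAGS_EFFECTIVE	0x000001u

struct cap_data_v2 {
	uint32_t magic_etc;
	uint32_t data[VFS_CAP_U32 * 2];
};

struct cap_data_v3 {
	uint32_t magic_etc;
	uint32_t data[VFS_CAP_U32 * 2];
	uint32_t rootid;
};

/* v2 -> v3: keep the capability words, preserve the effective bit,
 * record which uid is "root" for the viewing namespace */
static void v2_to_v3(struct cap_data_v3 *out,
		     const struct cap_data_v2 *in, uint32_t mappedroot)
{
	uint32_t magic = VFS_CAP_REVISION_3;

	if (in->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
		magic |= VFS_CAP_FLAGS_EFFECTIVE;
	memcpy(out->data, in->data, sizeof(out->data));
	out->magic_etc = magic;
	out->rootid = mappedroot;
}

int main(void)
{
	struct cap_data_v2 v2 = { VFS_CAP_FLAGS_EFFECTIVE, { 1, 2, 3, 4 } };
	struct cap_data_v3 v3;

	v2_to_v3(&v3, &v2, 1000);
	return v3.rootid == 1000 ? 0 : 1;
}
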
2086 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2087 +index c1ca4d40157b1..547ae59199db2 100644
2088 +--- a/virt/kvm/kvm_main.c
2089 ++++ b/virt/kvm/kvm_main.c
2090 +@@ -382,9 +382,8 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
2091 + */
2092 + kvm->mmu_notifier_count++;
2093 + need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
2094 +- need_tlb_flush |= kvm->tlbs_dirty;
2095 + /* we have to flush the tlb before the pages can be freed */
2096 +- if (need_tlb_flush)
2097 ++ if (need_tlb_flush || kvm->tlbs_dirty)
2098 + kvm_flush_remote_tlbs(kvm);
2099 +
2100 + spin_unlock(&kvm->mmu_lock);
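
One hazard the kvm_main.c change avoids: need_tlb_flush is an int while tlbs_dirty is a long, so the old OR-assignment could silently drop the high bits of tlbs_dirty on 64-bit builds and skip a needed flush; testing the value directly sidesteps the truncation. A two-line illustration of that narrowing (plain C, nothing KVM-specific):

#include <stdio.h>

int main(void)
{
	int need_tlb_flush = 0;
	long tlbs_dirty = 1L << 32;	/* only a high bit set */

	need_tlb_flush |= tlbs_dirty;	/* int |= long: high bits lost */
	printf("%d\n", need_tlb_flush);	/* prints 0 on LP64 targets */
	return 0;
}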