From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 14 Nov 2018 13:49:22
Message-Id: 1542203321.360a0d86a529599f70200ef16fcdccf520e11d76.mpagano@gentoo
commit: 360a0d86a529599f70200ef16fcdccf520e11d76
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 10 11:17:55 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 14 13:48:41 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=360a0d86

Linux patch 4.14.75

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1074_linux-4.14.75.patch | 3396 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3400 insertions(+)

diff --git a/0000_README b/0000_README
index db32f2e..0684007 100644
--- a/0000_README
+++ b/0000_README
@@ -339,6 +339,10 @@ Patch: 1073_linux-4.14.74.patch
From: http://www.kernel.org
Desc: Linux 4.14.74

+Patch: 1074_linux-4.14.75.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.75
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1074_linux-4.14.75.patch b/1074_linux-4.14.75.patch
new file mode 100644
index 0000000..a13dbf4
--- /dev/null
+++ b/1074_linux-4.14.75.patch
@@ -0,0 +1,3396 @@
+diff --git a/Makefile b/Makefile
+index cc0e65a8d7bf..7fc373c011c0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 74
++SUBLEVEL = 75
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 11859287c52a..c98b59ac0612 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
+ "1: llock %[orig], [%[ctr]] \n" \
+ " " #asm_op " %[val], %[orig], %[i] \n" \
+ " scond %[val], [%[ctr]] \n" \
+- " \n" \
++ " bnz 1b \n" \
+ : [val] "=&r" (val), \
+ [orig] "=&r" (orig) \
+ : [ctr] "r" (&v->counter), \
+diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
+index 1b5e0e843c3a..7e2b3e360086 100644
+--- a/arch/arm64/include/asm/jump_label.h
++++ b/arch/arm64/include/asm/jump_label.h
+@@ -28,7 +28,7 @@
+
+ static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+ {
+- asm goto("1: nop\n\t"
++ asm_volatile_goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+@@ -42,7 +42,7 @@ l_yes:
+
+ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+ {
+- asm goto("1: b %l[l_yes]\n\t"
++ asm_volatile_goto("1: b %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
+index 5e4a59b3ec1b..2691a1857d20 100644
+--- a/arch/hexagon/include/asm/bitops.h
++++ b/arch/hexagon/include/asm/bitops.h
+@@ -211,7 +211,7 @@ static inline long ffz(int x)
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+-static inline long fls(int x)
++static inline int fls(int x)
+ {
+ int r;
+
+@@ -232,7 +232,7 @@ static inline long fls(int x)
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+-static inline long ffs(int x)
++static inline int ffs(int x)
+ {
+ int r;
+
+diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
+index 546792d176a4..564651bded42 100644
+--- a/arch/hexagon/kernel/dma.c
++++ b/arch/hexagon/kernel/dma.c
+@@ -59,7 +59,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
+ panic("Can't create %s() memory pool!", __func__);
+ else
+ gen_pool_add(coherent_pool,
+- pfn_to_virt(max_low_pfn),
++ (unsigned long)pfn_to_virt(max_low_pfn),
+ hexagon_coherent_pool_size, -1);
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index df9b53f40b1e..7ac7e21b137e 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -355,7 +355,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ unsigned long pp, key;
+ unsigned long v, orig_v, gr;
+ __be64 *hptep;
+- int index;
++ long int index;
+ int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+
+ /* Get SLB entry */
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index a4170048a30b..17fbd07e4245 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -1250,4 +1250,8 @@ void intel_pmu_lbr_init_knl(void)
+
+ x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+ x86_pmu.lbr_sel_map = snb_lbr_sel_map;
++
++ /* Knights Landing does have MISPREDICT bit */
++ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
++ x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index a8a2a271b63d..43fe195f6dca 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -1511,8 +1511,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+- desc_bytes;
++ edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
++ desc_bytes);
+ edesc->iv_dir = DMA_TO_DEVICE;
+
+ /* Make sure IV is located in a DMAable area */
+@@ -1715,8 +1715,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+- desc_bytes;
++ edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
++ desc_bytes);
+ edesc->iv_dir = DMA_FROM_DEVICE;
+
+ /* Make sure IV is located in a DMAable area */
+diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
+index 764be3e6933c..a98a25733a22 100644
+--- a/drivers/crypto/mxs-dcp.c
++++ b/drivers/crypto/mxs-dcp.c
+@@ -63,7 +63,7 @@ struct dcp {
+ struct dcp_coherent_block *coh;
+
+ struct completion completion[DCP_MAX_CHANS];
+- struct mutex mutex[DCP_MAX_CHANS];
++ spinlock_t lock[DCP_MAX_CHANS];
+ struct task_struct *thread[DCP_MAX_CHANS];
+ struct crypto_queue queue[DCP_MAX_CHANS];
+ };
+@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
+
+ int ret;
+
+- do {
+- __set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ set_current_state(TASK_INTERRUPTIBLE);
+
+- mutex_lock(&sdcp->mutex[chan]);
++ spin_lock(&sdcp->lock[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+- mutex_unlock(&sdcp->mutex[chan]);
++ spin_unlock(&sdcp->lock[chan]);
++
++ if (!backlog && !arq) {
++ schedule();
++ continue;
++ }
++
++ set_current_state(TASK_RUNNING);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
+ if (arq) {
+ ret = mxs_dcp_aes_block_crypt(arq);
+ arq->complete(arq, ret);
+- continue;
+ }
+-
+- schedule();
+- } while (!kthread_should_stop());
++ }
+
+ return 0;
+ }
+@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
+ rctx->ecb = ecb;
+ actx->chan = DCP_CHAN_CRYPTO;
+
+- mutex_lock(&sdcp->mutex[actx->chan]);
++ spin_lock(&sdcp->lock[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+- mutex_unlock(&sdcp->mutex[actx->chan]);
++ spin_unlock(&sdcp->lock[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+
+@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
+ struct ahash_request *req;
+ int ret, fini;
+
+- do {
+- __set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ set_current_state(TASK_INTERRUPTIBLE);
+
+- mutex_lock(&sdcp->mutex[chan]);
++ spin_lock(&sdcp->lock[chan]);
+ backlog = crypto_get_backlog(&sdcp->queue[chan]);
+ arq = crypto_dequeue_request(&sdcp->queue[chan]);
+- mutex_unlock(&sdcp->mutex[chan]);
++ spin_unlock(&sdcp->lock[chan]);
++
++ if (!backlog && !arq) {
++ schedule();
++ continue;
++ }
++
++ set_current_state(TASK_RUNNING);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
+ ret = dcp_sha_req_to_buf(arq);
+ fini = rctx->fini;
+ arq->complete(arq, ret);
+- if (!fini)
+- continue;
+ }
+-
+- schedule();
+- } while (!kthread_should_stop());
++ }
+
+ return 0;
+ }
+@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
+ rctx->init = 1;
+ }
+
+- mutex_lock(&sdcp->mutex[actx->chan]);
++ spin_lock(&sdcp->lock[actx->chan]);
+ ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
+- mutex_unlock(&sdcp->mutex[actx->chan]);
++ spin_unlock(&sdcp->lock[actx->chan]);
+
+ wake_up_process(sdcp->thread[actx->chan]);
+ mutex_unlock(&actx->mutex);
+@@ -983,7 +990,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, sdcp);
+
+ for (i = 0; i < DCP_MAX_CHANS; i++) {
+- mutex_init(&sdcp->mutex[i]);
++ spin_lock_init(&sdcp->lock[i]);
+ init_completion(&sdcp->completion[i]);
+ crypto_init_queue(&sdcp->queue[i], 50);
+ }
+diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+index f172171668ee..7c470ae97f60 100644
+--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_C3XXX_PCI_DEVICE_ID:
+@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+index 24ec908eb26c..613c7d5644ce 100644
+--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_C3XXXIOV_PCI_DEVICE_ID:
+@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
+index 58a984c9c3ec..cb11d85d7bb3 100644
+--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
+@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_C62X_PCI_DEVICE_ID:
+@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+index b9f3e0e4fde9..278452b8ef81 100644
+--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_C62XIOV_PCI_DEVICE_ID:
+@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+index 2ce01f010c74..07b741aed108 100644
+--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_DH895XCC_PCI_DEVICE_ID:
+@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+index 26ab17bfc6da..3da0f951cb59 100644
+--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct adf_hw_device_data *hw_data;
+ char name[ADF_DEVICE_NAME_LENGTH];
+ unsigned int i, bar_nr;
+- int ret, bar_mask;
++ unsigned long bar_mask;
++ int ret;
+
+ switch (ent->device) {
+ case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* Find and map all the device's BARS */
+ i = 0;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+- ADF_PCI_MAX_BARS * 2) {
++ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
+ struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+ bar->base_addr = pci_resource_start(pdev, bar_nr);
+diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
+index e717f8dc3966..202d367a21e4 100644
+--- a/drivers/gpio/gpio-adp5588.c
++++ b/drivers/gpio/gpio-adp5588.c
+@@ -41,6 +41,8 @@ struct adp5588_gpio {
+ uint8_t int_en[3];
+ uint8_t irq_mask[3];
+ uint8_t irq_stat[3];
++ uint8_t int_input_en[3];
++ uint8_t int_lvl_cached[3];
+ };
+
+ static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
+@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
+ struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+ int i;
+
+- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
++ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
++ if (dev->int_input_en[i]) {
++ mutex_lock(&dev->lock);
++ dev->dir[i] &= ~dev->int_input_en[i];
++ dev->int_input_en[i] = 0;
++ adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
++ dev->dir[i]);
++ mutex_unlock(&dev->lock);
++ }
++
++ if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
++ dev->int_lvl_cached[i] = dev->int_lvl[i];
++ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
++ dev->int_lvl[i]);
++ }
++
+ if (dev->int_en[i] ^ dev->irq_mask[i]) {
+ dev->int_en[i] = dev->irq_mask[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
+ dev->int_en[i]);
+ }
++ }
+
+ mutex_unlock(&dev->irq_lock);
+ }
+@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
+ else
+ return -EINVAL;
+
+- adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
+- adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
+- dev->int_lvl[bank]);
++ dev->int_input_en[bank] |= bit;
+
+ return 0;
+ }
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 70b3c556f6cf..33d4bd505b5b 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -25,7 +25,6 @@
+
+ struct acpi_gpio_event {
+ struct list_head node;
+- struct list_head initial_sync_list;
+ acpi_handle handle;
+ unsigned int pin;
+ unsigned int irq;
+@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
+ struct mutex conn_lock;
+ struct gpio_chip *chip;
+ struct list_head events;
++ struct list_head deferred_req_irqs_list_entry;
+ };
+
+-static LIST_HEAD(acpi_gpio_initial_sync_list);
+-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
++/*
++ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
++ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
++ * late_initcall_sync handler, so that other builtin drivers can register their
++ * OpRegions before the event handlers can run. This list contains gpiochips
++ * for which the acpi_gpiochip_request_interrupts() has been deferred.
++ */
++static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
++static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
++static bool acpi_gpio_deferred_req_irqs_done;
+
+ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
+ {
+@@ -146,21 +154,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+ return gpiochip_get_desc(chip, offset);
+ }
+
+-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+-{
+- mutex_lock(&acpi_gpio_initial_sync_list_lock);
+- list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+-}
+-
+-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+-{
+- mutex_lock(&acpi_gpio_initial_sync_list_lock);
+- if (!list_empty(&event->initial_sync_list))
+- list_del_init(&event->initial_sync_list);
+- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+-}
+-
+ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
+ {
+ struct acpi_gpio_event *event = data;
+@@ -247,7 +240,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+
+ gpiod_direction_input(desc);
+
+- value = gpiod_get_value(desc);
++ value = gpiod_get_value_cansleep(desc);
+
+ ret = gpiochip_lock_as_irq(chip, pin);
+ if (ret) {
+@@ -290,7 +283,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ event->irq = irq;
+ event->pin = pin;
+ event->desc = desc;
+- INIT_LIST_HEAD(&event->initial_sync_list);
+
+ ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
+ "ACPI:Event", event);
+@@ -312,10 +304,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
+ * may refer to OperationRegions from other (builtin) drivers which
+ * may be probed after us.
+ */
+- if (handler == acpi_gpio_irq_handler &&
+- (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+- ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+- acpi_gpio_add_to_initial_sync_list(event);
++ if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++ ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++ handler(event->irq, event);
+
+ return AE_OK;
+
+@@ -344,6 +335,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
++ bool defer;
+
+ if (!chip->parent || !chip->to_irq)
+ return;
+@@ -356,6 +348,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+ if (ACPI_FAILURE(status))
+ return;
+
++ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++ defer = !acpi_gpio_deferred_req_irqs_done;
++ if (defer)
++ list_add(&acpi_gpio->deferred_req_irqs_list_entry,
++ &acpi_gpio_deferred_req_irqs_list);
++ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++
++ if (defer)
++ return;
++
+ acpi_walk_resources(handle, "_AEI",
+ acpi_gpiochip_request_interrupt, acpi_gpio);
+ }
+@@ -386,11 +388,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
+ if (ACPI_FAILURE(status))
+ return;
+
++ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++ if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
++ list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
++ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
++
+ list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
+ struct gpio_desc *desc;
+
+- acpi_gpio_del_from_initial_sync_list(event);
+-
+ if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+ disable_irq_wake(event->irq);
+
+@@ -1101,6 +1106,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
+
+ acpi_gpio->chip = chip;
+ INIT_LIST_HEAD(&acpi_gpio->events);
++ INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
+
+ status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
+ if (ACPI_FAILURE(status)) {
+@@ -1247,20 +1253,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
+ return con_id == NULL;
+ }
+
+-/* Sync the initial state of handlers after all builtin drivers have probed */
+-static int acpi_gpio_initial_sync(void)
++/* Run deferred acpi_gpiochip_request_interrupts() */
++static int acpi_gpio_handle_deferred_request_interrupts(void)
+ {
+- struct acpi_gpio_event *event, *ep;
++ struct acpi_gpio_chip *acpi_gpio, *tmp;
++
++ mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
++ list_for_each_entry_safe(acpi_gpio, tmp,
++ &acpi_gpio_deferred_req_irqs_list,
++ deferred_req_irqs_list_entry) {
++ acpi_handle handle;
+
+- mutex_lock(&acpi_gpio_initial_sync_list_lock);
+- list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+- initial_sync_list) {
+- acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+- list_del_init(&event->initial_sync_list);
++ handle = ACPI_HANDLE(acpi_gpio->chip->parent);
++ acpi_walk_resources(handle, "_AEI",
++ acpi_gpiochip_request_interrupt, acpi_gpio);
++
++ list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+ }
+- mutex_unlock(&acpi_gpio_initial_sync_list_lock);
++
++ acpi_gpio_deferred_req_irqs_done = true;
++ mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+ return 0;
+ }
+ /* We must use _sync so that this runs after the first deferred_probe run */
+-late_initcall_sync(acpi_gpio_initial_sync);
++late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index ba38f530e403..ee8c046cab62 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
+ struct of_phandle_args *gpiospec = data;
+
+ return chip->gpiodev->dev.of_node == gpiospec->np &&
++ chip->of_xlate &&
+ chip->of_xlate(chip, gpiospec, NULL) >= 0;
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 7e0bfd7347f6..7d5de4ef4f22 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -489,7 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ if (ret)
+ goto out_free_descs;
+ lh->descs[i] = desc;
+- count = i;
++ count = i + 1;
+
+ if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 1ae5ae8c45a4..1a75a6b9ab2f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
++ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+ };
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 5f892ad6476e..44aa58ab55d0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -37,6 +37,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ {
+ struct drm_gem_object *gobj;
+ unsigned long size;
++ int r;
+
+ gobj = drm_gem_object_lookup(p->filp, data->handle);
+ if (gobj == NULL)
+@@ -48,20 +49,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ p->uf_entry.tv.shared = true;
+ p->uf_entry.user_pages = NULL;
+
+- size = amdgpu_bo_size(p->uf_entry.robj);
+- if (size != PAGE_SIZE || (data->offset + 8) > size)
+- return -EINVAL;
+-
+- *offset = data->offset;
+-
+ drm_gem_object_put_unlocked(gobj);
+
++ size = amdgpu_bo_size(p->uf_entry.robj);
++ if (size != PAGE_SIZE || (data->offset + 8) > size) {
++ r = -EINVAL;
++ goto error_unref;
++ }
++
+ if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+- amdgpu_bo_unref(&p->uf_entry.robj);
+- return -EINVAL;
++ r = -EINVAL;
++ goto error_unref;
+ }
+
++ *offset = data->offset;
++
+ return 0;
++
++error_unref:
++ amdgpu_bo_unref(&p->uf_entry.robj);
++ return r;
+ }
+
+ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+index e7fa67063cdc..cb9e1cd456b8 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+@@ -1142,7 +1142,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+- result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
++ result = vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+index 7c5bed29ffef..6160a6158cf2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+@@ -412,14 +412,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
+ }
+
+ static void
+-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
++nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
+ {
+ struct nvkm_dp *dp = nvkm_dp(outp);
+
+- /* Prevent link from being retrained if sink sends an IRQ. */
+- atomic_set(&dp->lt.done, 0);
+- ior->dp.nr = 0;
+-
+ /* Execute DisableLT script from DP Info Table. */
+ nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
+ init.outp = &dp->outp.info;
+@@ -428,6 +424,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+ );
+ }
+
++static void
++nvkm_dp_release(struct nvkm_outp *outp)
++{
++ struct nvkm_dp *dp = nvkm_dp(outp);
++
++ /* Prevent link from being retrained if sink sends an IRQ. */
++ atomic_set(&dp->lt.done, 0);
++ dp->outp.ior->dp.nr = 0;
++}
++
+ static int
+ nvkm_dp_acquire(struct nvkm_outp *outp)
+ {
+@@ -576,6 +582,7 @@ nvkm_dp_func = {
+ .fini = nvkm_dp_fini,
+ .acquire = nvkm_dp_acquire,
+ .release = nvkm_dp_release,
++ .disable = nvkm_dp_disable,
+ };
+
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+index 0c570dbd3021..bc18a96bc61a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+@@ -436,11 +436,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
+ nv50_disp_super_ied_off(head, ior, 2);
+
+ /* If we're shutting down the OR's only active head, execute
+- * the output path's release function.
++ * the output path's disable function.
+ */
+ if (ior->arm.head == (1 << head->id)) {
+- if ((outp = ior->arm.outp) && outp->func->release)
+- outp->func->release(outp, ior);
++ if ((outp = ior->arm.outp) && outp->func->disable)
++ outp->func->disable(outp, ior);
+ }
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+index be9e7f8c3b23..bbba77ff9385 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
+ if (ior) {
+ outp->acquired &= ~user;
+ if (!outp->acquired) {
++ if (outp->func->release && outp->ior)
++ outp->func->release(outp);
+ outp->ior->asy.outp = NULL;
+ outp->ior = NULL;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+index ea84d7d5741a..97196f802924 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+@@ -41,7 +41,8 @@ struct nvkm_outp_func {
+ void (*init)(struct nvkm_outp *);
+ void (*fini)(struct nvkm_outp *);
+ int (*acquire)(struct nvkm_outp *);
+- void (*release)(struct nvkm_outp *, struct nvkm_ior *);
++ void (*release)(struct nvkm_outp *);
++ void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
+ };
+
+ #define OUTP_MSG(o,l,f,a...) do { \
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+index 1730371933df..be0dd6074b57 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+@@ -158,7 +158,8 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
+ }
+
+ /* load and execute some other ucode image (bios therm?) */
+- return pmu_load(init, 0x01, post, NULL, NULL);
++ pmu_load(init, 0x01, post, NULL, NULL);
++ return 0;
+ }
+
+ static const struct nvkm_devinit_func
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 25b7bd56ae11..1cb41992aaa1 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+ {
+- if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
++ if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
++ usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
+ /* The fn key on Apple USB keyboards */
+ set_bit(EV_REP, hi->input->evbit);
+ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
+@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
++ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
++ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 81ee1d026648..3fc8c0d67592 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -85,6 +85,7 @@
+ #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
+
+ #define USB_VENDOR_ID_APPLE 0x05ac
++#define BT_VENDOR_ID_APPLE 0x004c
+ #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
+ #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
+ #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
+@@ -154,6 +155,7 @@
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
+ #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
++#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
+ #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
+@@ -924,6 +926,7 @@
+ #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
+ #define USB_DEVICE_ID_SAITEK_PS1000 0x0621
+ #define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
++#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
+ #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
+ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
+ #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
+diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
+index 39e642686ff0..683861f324e3 100644
+--- a/drivers/hid/hid-saitek.c
++++ b/drivers/hid/hid-saitek.c
+@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
++ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
+diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
+index 25363fc571bc..faba542d1b07 100644
+--- a/drivers/hid/hid-sensor-hub.c
++++ b/drivers/hid/hid-sensor-hub.c
+@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
+ }
+ EXPORT_SYMBOL_GPL(sensor_hub_device_close);
+
++static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *rsize)
++{
++ /*
++ * Checks if the report descriptor of Thinkpad Helix 2 has a logical
++ * minimum for magnetic flux axis greater than the maximum.
++ */
++ if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
++ *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
++ rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
++ rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
++ rdesc[921] == 0x07 && rdesc[922] == 0x00) {
++ /* Sets negative logical minimum for mag x, y and z */
++ rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
++ rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
++ rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
++ rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
++ }
++
++ return rdesc;
++}
++
+ static int sensor_hub_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+ {
+@@ -742,6 +764,7 @@ static struct hid_driver sensor_hub_driver = {
+ .probe = sensor_hub_probe,
+ .remove = sensor_hub_remove,
+ .raw_event = sensor_hub_raw_event,
++ .report_fixup = sensor_hub_report_fixup,
+ #ifdef CONFIG_PM
+ .suspend = sensor_hub_suspend,
+ .resume = sensor_hub_resume,
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index f41901f80b64..5449fc59b7f5 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -74,6 +74,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ __u32 version)
+ {
+ int ret = 0;
++ unsigned int cur_cpu;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+
+@@ -96,9 +97,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+ * the CPU attempting to connect may not be CPU 0.
+ */
+ if (version >= VERSION_WIN8_1) {
+- msg->target_vcpu =
+- hv_cpu_number_to_vp_number(smp_processor_id());
+- vmbus_connection.connect_cpu = smp_processor_id();
++ cur_cpu = get_cpu();
++ msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
++ vmbus_connection.connect_cpu = cur_cpu;
++ put_cpu();
+ } else {
+ msg->target_vcpu = 0;
+ vmbus_connection.connect_cpu = 0;
+diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
+index 9918bdd81619..a403e8579b65 100644
+--- a/drivers/i2c/busses/i2c-uniphier-f.c
++++ b/drivers/i2c/busses/i2c-uniphier-f.c
+@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
+ return ret;
+
+ for (msg = msgs; msg < emsg; msg++) {
+- /* If next message is read, skip the stop condition */
+- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
+- /* but, force it if I2C_M_STOP is set */
+- if (msg->flags & I2C_M_STOP)
+- stop = true;
++ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
++ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
+
+ ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
+ if (ret)
+diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
+index bb181b088291..454f914ae66d 100644
+--- a/drivers/i2c/busses/i2c-uniphier.c
++++ b/drivers/i2c/busses/i2c-uniphier.c
+@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
+ return ret;
+
+ for (msg = msgs; msg < emsg; msg++) {
+- /* If next message is read, skip the stop condition */
+- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
+- /* but, force it if I2C_M_STOP is set */
+- if (msg->flags & I2C_M_STOP)
+- stop = true;
++ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
++ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
+
+ ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
+ if (ret)
+diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
+index d70e2e53d6a7..557214202eff 100644
+--- a/drivers/iio/temperature/maxim_thermocouple.c
++++ b/drivers/iio/temperature/maxim_thermocouple.c
+@@ -267,7 +267,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
+ static const struct spi_device_id maxim_thermocouple_id[] = {
+ {"max6675", MAX6675},
+ {"max31855", MAX31855},
+- {"max31856", MAX31855},
+ {},
+ };
+ MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index a22b992cde38..16423d7ab599 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
+ static DEFINE_IDR(ctx_idr);
+ static DEFINE_IDR(multicast_idr);
+
++static const struct file_operations ucma_fops;
++
+ static inline struct ucma_context *_ucma_find_context(int id,
+ struct ucma_file *file)
+ {
+@@ -1564,6 +1566,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
+ f = fdget(cmd.fd);
+ if (!f.file)
+ return -ENOENT;
++ if (f.file->f_op != &ucma_fops) {
++ ret = -EINVAL;
++ goto file_put;
++ }
+
+ /* Validate current fd and prevent destruction of id. */
+ ctx = ucma_get_ctx(f.file->private_data, cmd.id);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 01746e7b90de..9137030423cd 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3071,7 +3071,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+- __pte = *pte & PM_ADDR_MASK;
++ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+ }
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 38a2ac24428e..151211b4cb1b 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3061,6 +3061,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ rs_set_new(rs);
+ } else if (rs_is_recovering(rs)) {
++ /* Rebuild particular devices */
++ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
++ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
++ rs_setup_recovery(rs, MaxSector);
++ }
+ /* A recovering raid set may be resized */
+ ; /* skip setup rs */
+ } else if (rs_is_reshaping(rs)) {
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 36ef284ad086..45ff8fd00248 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -188,6 +188,12 @@ struct dm_pool_metadata {
+ unsigned long flags;
+ sector_t data_block_size;
+
++ /*
++ * We reserve a section of the metadata for commit overhead.
++ * All reported space does *not* include this.
++ */
++ dm_block_t metadata_reserve;
++
+ /*
+ * Set if a transaction has to be aborted but the attempt to roll back
+ * to the previous (good) transaction failed. The only pool metadata
+@@ -825,6 +831,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
+ return dm_tm_commit(pmd->tm, sblock);
+ }
+
++static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
++{
++ int r;
++ dm_block_t total;
++ dm_block_t max_blocks = 4096; /* 16M */
++
++ r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
++ if (r) {
++ DMERR("could not get size of metadata device");
++ pmd->metadata_reserve = max_blocks;
++ } else
++ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
++}
++
+ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool format_device)
+@@ -858,6 +878,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ return ERR_PTR(r);
+ }
+
++ __set_metadata_reserve(pmd);
++
+ return pmd;
+ }
+
+@@ -1829,6 +1851,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ r = dm_sm_get_nr_free(pmd->metadata_sm, result);
++
++ if (!r) {
++ if (*result < pmd->metadata_reserve)
++ *result = 0;
++ else
++ *result -= pmd->metadata_reserve;
++ }
+ up_read(&pmd->root_lock);
+
+ return r;
+@@ -1941,8 +1970,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
+ int r = -EINVAL;
+
+ down_write(&pmd->root_lock);
+- if (!pmd->fail_io)
++ if (!pmd->fail_io) {
+ r = __resize_space_map(pmd->metadata_sm, new_count);
++ if (!r)
++ __set_metadata_reserve(pmd);
++ }
+ up_write(&pmd->root_lock);
+
+ return r;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 6cf9ad4e4e16..699c40c7fe60 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
+ enum pool_mode {
+ PM_WRITE, /* metadata may be changed */
+ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
++
++ /*
++ * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
++ */
++ PM_OUT_OF_METADATA_SPACE,
+ PM_READ_ONLY, /* metadata may not be changed */
++
+ PM_FAIL, /* all I/O fails */
+ };
+
+@@ -1382,7 +1388,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+ static void requeue_bios(struct pool *pool);
+
+-static void check_for_space(struct pool *pool)
++static bool is_read_only_pool_mode(enum pool_mode mode)
++{
++ return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
++}
++
++static bool is_read_only(struct pool *pool)
++{
++ return is_read_only_pool_mode(get_pool_mode(pool));
++}
++
++static void check_for_metadata_space(struct pool *pool)
++{
++ int r;
++ const char *ooms_reason = NULL;
++ dm_block_t nr_free;
++
++ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
++ if (r)
++ ooms_reason = "Could not get free metadata blocks";
++ else if (!nr_free)
++ ooms_reason = "No free metadata blocks";
++
++ if (ooms_reason && !is_read_only(pool)) {
++ DMERR("%s", ooms_reason);
++ set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
++ }
++}
++
++static void check_for_data_space(struct pool *pool)
+ {
+ int r;
+ dm_block_t nr_free;
+@@ -1408,14 +1442,16 @@ static int commit(struct pool *pool)
+ {
+ int r;
+
+- if (get_pool_mode(pool) >= PM_READ_ONLY)
++ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
+ return -EINVAL;
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r)
+ metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+- else
+- check_for_space(pool);
++ else {
++ check_for_metadata_space(pool);
++ check_for_data_space(pool);
++ }
+
+ return r;
+ }
+@@ -1481,6 +1517,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+ return r;
+ }
+
++ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
++ if (r) {
++ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
++ return r;
++ }
++
++ if (!free_blocks) {
++ /* Let's commit before we use up the metadata reserve. */
++ r = commit(pool);
++ if (r)
++ return r;
++ }
++
+ return 0;
+ }
+
+@@ -1512,6 +1561,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
+ case PM_OUT_OF_DATA_SPACE:
+ return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
+
++ case PM_OUT_OF_METADATA_SPACE:
+ case PM_READ_ONLY:
+ case PM_FAIL:
+ return BLK_STS_IOERR;
+@@ -2475,8 +2525,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ error_retry_list(pool);
+ break;
+
++ case PM_OUT_OF_METADATA_SPACE:
+ case PM_READ_ONLY:
+- if (old_mode != new_mode)
++ if (!is_read_only_pool_mode(old_mode))
+ notify_of_pool_mode_change(pool, "read-only");
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+@@ -3412,6 +3463,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+ DMINFO("%s: growing the metadata device from %llu to %llu blocks",
+ dm_device_name(pool->pool_md),
+ sb_metadata_dev_size, metadata_dev_size);
++
++ if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
++ set_pool_mode(pool, PM_WRITE);
++
+ r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
+@@ -3715,7 +3770,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ struct pool_c *pt = ti->private;
+ struct pool *pool = pt->pool;
+
+- if (get_pool_mode(pool) >= PM_READ_ONLY) {
++ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EOPNOTSUPP;
+@@ -3789,6 +3844,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
+ dm_block_t nr_blocks_data;
+ dm_block_t nr_blocks_metadata;
+ dm_block_t held_root;
++ enum pool_mode mode;
+ char buf[BDEVNAME_SIZE];
+ char buf2[BDEVNAME_SIZE];
+ struct pool_c *pt = ti->private;
+@@ -3859,9 +3915,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
+ else
+ DMEMIT("- ");
+
+- if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
++ mode = get_pool_mode(pool);
++ if (mode == PM_OUT_OF_DATA_SPACE)
+ DMEMIT("out_of_data_space ");
+- else if (pool->pf.mode == PM_READ_ONLY)
++ else if (is_read_only_pool_mode(mode))
+ DMEMIT("ro ");
+ else
+ DMEMIT("rw ");
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 262a0f0f8fd5..927b60e9d3ca 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -4394,11 +4394,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ allow_barrier(conf);
+ }
+
++ raise_barrier(conf, 0);
+ read_more:
+ /* Now schedule reads for blocks from sector_nr to last */
+ r10_bio = raid10_alloc_init_r10buf(conf);
+ r10_bio->state = 0;
+- raise_barrier(conf, sectors_done != 0);
++ raise_barrier(conf, 1);
+ atomic_set(&r10_bio->remaining, 0);
+ r10_bio->mddev = mddev;
+ r10_bio->sector = sector_nr;
+@@ -4494,6 +4495,8 @@ read_more:
+ if (sector_nr <= last)
+ goto read_more;
+
++ lower_barrier(conf);
++
+ /* Now that we have done the whole section we can
+ * update reshape_progress
+ */
+diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
+index 284578b0a349..5c908c510c77 100644
+--- a/drivers/md/raid5-log.h
++++ b/drivers/md/raid5-log.h
+@@ -43,6 +43,11 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
+ extern void ppl_stripe_write_finished(struct stripe_head *sh);
+ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+
++static inline bool raid5_has_log(struct r5conf *conf)
++{
++ return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
++}
++
+ static inline bool raid5_has_ppl(struct r5conf *conf)
+ {
+ return test_bit(MD_HAS_PPL, &conf->mddev->flags);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 5018fb2352c2..dbf51b4c21b3 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -736,7 +736,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
+ {
+ struct r5conf *conf = sh->raid_conf;
+
+- if (conf->log || raid5_has_ppl(conf))
++ if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ return false;
+ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+ !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
+@@ -7717,7 +7717,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
+ sector_t newsize;
+ struct r5conf *conf = mddev->private;
+
+- if (conf->log || raid5_has_ppl(conf))
++ if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ return -EINVAL;
+ sectors &= ~((sector_t)conf->chunk_sectors - 1);
+ newsize = raid5_size(mddev, sectors, mddev->raid_disks);
+@@ -7768,7 +7768,7 @@ static int check_reshape(struct mddev *mddev)
+ {
+ struct r5conf *conf = mddev->private;
+
+- if (conf->log || raid5_has_ppl(conf))
++ if (raid5_has_log(conf) || raid5_has_ppl(conf))
+ return -EINVAL;
+ if (mddev->delta_disks == 0 &&
+ mddev->new_layout == mddev->layout &&
1411 +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
1412 +index e3b7a71fcad9..1a4ffc5d3da4 100644
1413 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c
1414 ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
1415 +@@ -457,7 +457,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
1416 + cqe = &admin_queue->cq.entries[head_masked];
1417 +
1418 + /* Go over all the completions */
1419 +- while ((cqe->acq_common_descriptor.flags &
1420 ++ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
1421 + ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
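++		/* READ_ONCE() forces a fresh load of the flags word on
++		 * every pass, so the compiler cannot cache a stale value
++		 * and miss a phase change written by the device.
++		 */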
1422 + /* Do not read the rest of the completion entry before the
1423 + * phase bit was validated
1424 +@@ -633,7 +633,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
1425 + writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
1426 +
1427 + for (i = 0; i < timeout; i++) {
1428 +- if (read_resp->req_id == mmio_read->seq_num)
1429 ++ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
1430 + break;
1431 +
1432 + udelay(1);
1433 +@@ -1790,8 +1790,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1434 + aenq_common = &aenq_e->aenq_common_desc;
1435 +
1436 + /* Go over all the events */
1437 +- while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
1438 +- phase) {
1439 ++ while ((READ_ONCE(aenq_common->flags) &
1440 ++ ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1441 + pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1442 + aenq_common->group, aenq_common->syndrom,
1443 + (u64)aenq_common->timestamp_low +
1444 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1445 +index 67df5053dc30..60b3ee29d82c 100644
1446 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1447 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1448 +@@ -456,7 +456,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
1449 + return -ENOMEM;
1450 + }
1451 +
1452 +- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
1453 ++ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
1454 + DMA_FROM_DEVICE);
1455 + if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
1456 + u64_stats_update_begin(&rx_ring->syncp);
1457 +@@ -473,7 +473,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
1458 + rx_info->page_offset = 0;
1459 + ena_buf = &rx_info->ena_buf;
1460 + ena_buf->paddr = dma;
1461 +- ena_buf->len = PAGE_SIZE;
1462 ++ ena_buf->len = ENA_PAGE_SIZE;
1463 +
1464 + return 0;
1465 + }
1466 +@@ -490,7 +490,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
1467 + return;
1468 + }
1469 +
1470 +- dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
1471 ++ dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
1472 + DMA_FROM_DEVICE);
1473 +
1474 + __free_page(page);
1475 +@@ -910,10 +910,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1476 + do {
1477 + dma_unmap_page(rx_ring->dev,
1478 + dma_unmap_addr(&rx_info->ena_buf, paddr),
1479 +- PAGE_SIZE, DMA_FROM_DEVICE);
1480 ++ ENA_PAGE_SIZE, DMA_FROM_DEVICE);
1481 +
1482 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1483 +- rx_info->page_offset, len, PAGE_SIZE);
1484 ++ rx_info->page_offset, len, ENA_PAGE_SIZE);
1485 +
1486 + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1487 + "rx skb updated. len %d. data_len %d\n",
1488 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1489 +index 29bb5704260b..3404376c28ca 100644
1490 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
1491 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
1492 +@@ -350,4 +350,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
1493 +
1494 + int ena_get_sset_count(struct net_device *netdev, int sset);
1495 +
1496 ++/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
1497 ++ * driver passes 0.
1498 ++ * Since the max packet size the ENA handles is ~9kB, limit the buffer length to
1499 ++ * 16kB.
1500 ++ */
1501 ++#if PAGE_SIZE > SZ_16K
1502 ++#define ENA_PAGE_SIZE SZ_16K
1503 ++#else
1504 ++#define ENA_PAGE_SIZE PAGE_SIZE
1505 ++#endif
1506 ++
1507 + #endif /* !(ENA_H) */
1508 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1509 +index dfef4ec167c1..c1787be6a258 100644
1510 +--- a/drivers/net/ethernet/cadence/macb_main.c
1511 ++++ b/drivers/net/ethernet/cadence/macb_main.c
1512 +@@ -642,7 +642,7 @@ static int macb_halt_tx(struct macb *bp)
1513 + if (!(status & MACB_BIT(TGO)))
1514 + return 0;
1515 +
1516 +- usleep_range(10, 250);
1517 ++ udelay(250);
1518 + } while (time_before(halt_time, timeout));
1519 +
1520 + return -ETIMEDOUT;
1521 +diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
1522 +index cad52bd331f7..08a750fb60c4 100644
1523 +--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
1524 ++++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
1525 +@@ -486,6 +486,8 @@ struct hnae_ae_ops {
1526 + u8 *auto_neg, u16 *speed, u8 *duplex);
1527 + void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
1528 + void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
1529 ++ bool (*need_adjust_link)(struct hnae_handle *handle,
1530 ++ int speed, int duplex);
1531 + int (*set_loopback)(struct hnae_handle *handle,
1532 + enum hnae_loop loop_mode, int en);
1533 + void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
1534 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1535 +index bd68379d2bea..bf930ab3c2bd 100644
1536 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1537 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
1538 +@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
1539 + hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
1540 + }
1541 +
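++/* Drain the TX path stage by stage, in traffic order: the per-queue TX
++ * rings first, then the PPE TX FIFO, the DSAF fabric and finally the
++ * MAC FIFO, so an earlier stage cannot refill one already checked.
++ */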
1542 ++static int hns_ae_wait_flow_down(struct hnae_handle *handle)
1543 ++{
1544 ++ struct dsaf_device *dsaf_dev;
1545 ++ struct hns_ppe_cb *ppe_cb;
1546 ++ struct hnae_vf_cb *vf_cb;
1547 ++ int ret;
1548 ++ int i;
1549 ++
1550 ++ for (i = 0; i < handle->q_num; i++) {
1551 ++ ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
1552 ++ if (ret)
1553 ++ return ret;
1554 ++ }
1555 ++
1556 ++ ppe_cb = hns_get_ppe_cb(handle);
1557 ++ ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
1558 ++ if (ret)
1559 ++ return ret;
1560 ++
1561 ++ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
1562 ++ if (!dsaf_dev)
1563 ++ return -EINVAL;
1564 ++ ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
1565 ++ if (ret)
1566 ++ return ret;
1567 ++
1568 ++ vf_cb = hns_ae_get_vf_cb(handle);
1569 ++ ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
1570 ++ if (ret)
1571 ++ return ret;
1572 ++
1573 ++ mdelay(10);
1574 ++ return 0;
1575 ++}
1576 ++
1577 + static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
1578 + {
1579 + int q_num = handle->q_num;
1580 +@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
1581 + return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
1582 + }
1583 +
1584 ++static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
1585 ++ int duplex)
1586 ++{
1587 ++ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
1588 ++
1589 ++ return hns_mac_need_adjust_link(mac_cb, speed, duplex);
1590 ++}
1591 ++
1592 + static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
1593 + int duplex)
1594 + {
1595 + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
1596 +
1597 +- hns_mac_adjust_link(mac_cb, speed, duplex);
1598 ++ switch (mac_cb->dsaf_dev->dsaf_ver) {
1599 ++ case AE_VERSION_1:
1600 ++ hns_mac_adjust_link(mac_cb, speed, duplex);
1601 ++ break;
1602 ++
1603 ++ case AE_VERSION_2:
1604 ++		/* the chip needs to drain all packets inside first */
1605 ++ hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
1606 ++ if (hns_ae_wait_flow_down(handle)) {
1607 ++ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
1608 ++ break;
1609 ++ }
1610 ++
1611 ++ hns_mac_adjust_link(mac_cb, speed, duplex);
1612 ++ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
1613 ++ break;
1614 ++
1615 ++ default:
1616 ++ break;
1617 ++ }
1618 ++
1619 ++ return;
1620 + }
1621 +
1622 + static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
1623 +@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
1624 + .get_status = hns_ae_get_link_status,
1625 + .get_info = hns_ae_get_mac_info,
1626 + .adjust_link = hns_ae_adjust_link,
1627 ++ .need_adjust_link = hns_ae_need_adjust_link,
1628 + .set_loopback = hns_ae_config_loopback,
1629 + .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
1630 + .get_pauseparam = hns_ae_get_pauseparam,
1631 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1632 +index 74bd260ca02a..8c7bc5cf193c 100644
1633 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1634 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
1635 +@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
1636 + *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
1637 + }
1638 +
1639 ++static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
1640 ++ int duplex)
1641 ++{
1642 ++ struct mac_driver *drv = (struct mac_driver *)mac_drv;
1643 ++ struct hns_mac_cb *mac_cb = drv->mac_cb;
1644 ++
1645 ++ return (mac_cb->speed != speed) ||
1646 ++ (mac_cb->half_duplex == duplex);
1647 ++}
1648 ++
1649 + static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
1650 + u32 full_duplex)
1651 + {
1652 +@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
1653 + hns_gmac_set_uc_match(mac_drv, en);
1654 + }
1655 +
1656 ++int hns_gmac_wait_fifo_clean(void *mac_drv)
1657 ++{
1658 ++ struct mac_driver *drv = (struct mac_driver *)mac_drv;
1659 ++ int wait_cnt;
1660 ++ u32 val;
1661 ++
1662 ++ wait_cnt = 0;
1663 ++ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1664 ++ val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
1665 ++		/* bits 5~0 count the packets not yet sent */
1666 ++ if ((val & 0x3f) == 0)
1667 ++ break;
1668 ++ usleep_range(100, 200);
1669 ++ }
1670 ++
1671 ++ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1672 ++ dev_err(drv->dev,
1673 ++ "hns ge %d fifo was not idle.\n", drv->mac_id);
1674 ++ return -EBUSY;
1675 ++ }
1676 ++
1677 ++ return 0;
1678 ++}
1679 ++
1680 + static void hns_gmac_init(void *mac_drv)
1681 + {
1682 + u32 port;
1683 +@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
1684 + mac_drv->mac_disable = hns_gmac_disable;
1685 + mac_drv->mac_free = hns_gmac_free;
1686 + mac_drv->adjust_link = hns_gmac_adjust_link;
1687 ++ mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
1688 + mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
1689 + mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
1690 + mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
1691 +@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
1692 + mac_drv->get_strings = hns_gmac_get_strings;
1693 + mac_drv->update_stats = hns_gmac_update_stats;
1694 + mac_drv->set_promiscuous = hns_gmac_set_promisc;
1695 ++ mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
1696 +
1697 + return (void *)mac_drv;
1698 + }
1699 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1700 +index 8b5cdf490850..5a8dbd72fe45 100644
1701 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1702 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
1703 +@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
1704 + return 0;
1705 + }
1706 +
1707 ++/**
1708 ++ *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need changing
1709 ++ *@mac_cb: mac device
1710 ++ *@speed: phy device speed
1711 ++ *@duplex: phy device duplex
1712 ++ *
1713 ++ */
1714 ++bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
1715 ++{
1716 ++ struct mac_driver *mac_ctrl_drv;
1717 ++
1718 ++ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
1719 ++
1720 ++ if (mac_ctrl_drv->need_adjust_link)
1721 ++ return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
1722 ++ (enum mac_speed)speed, duplex);
1723 ++ else
1724 ++ return true;
1725 ++}
1726 ++
1727 + void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
1728 + {
1729 + int ret;
1730 +@@ -432,6 +452,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
1731 + return 0;
1732 + }
1733 +
1734 ++int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
1735 ++{
1736 ++ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
1737 ++
1738 ++ if (drv->wait_fifo_clean)
1739 ++ return drv->wait_fifo_clean(drv);
1740 ++
1741 ++ return 0;
1742 ++}
1743 ++
1744 + void hns_mac_reset(struct hns_mac_cb *mac_cb)
1745 + {
1746 + struct mac_driver *drv = hns_mac_get_drv(mac_cb);
1747 +@@ -1001,6 +1031,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
1748 + return DSAF_MAX_PORT_NUM;
1749 + }
1750 +
1751 ++void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1752 ++{
1753 ++ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1754 ++
1755 ++ mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
1756 ++}
1757 ++
1758 ++void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1759 ++{
1760 ++ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1761 ++
1762 ++ mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
1763 ++}
1764 ++
1765 + /**
1766 + * hns_mac_init - init mac
1767 + * @dsaf_dev: dsa fabric device struct pointer
1768 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1769 +index bbc0a98e7ca3..fbc75341bef7 100644
1770 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1771 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
1772 +@@ -356,6 +356,9 @@ struct mac_driver {
1773 + /*adjust mac mode of port,include speed and duplex*/
1774 + int (*adjust_link)(void *mac_drv, enum mac_speed speed,
1775 + u32 full_duplex);
1776 ++	/* check whether the link needs adjusting */
1777 ++ bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
1778 ++ int duplex);
1779 + 	/* config auto-negotiation mode of port */
1780 + void (*set_an_mode)(void *mac_drv, u8 enable);
1781 + 	/* config loopback mode */
1782 +@@ -394,6 +397,7 @@ struct mac_driver {
1783 + void (*get_info)(void *mac_drv, struct mac_info *mac_info);
1784 +
1785 + void (*update_stats)(void *mac_drv);
1786 ++ int (*wait_fifo_clean)(void *mac_drv);
1787 +
1788 + enum mac_mode mac_mode;
1789 + u8 mac_id;
1790 +@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
1791 +
1792 + int hns_mac_init(struct dsaf_device *dsaf_dev);
1793 + void mac_adjust_link(struct net_device *net_dev);
1794 ++bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
1795 + void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
1796 + int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
1797 + int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
1798 +@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
1799 + int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
1800 + const unsigned char *addr);
1801 + int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
1802 ++void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
1803 ++void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
1804 ++int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
1805 +
1806 + #endif /* _HNS_DSAF_MAC_H */
1807 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1808 +index e0bc79ea3d88..1f056a6b167e 100644
1809 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1810 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
1811 +@@ -2720,6 +2720,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
1812 + soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
1813 + }
1814 +
1815 ++int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
1816 ++{
1817 ++ u32 val, val_tmp;
1818 ++ int wait_cnt;
1819 ++
1820 ++ if (port >= DSAF_SERVICE_NW_NUM)
1821 ++ return 0;
1822 ++
1823 ++ wait_cnt = 0;
1824 ++ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1825 ++ val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
1826 ++ (port + DSAF_XGE_NUM) * 0x40);
1827 ++ val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
1828 ++ (port + DSAF_XGE_NUM) * 0x40);
1829 ++ if (val == val_tmp)
1830 ++ break;
1831 ++
1832 ++ usleep_range(100, 200);
1833 ++ }
1834 ++
1835 ++ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1836 ++ dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
1837 ++ val, val_tmp);
1838 ++ return -EBUSY;
1839 ++ }
1840 ++
1841 ++ return 0;
1842 ++}
1843 ++
1844 + /**
1845 + * dsaf_probe - probe dsaf dev
1846 + * @pdev: dasf platform device
1847 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1848 +index 4507e8222683..0e1cd99831a6 100644
1849 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1850 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
1851 +@@ -44,6 +44,8 @@ struct hns_mac_cb;
1852 + #define DSAF_ROCE_CREDIT_CHN 8
1853 + #define DSAF_ROCE_CHAN_MODE 3
1854 +
1855 ++#define HNS_MAX_WAIT_CNT 10000
1856 ++
1857 + enum dsaf_roce_port_mode {
1858 + DSAF_ROCE_6PORT_MODE,
1859 + DSAF_ROCE_4PORT_MODE,
1860 +@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
1861 +
1862 + int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
1863 + u8 mac_id, u8 port_num);
1864 ++int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
1865 +
1866 + #endif /* __HNS_DSAF_MAIN_H__ */
1867 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1868 +index 93e71e27401b..a19932aeb9d7 100644
1869 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1870 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
1871 +@@ -274,6 +274,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
1872 + dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
1873 + }
1874 +
1875 ++int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
1876 ++{
1877 ++ int wait_cnt;
1878 ++ u32 val;
1879 ++
1880 ++ wait_cnt = 0;
1881 ++ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1882 ++ val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
1883 ++ if (!val)
1884 ++ break;
1885 ++
1886 ++ usleep_range(100, 200);
1887 ++ }
1888 ++
1889 ++ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1890 ++ dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
1891 ++ val);
1892 ++ return -EBUSY;
1893 ++ }
1894 ++
1895 ++ return 0;
1896 ++}
1897 ++
1898 + /**
1899 + * ppe_init_hw - init ppe
1900 + * @ppe_cb: ppe device
1901 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1902 +index 9d8e643e8aa6..f670e63a5a01 100644
1903 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1904 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
1905 +@@ -100,6 +100,7 @@ struct ppe_common_cb {
1906 +
1907 + };
1908 +
1909 ++int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
1910 + int hns_ppe_init(struct dsaf_device *dsaf_dev);
1911 +
1912 + void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
1913 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1914 +index e2e28532e4dc..1e43d7a3ca86 100644
1915 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1916 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
1917 +@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
1918 + "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
1919 + }
1920 +
1921 ++int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
1922 ++{
1923 ++ u32 head, tail;
1924 ++ int wait_cnt;
1925 ++
1926 ++ tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
1927 ++ wait_cnt = 0;
1928 ++ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
1929 ++ head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
1930 ++ if (tail == head)
1931 ++ break;
1932 ++
1933 ++ usleep_range(100, 200);
1934 ++ }
1935 ++
1936 ++ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
1937 ++ dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
1938 ++ return -EBUSY;
1939 ++ }
1940 ++
1941 ++ return 0;
1942 ++}
1943 ++
1944 + /**
1945 + *hns_rcb_reset_ring_hw - ring reset
1946 + *@q: ring struct pointer
1947 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1948 +index 602816498c8d..2319b772a271 100644
1949 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1950 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
1951 +@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
1952 + void hns_rcb_init_hw(struct ring_pair_cb *ring);
1953 + void hns_rcb_reset_ring_hw(struct hnae_queue *q);
1954 + void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
1955 ++int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
1956 + u32 hns_rcb_get_rx_coalesced_frames(
1957 + struct rcb_common_cb *rcb_common, u32 port_idx);
1958 + u32 hns_rcb_get_tx_coalesced_frames(
1959 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1960 +index 46a52d9bb196..6d20e4eb7402 100644
1961 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1962 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1963 +@@ -464,6 +464,7 @@
1964 + #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
1965 + #define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
1966 +
1967 ++#define GMAC_FIFO_STATE_REG 0x0000UL
1968 + #define GMAC_DUPLEX_TYPE_REG 0x0008UL
1969 + #define GMAC_FD_FC_TYPE_REG 0x000CUL
1970 + #define GMAC_TX_WATER_LINE_REG 0x0010UL
1971 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1972 +index 25a9732afc84..07d6a9cf2c55 100644
1973 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1974 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
1975 +@@ -1212,11 +1212,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
1976 + struct hnae_handle *h = priv->ae_handle;
1977 + int state = 1;
1978 +
1979 ++	/* If there is no phy, there is no need to adjust the link */
1980 + if (ndev->phydev) {
1981 +- h->dev->ops->adjust_link(h, ndev->phydev->speed,
1982 +- ndev->phydev->duplex);
1983 +- state = ndev->phydev->link;
1984 ++		/* When the phy link is down, do nothing */
1985 ++ if (ndev->phydev->link == 0)
1986 ++ return;
1987 ++
1988 ++ if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1989 ++ ndev->phydev->duplex)) {
1990 ++			/* The Hi161X chip doesn't support changing gmac
1991 ++			 * speed and duplex while traffic flows. Delay 200ms
1992 ++			 * to make sure there is no more data in the chip FIFO.
1993 ++ */
1994 ++ netif_carrier_off(ndev);
1995 ++ msleep(200);
1996 ++ h->dev->ops->adjust_link(h, ndev->phydev->speed,
1997 ++ ndev->phydev->duplex);
1998 ++ netif_carrier_on(ndev);
1999 ++ }
2000 + }
2001 ++
2002 + state = state && h->dev->ops->get_status(h);
2003 +
2004 + if (state != priv->link) {
2005 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2006 +index 2e14a3ae1d8b..c1e947bb852f 100644
2007 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2008 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2009 +@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
2010 + }
2011 +
2012 + if (h->dev->ops->adjust_link) {
2013 ++ netif_carrier_off(net_dev);
2014 + h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
2015 ++ netif_carrier_on(net_dev);
2016 + return 0;
2017 + }
2018 +
2019 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2020 +index b68d94b49a8a..42183a8b649c 100644
2021 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2022 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2023 +@@ -3108,11 +3108,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
2024 + return budget;
2025 +
2026 + /* all work done, exit the polling mode */
2027 +- napi_complete_done(napi, work_done);
2028 +- if (adapter->rx_itr_setting & 1)
2029 +- ixgbe_set_itr(q_vector);
2030 +- if (!test_bit(__IXGBE_DOWN, &adapter->state))
2031 +- ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
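++	/* napi_complete_done() returns false when NAPI stays scheduled
++	 * (e.g. busy polling); only re-arm the queue interrupt when it
++	 * really completed.
++	 */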
2032 ++ if (likely(napi_complete_done(napi, work_done))) {
2033 ++ if (adapter->rx_itr_setting & 1)
2034 ++ ixgbe_set_itr(q_vector);
2035 ++ if (!test_bit(__IXGBE_DOWN, &adapter->state))
2036 ++ ixgbe_irq_enable_queues(adapter,
2037 ++ BIT_ULL(q_vector->v_idx));
2038 ++ }
2039 +
2040 + return min(work_done, budget - 1);
2041 + }
2042 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2043 +index 9f9c9ff10735..07fda3984e10 100644
2044 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2045 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
2046 +@@ -388,16 +388,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
2047 + }
2048 + }
2049 +
2050 +-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
2051 ++static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
2052 + {
2053 +- return (u16)((dev->pdev->bus->number << 8) |
2054 ++ return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
2055 ++ (dev->pdev->bus->number << 8) |
2056 + PCI_SLOT(dev->pdev->devfn));
2057 + }
2058 +
2059 + /* Must be called with intf_mutex held */
2060 + struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
2061 + {
2062 +- u16 pci_id = mlx5_gen_pci_id(dev);
2063 ++ u32 pci_id = mlx5_gen_pci_id(dev);
2064 + struct mlx5_core_dev *res = NULL;
2065 + struct mlx5_core_dev *tmp_dev;
2066 + struct mlx5_priv *priv;
2067 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2068 +index 3669005b9294..f7e540eeb877 100644
2069 +--- a/drivers/net/ethernet/realtek/r8169.c
2070 ++++ b/drivers/net/ethernet/realtek/r8169.c
2071 +@@ -760,7 +760,7 @@ struct rtl8169_tc_offsets {
2072 + };
2073 +
2074 + enum rtl_flag {
2075 +- RTL_FLAG_TASK_ENABLED,
2076 ++ RTL_FLAG_TASK_ENABLED = 0,
2077 + RTL_FLAG_TASK_SLOW_PENDING,
2078 + RTL_FLAG_TASK_RESET_PENDING,
2079 + RTL_FLAG_TASK_PHY_PENDING,
2080 +@@ -7657,7 +7657,8 @@ static int rtl8169_close(struct net_device *dev)
2081 + rtl8169_update_counters(dev);
2082 +
2083 + rtl_lock_work(tp);
2084 +- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
2085 ++ /* Clear all task flags */
2086 ++ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
2087 +
2088 + rtl8169_down(dev);
2089 + rtl_unlock_work(tp);
2090 +@@ -7838,7 +7839,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
2091 +
2092 + rtl_lock_work(tp);
2093 + napi_disable(&tp->napi);
2094 +- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
2095 ++ /* Clear all task flags */
2096 ++ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
2097 ++
2098 + rtl_unlock_work(tp);
2099 +
2100 + rtl_pll_power_down(tp);
2101 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2102 +index d686ba10fecc..aafa7aa18fbd 100644
2103 +--- a/drivers/net/wireless/mac80211_hwsim.c
2104 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2105 +@@ -2632,9 +2632,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2106 + IEEE80211_VHT_CAP_SHORT_GI_80 |
2107 + IEEE80211_VHT_CAP_SHORT_GI_160 |
2108 + IEEE80211_VHT_CAP_TXSTBC |
2109 +- IEEE80211_VHT_CAP_RXSTBC_1 |
2110 +- IEEE80211_VHT_CAP_RXSTBC_2 |
2111 +- IEEE80211_VHT_CAP_RXSTBC_3 |
2112 + IEEE80211_VHT_CAP_RXSTBC_4 |
2113 + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
2114 + sband->vht_cap.vht_mcs.rx_mcs_map =
2115 +@@ -3124,6 +3121,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2116 + if (info->attrs[HWSIM_ATTR_CHANNELS])
2117 + param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
2118 +
2119 ++ if (param.channels < 1) {
2120 ++ GENL_SET_ERR_MSG(info, "must have at least one channel");
2121 ++ return -EINVAL;
2122 ++ }
2123 ++
2124 + if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
2125 + GENL_SET_ERR_MSG(info, "too many channels specified");
2126 + return -EINVAL;
2127 +diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
2128 +index 3333d417b248..a70b3d24936d 100644
2129 +--- a/drivers/nvme/target/rdma.c
2130 ++++ b/drivers/nvme/target/rdma.c
2131 +@@ -65,6 +65,7 @@ struct nvmet_rdma_rsp {
2132 +
2133 + struct nvmet_req req;
2134 +
2135 ++ bool allocated;
2136 + u8 n_rdma;
2137 + u32 flags;
2138 + u32 invalidate_rkey;
2139 +@@ -167,11 +168,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
2140 + unsigned long flags;
2141 +
2142 + spin_lock_irqsave(&queue->rsps_lock, flags);
2143 +- rsp = list_first_entry(&queue->free_rsps,
2144 ++ rsp = list_first_entry_or_null(&queue->free_rsps,
2145 + struct nvmet_rdma_rsp, free_list);
2146 +- list_del(&rsp->free_list);
2147 ++ if (likely(rsp))
2148 ++ list_del(&rsp->free_list);
2149 + spin_unlock_irqrestore(&queue->rsps_lock, flags);
2150 +
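++	/* The pre-allocated rsp pool can run dry under memory pressure;
++	 * fall back to a one-off allocation and flag it so that
++	 * nvmet_rdma_put_rsp() frees it instead of re-listing it.
++	 */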
2151 ++ if (unlikely(!rsp)) {
2152 ++ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
2153 ++ if (unlikely(!rsp))
2154 ++ return NULL;
2155 ++ rsp->allocated = true;
2156 ++ }
2157 ++
2158 + return rsp;
2159 + }
2160 +
2161 +@@ -180,6 +189,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
2162 + {
2163 + unsigned long flags;
2164 +
2165 ++ if (rsp->allocated) {
2166 ++ kfree(rsp);
2167 ++ return;
2168 ++ }
2169 ++
2170 + spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
2171 + list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
2172 + spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
2173 +@@ -756,6 +770,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2174 +
2175 + cmd->queue = queue;
2176 + rsp = nvmet_rdma_get_rsp(queue);
2177 ++ if (unlikely(!rsp)) {
2178 ++ /*
2179 ++		 * We get here only under memory pressure:
2180 ++		 * silently drop and have the host retry,
2181 ++		 * as we can't even fail the command.
2182 ++ */
2183 ++ nvmet_rdma_post_recv(queue->dev, cmd);
2184 ++ return;
2185 ++ }
2186 + rsp->queue = queue;
2187 + rsp->cmd = cmd;
2188 + rsp->flags = 0;
2189 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2190 +index 0a6afd4b283d..4f2747cd15a6 100644
2191 +--- a/drivers/s390/net/qeth_core_main.c
2192 ++++ b/drivers/s390/net/qeth_core_main.c
2193 +@@ -23,6 +23,7 @@
2194 + #include <linux/netdevice.h>
2195 + #include <linux/netdev_features.h>
2196 + #include <linux/skbuff.h>
2197 ++#include <linux/vmalloc.h>
2198 +
2199 + #include <net/iucv/af_iucv.h>
2200 + #include <net/dsfield.h>
2201 +@@ -4728,7 +4729,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
2202 +
2203 + priv.buffer_len = oat_data.buffer_len;
2204 + priv.response_len = 0;
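++	/* buffer_len comes from userspace and may be large; vzalloc()
++	 * avoids the contiguous high-order allocation kzalloc() needs.
++	 */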
2205 +- priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
2206 ++ priv.buffer = vzalloc(oat_data.buffer_len);
2207 + if (!priv.buffer) {
2208 + rc = -ENOMEM;
2209 + goto out;
2210 +@@ -4769,7 +4770,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
2211 + rc = -EFAULT;
2212 +
2213 + out_free:
2214 +- kfree(priv.buffer);
2215 ++ vfree(priv.buffer);
2216 + out:
2217 + return rc;
2218 + }
2219 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2220 +index 521293b1f4fa..11ae67842edf 100644
2221 +--- a/drivers/s390/net/qeth_l2_main.c
2222 ++++ b/drivers/s390/net/qeth_l2_main.c
2223 +@@ -484,7 +484,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
2224 + default:
2225 + dev_kfree_skb_any(skb);
2226 + QETH_CARD_TEXT(card, 3, "inbunkno");
2227 +- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2228 ++ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
2229 + continue;
2230 + }
2231 + work_done++;
2232 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2233 +index 1c62cbbaa66f..cd73172bff47 100644
2234 +--- a/drivers/s390/net/qeth_l3_main.c
2235 ++++ b/drivers/s390/net/qeth_l3_main.c
2236 +@@ -1793,7 +1793,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2237 + default:
2238 + dev_kfree_skb_any(skb);
2239 + QETH_CARD_TEXT(card, 3, "inbunkno");
2240 +- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2241 ++ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
2242 + continue;
2243 + }
2244 + work_done++;
2245 +diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
2246 +index 5be0086142ca..ab30db8c36c6 100644
2247 +--- a/drivers/scsi/csiostor/csio_hw.c
2248 ++++ b/drivers/scsi/csiostor/csio_hw.c
2249 +@@ -2010,8 +2010,8 @@ bye:
2250 + }
2251 +
2252 + /*
2253 +- * Returns -EINVAL if attempts to flash the firmware failed
2254 +- * else returns 0,
2255 ++ * Returns -EINVAL if attempts to flash the firmware failed,
2256 ++ * -ENOMEM if memory allocation failed else returns 0,
2257 + * if flashing was not attempted because the card had the
2258 + * latest firmware ECANCELED is returned
2259 + */
2260 +@@ -2039,6 +2039,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2261 + return -EINVAL;
2262 + }
2263 +
2264 ++ /* allocate memory to read the header of the firmware on the
2265 ++ * card
2266 ++ */
2267 ++ card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2268 ++ if (!card_fw)
2269 ++ return -ENOMEM;
2270 ++
2271 + if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
2272 + fw_bin_file = FW_FNAME_T5;
2273 + else
2274 +@@ -2052,11 +2059,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2275 + fw_size = fw->size;
2276 + }
2277 +
2278 +- /* allocate memory to read the header of the firmware on the
2279 +- * card
2280 +- */
2281 +- card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2282 +-
2283 + /* upgrade FW logic */
2284 + ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2285 + hw->fw_state, reset);
2286 +diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
2287 +index b8b22ce60ecc..95141066c3fa 100644
2288 +--- a/drivers/scsi/qedi/qedi.h
2289 ++++ b/drivers/scsi/qedi/qedi.h
2290 +@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
2291 + QEDI_NVM_TGT_SEC,
2292 + };
2293 +
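++/* The iSCSI config image stored in NVRAM carries a trailing CRC word;
++ * buffers must cover cfg + crc for the full image read to fit.
++ */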
2294 ++struct qedi_nvm_iscsi_image {
2295 ++ struct nvm_iscsi_cfg iscsi_cfg;
2296 ++ u32 crc;
2297 ++};
2298 ++
2299 + struct qedi_uio_ctrl {
2300 + /* meta data */
2301 + u32 uio_hsi_version;
2302 +@@ -294,7 +299,7 @@ struct qedi_ctx {
2303 + void *bdq_pbl_list;
2304 + dma_addr_t bdq_pbl_list_dma;
2305 + u8 bdq_pbl_list_num_entries;
2306 +- struct nvm_iscsi_cfg *iscsi_cfg;
2307 ++ struct qedi_nvm_iscsi_image *iscsi_image;
2308 + dma_addr_t nvm_buf_dma;
2309 + void __iomem *bdq_primary_prod;
2310 + void __iomem *bdq_secondary_prod;
2311 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
2312 +index e7daadc089fc..24b945b555ba 100644
2313 +--- a/drivers/scsi/qedi/qedi_main.c
2314 ++++ b/drivers/scsi/qedi/qedi_main.c
2315 +@@ -1147,23 +1147,26 @@ exit_setup_int:
2316 +
2317 + static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
2318 + {
2319 +- if (qedi->iscsi_cfg)
2320 ++ if (qedi->iscsi_image)
2321 + dma_free_coherent(&qedi->pdev->dev,
2322 +- sizeof(struct nvm_iscsi_cfg),
2323 +- qedi->iscsi_cfg, qedi->nvm_buf_dma);
2324 ++ sizeof(struct qedi_nvm_iscsi_image),
2325 ++ qedi->iscsi_image, qedi->nvm_buf_dma);
2326 + }
2327 +
2328 + static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
2329 + {
2330 +- qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
2331 +- sizeof(struct nvm_iscsi_cfg),
2332 +- &qedi->nvm_buf_dma, GFP_KERNEL);
2333 +- if (!qedi->iscsi_cfg) {
2334 ++ struct qedi_nvm_iscsi_image nvm_image;
2335 ++
2336 ++ qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
2337 ++ sizeof(nvm_image),
2338 ++ &qedi->nvm_buf_dma,
2339 ++ GFP_KERNEL);
2340 ++ if (!qedi->iscsi_image) {
2341 + QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
2342 + return -ENOMEM;
2343 + }
2344 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2345 +- "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
2346 ++ "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
2347 + qedi->nvm_buf_dma);
2348 +
2349 + return 0;
2350 +@@ -1716,7 +1719,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
2351 + struct nvm_iscsi_block *block;
2352 +
2353 + pf = qedi->dev_info.common.abs_pf_id;
2354 +- block = &qedi->iscsi_cfg->block[0];
2355 ++ block = &qedi->iscsi_image->iscsi_cfg.block[0];
2356 + for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
2357 + flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
2358 + NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
2359 +@@ -2008,15 +2011,14 @@ static void qedi_boot_release(void *data)
2360 + static int qedi_get_boot_info(struct qedi_ctx *qedi)
2361 + {
2362 + int ret = 1;
2363 +- u16 len;
2364 +-
2365 +- len = sizeof(struct nvm_iscsi_cfg);
2366 ++ struct qedi_nvm_iscsi_image nvm_image;
2367 +
2368 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2369 + "Get NVM iSCSI CFG image\n");
2370 + ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2371 + QED_NVM_IMAGE_ISCSI_CFG,
2372 +- (char *)qedi->iscsi_cfg, len);
2373 ++ (char *)qedi->iscsi_image,
2374 ++ sizeof(nvm_image));
2375 + if (ret)
2376 + QEDI_ERR(&qedi->dbg_ctx,
2377 + "Could not get NVM image. ret = %d\n", ret);
2378 +diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2379 +index 98e27da34f3c..27893d90c4ef 100644
2380 +--- a/drivers/target/iscsi/iscsi_target_login.c
2381 ++++ b/drivers/target/iscsi/iscsi_target_login.c
2382 +@@ -310,11 +310,9 @@ static int iscsi_login_zero_tsih_s1(
2383 + return -ENOMEM;
2384 + }
2385 +
2386 +- ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
2387 +- if (unlikely(ret)) {
2388 +- kfree(sess);
2389 +- return ret;
2390 +- }
2391 ++ if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
2392 ++ goto free_sess;
2393 ++
2394 + sess->init_task_tag = pdu->itt;
2395 + memcpy(&sess->isid, pdu->isid, 6);
2396 + sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
2397 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2398 +index 45b57c294d13..401c983ec5f3 100644
2399 +--- a/drivers/tty/serial/mvebu-uart.c
2400 ++++ b/drivers/tty/serial/mvebu-uart.c
2401 +@@ -327,8 +327,10 @@ static void mvebu_uart_set_termios(struct uart_port *port,
2402 + if ((termios->c_cflag & CREAD) == 0)
2403 + port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
2404 +
2405 +- if (old)
2406 ++ if (old) {
2407 + tty_termios_copy_hw(termios, old);
2408 ++ termios->c_cflag |= CS8;
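++		/* tty_termios_copy_hw() restores the old character size;
++		 * re-assert CS8, the only size this driver handles
++		 */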
2409 ++ }
2410 +
2411 + baud = uart_get_baud_rate(port, termios, old, 0, 460800);
2412 + uart_update_timeout(port, termios->c_cflag, baud);
2413 +diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
2414 +index 78d0204e3e20..d17d7052605b 100644
2415 +--- a/drivers/usb/gadget/udc/fotg210-udc.c
2416 ++++ b/drivers/usb/gadget/udc/fotg210-udc.c
2417 +@@ -1066,12 +1066,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
2418 + static int fotg210_udc_remove(struct platform_device *pdev)
2419 + {
2420 + struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
2421 ++ int i;
2422 +
2423 + usb_del_gadget_udc(&fotg210->gadget);
2424 + iounmap(fotg210->reg);
2425 + free_irq(platform_get_irq(pdev, 0), fotg210);
2426 +
2427 + fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
2428 ++ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
2429 ++ kfree(fotg210->ep[i]);
2430 + kfree(fotg210);
2431 +
2432 + return 0;
2433 +@@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2434 + /* initialize udc */
2435 + fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
2436 + if (fotg210 == NULL)
2437 +- goto err_alloc;
2438 ++ goto err;
2439 +
2440 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
2441 + _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
2442 +@@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2443 + fotg210->reg = ioremap(res->start, resource_size(res));
2444 + if (fotg210->reg == NULL) {
2445 + pr_err("ioremap error.\n");
2446 +- goto err_map;
2447 ++ goto err_alloc;
2448 + }
2449 +
2450 + spin_lock_init(&fotg210->lock);
2451 +@@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
2452 + fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
2453 + GFP_KERNEL);
2454 + if (fotg210->ep0_req == NULL)
2455 +- goto err_req;
2456 ++ goto err_map;
2457 +
2458 + fotg210_init(fotg210);
2459 +
2460 +@@ -1190,12 +1193,14 @@ err_req:
2461 + fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
2462 +
2463 + err_map:
2464 +- if (fotg210->reg)
2465 +- iounmap(fotg210->reg);
2466 ++ iounmap(fotg210->reg);
2467 +
2468 + err_alloc:
2469 ++ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
2470 ++ kfree(fotg210->ep[i]);
2471 + kfree(fotg210);
2472 +
2473 ++err:
2474 + return ret;
2475 + }
2476 +
2477 +diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
2478 +index 0673f286afbd..4f48f5730e12 100644
2479 +--- a/drivers/usb/misc/yurex.c
2480 ++++ b/drivers/usb/misc/yurex.c
2481 +@@ -417,6 +417,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
2482 + spin_unlock_irqrestore(&dev->lock, flags);
2483 + mutex_unlock(&dev->io_mutex);
2484 +
2485 ++ if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
2486 ++ return -EIO;
2487 ++
2488 + return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
2489 + }
2490 +
2491 +diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
2492 +index d4265c8ebb22..b1357aa4bc55 100644
2493 +--- a/drivers/xen/cpu_hotplug.c
2494 ++++ b/drivers/xen/cpu_hotplug.c
2495 +@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
2496 +
2497 + static void disable_hotplug_cpu(int cpu)
2498 + {
2499 +- if (cpu_online(cpu)) {
2500 +- lock_device_hotplug();
2501 ++ if (!cpu_is_hotpluggable(cpu))
2502 ++ return;
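++	/* hold the hotplug lock across both the offline and the
++	 * unregister so the online/present checks stay consistent
++	 */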
2503 ++ lock_device_hotplug();
2504 ++ if (cpu_online(cpu))
2505 + device_offline(get_cpu_device(cpu));
2506 +- unlock_device_hotplug();
2507 +- }
2508 +- if (cpu_present(cpu))
2509 ++ if (!cpu_online(cpu) && cpu_present(cpu)) {
2510 + xen_arch_unregister_cpu(cpu);
2511 +-
2512 +- set_cpu_present(cpu, false);
2513 ++ set_cpu_present(cpu, false);
2514 ++ }
2515 ++ unlock_device_hotplug();
2516 + }
2517 +
2518 + static int vcpu_online(unsigned int cpu)
2519 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2520 +index 08e4af04d6f2..e6c1934734b7 100644
2521 +--- a/drivers/xen/events/events_base.c
2522 ++++ b/drivers/xen/events/events_base.c
2523 +@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
2524 + clear_evtchn_to_irq_row(row);
2525 + }
2526 +
2527 +- evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
2528 ++ evtchn_to_irq[row][col] = irq;
2529 + return 0;
2530 + }
2531 +
2532 +diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
2533 +index 587d12829925..0444ebdda3f0 100644
2534 +--- a/drivers/xen/manage.c
2535 ++++ b/drivers/xen/manage.c
2536 +@@ -283,9 +283,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
2537 + /*
2538 + * The Xenstore watch fires directly after registering it and
2539 + * after a suspend/resume cycle. So ENOENT is no error but
2540 +- * might happen in those cases.
2541 ++ * might happen in those cases. ERANGE is observed when we get
2542 ++	 * an empty value (''); this happens when we acknowledge the
2543 ++ * request by writing '\0' below.
2544 + */
2545 +- if (err != -ENOENT)
2546 ++ if (err != -ENOENT && err != -ERANGE)
2547 + pr_err("Error %d reading sysrq code in control/sysrq\n",
2548 + err);
2549 + xenbus_transaction_end(xbt, 1);
2550 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2551 +index a39b1f0b0606..a0947f4a3e87 100644
2552 +--- a/fs/btrfs/volumes.c
2553 ++++ b/fs/btrfs/volumes.c
2554 +@@ -4517,7 +4517,12 @@ again:
2555 +
2556 + /* Now btrfs_update_device() will change the on-disk size. */
2557 + ret = btrfs_update_device(trans, device);
2558 +- btrfs_end_transaction(trans);
2559 ++ if (ret < 0) {
2560 ++ btrfs_abort_transaction(trans, ret);
2561 ++ btrfs_end_transaction(trans);
2562 ++ } else {
2563 ++ ret = btrfs_commit_transaction(trans);
2564 ++ }
2565 + done:
2566 + btrfs_free_path(path);
2567 + if (ret) {
2568 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
2569 +index b380e0871372..a2b2355e7f01 100644
2570 +--- a/fs/cifs/cifs_unicode.c
2571 ++++ b/fs/cifs/cifs_unicode.c
2572 +@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
2573 + case SFM_LESSTHAN:
2574 + *target = '<';
2575 + break;
2576 +- case SFM_SLASH:
2577 +- *target = '\\';
2578 +- break;
2579 + case SFM_SPACE:
2580 + *target = ' ';
2581 + break;
2582 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2583 +index b5a436583469..2e936f94f102 100644
2584 +--- a/fs/cifs/cifssmb.c
2585 ++++ b/fs/cifs/cifssmb.c
2586 +@@ -589,10 +589,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
2587 + }
2588 +
2589 + count = 0;
2590 ++ /*
2591 ++ * We know that all the name entries in the protocols array
2592 ++ * are short (< 16 bytes anyway) and are NUL terminated.
2593 ++ */
2594 + for (i = 0; i < CIFS_NUM_PROT; i++) {
2595 +- strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
2596 +- count += strlen(protocols[i].name) + 1;
2597 +- /* null at end of source and target buffers anyway */
2598 ++ size_t len = strlen(protocols[i].name) + 1;
2599 ++
2600 ++ memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
2601 ++ count += len;
2602 + }
2603 + inc_rfc1001_len(pSMB, count);
2604 + pSMB->ByteCount = cpu_to_le16(count);
2605 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2606 +index 460084a8eac5..bcab30d4a6c7 100644
2607 +--- a/fs/cifs/misc.c
2608 ++++ b/fs/cifs/misc.c
2609 +@@ -398,9 +398,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
2610 + (struct smb_com_transaction_change_notify_rsp *)buf;
2611 + struct file_notify_information *pnotify;
2612 + __u32 data_offset = 0;
2613 ++ size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
2614 ++
2615 + if (get_bcc(buf) > sizeof(struct file_notify_information)) {
2616 + data_offset = le32_to_cpu(pSMBr->DataOffset);
2617 +
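++		/* reject a DataOffset pointing past the received PDU,
++		 * else pnotify below would be read out of bounds
++		 */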
2618 ++ if (data_offset >
2619 ++ len - sizeof(struct file_notify_information)) {
2620 ++ cifs_dbg(FYI, "invalid data_offset %u\n",
2621 ++ data_offset);
2622 ++ return true;
2623 ++ }
2624 + pnotify = (struct file_notify_information *)
2625 + ((char *)&pSMBr->hdr.Protocol + data_offset);
2626 + cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
2627 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2628 +index 759cbbf7b1af..4e5b05263e4a 100644
2629 +--- a/fs/cifs/smb2ops.c
2630 ++++ b/fs/cifs/smb2ops.c
2631 +@@ -1239,7 +1239,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2632 + }
2633 +
2634 + srch_inf->entries_in_buffer = 0;
2635 +- srch_inf->index_of_last_entry = 0;
2636 ++ srch_inf->index_of_last_entry = 2;
2637 +
2638 + rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2639 + fid->volatile_fid, 0, srch_inf);
2640 +diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
2641 +index 3e04279446e8..44e7d180ebec 100644
2642 +--- a/fs/ocfs2/dlm/dlmmaster.c
2643 ++++ b/fs/ocfs2/dlm/dlmmaster.c
2644 +@@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
2645 +
2646 + res->last_used = 0;
2647 +
2648 +- spin_lock(&dlm->spinlock);
2649 ++ spin_lock(&dlm->track_lock);
2650 + list_add_tail(&res->tracking, &dlm->tracking_list);
2651 +- spin_unlock(&dlm->spinlock);
2652 ++ spin_unlock(&dlm->track_lock);
2653 +
2654 + memset(res->lvb, 0, DLM_LVB_LEN);
2655 + memset(res->refmap, 0, sizeof(res->refmap));
2656 +diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
2657 +index 8a10506db993..d9468de3c951 100644
2658 +--- a/fs/overlayfs/namei.c
2659 ++++ b/fs/overlayfs/namei.c
2660 +@@ -519,7 +519,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
2661 + index = NULL;
2662 + goto out;
2663 + }
2664 +- pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
2665 ++ pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
2666 + "overlayfs: mount with '-o index=off' to disable inodes index.\n",
2667 + d_inode(origin)->i_ino, name.len, name.name,
2668 + err);
2669 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
2670 +index d9a0edd4e57e..e1e743005583 100644
2671 +--- a/fs/overlayfs/overlayfs.h
2672 ++++ b/fs/overlayfs/overlayfs.h
2673 +@@ -136,8 +136,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
2674 + const void *value, size_t size, int flags)
2675 + {
2676 + int err = vfs_setxattr(dentry, name, value, size, flags);
2677 +- pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
2678 +- dentry, name, (int) size, (char *) value, flags, err);
2679 ++ pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
2680 ++ dentry, name, min((int)size, 48), value, size, flags, err);
2681 + return err;
2682 + }
2683 +
2684 +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
2685 +index f60ce2e04df0..afdc2533ce74 100644
2686 +--- a/fs/overlayfs/util.c
2687 ++++ b/fs/overlayfs/util.c
2688 +@@ -438,7 +438,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
2689 + struct dentry *upperdentry = ovl_dentry_upper(dentry);
2690 + struct dentry *index = NULL;
2691 + struct inode *inode;
2692 +- struct qstr name;
2693 ++ struct qstr name = { };
2694 + int err;
2695 +
2696 + err = ovl_get_index_name(lowerdentry, &name);
2697 +@@ -477,6 +477,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
2698 + goto fail;
2699 +
2700 + out:
2701 ++ kfree(name.name);
2702 + dput(index);
2703 + return;
2704 +
2705 +diff --git a/fs/proc/base.c b/fs/proc/base.c
2706 +index c5c42f3e33d1..9063738ff1f0 100644
2707 +--- a/fs/proc/base.c
2708 ++++ b/fs/proc/base.c
2709 +@@ -431,6 +431,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
2710 + int err;
2711 + int i;
2712 +
2713 ++ /*
2714 ++ * The ability to racily run the kernel stack unwinder on a running task
2715 ++ * and then observe the unwinder output is scary; while it is useful for
2716 ++ * debugging kernel issues, it can also allow an attacker to leak kernel
2717 ++ * stack contents.
2718 ++ * Doing this in a manner that is at least safe from races would require
2719 ++ * some work to ensure that the remote task can not be scheduled; and
2720 ++ * even then, this would still expose the unwinder as local attack
2721 ++ * surface.
2722 ++ * Therefore, this interface is restricted to root.
2723 ++ */
2724 ++ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
2725 ++ return -EACCES;
2726 ++
2727 + entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
2728 + if (!entries)
2729 + return -ENOMEM;
2730 +diff --git a/fs/xattr.c b/fs/xattr.c
2731 +index be2ce57cd6ad..50029811fbe3 100644
2732 +--- a/fs/xattr.c
2733 ++++ b/fs/xattr.c
2734 +@@ -951,17 +951,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
2735 + int err = 0;
2736 +
2737 + #ifdef CONFIG_FS_POSIX_ACL
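++	/* Only advertise the POSIX ACL names when this superblock was
++	 * mounted with ACL support; listing names userspace cannot then
++	 * read would be misleading.
++	 */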
2738 +- if (inode->i_acl) {
2739 +- err = xattr_list_one(&buffer, &remaining_size,
2740 +- XATTR_NAME_POSIX_ACL_ACCESS);
2741 +- if (err)
2742 +- return err;
2743 +- }
2744 +- if (inode->i_default_acl) {
2745 +- err = xattr_list_one(&buffer, &remaining_size,
2746 +- XATTR_NAME_POSIX_ACL_DEFAULT);
2747 +- if (err)
2748 +- return err;
2749 ++ if (IS_POSIXACL(inode)) {
2750 ++ if (inode->i_acl) {
2751 ++ err = xattr_list_one(&buffer, &remaining_size,
2752 ++ XATTR_NAME_POSIX_ACL_ACCESS);
2753 ++ if (err)
2754 ++ return err;
2755 ++ }
2756 ++ if (inode->i_default_acl) {
2757 ++ err = xattr_list_one(&buffer, &remaining_size,
2758 ++ XATTR_NAME_POSIX_ACL_DEFAULT);
2759 ++ if (err)
2760 ++ return err;
2761 ++ }
2762 + }
2763 + #endif
2764 +
2765 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2766 +index 450e2cd31ed6..a0ffc62e7677 100644
2767 +--- a/kernel/bpf/verifier.c
2768 ++++ b/kernel/bpf/verifier.c
2769 +@@ -2076,6 +2076,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2770 + u64 umin_val, umax_val;
2771 + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2772 +
2773 ++ if (insn_bitness == 32) {
2774 ++ /* Relevant for 32-bit RSH: Information can propagate towards
2775 ++ * LSB, so it isn't sufficient to only truncate the output to
2776 ++ * 32 bits.
2777 ++ */
2778 ++ coerce_reg_to_size(dst_reg, 4);
2779 ++ coerce_reg_to_size(&src_reg, 4);
2780 ++ }
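++	/* After this point both operands are 32-bit; only dst_reg still
++	 * needs the final truncation at the tail of this function, which
++	 * is why the src_reg coercion there is dropped.
++	 */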
2781 ++
2782 + smin_val = src_reg.smin_value;
2783 + smax_val = src_reg.smax_value;
2784 + umin_val = src_reg.umin_value;
2785 +@@ -2295,7 +2304,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2786 + if (BPF_CLASS(insn->code) != BPF_ALU64) {
2787 + /* 32-bit ALU ops are (32,32)->32 */
2788 + coerce_reg_to_size(dst_reg, 4);
2789 +- coerce_reg_to_size(&src_reg, 4);
2790 + }
2791 +
2792 + __reg_deduce_bounds(dst_reg);
2793 +diff --git a/mm/madvise.c b/mm/madvise.c
2794 +index 751e97aa2210..576b753be428 100644
2795 +--- a/mm/madvise.c
2796 ++++ b/mm/madvise.c
2797 +@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
2798 + new_flags |= VM_DONTDUMP;
2799 + break;
2800 + case MADV_DODUMP:
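++		/* hugetlbfs vmas set VM_DONTEXPAND, which is part of
++		 * VM_SPECIAL, yet dumping them is legitimate, so exempt
++		 * them from this check
++		 */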
2801 +- if (new_flags & VM_SPECIAL) {
2802 ++ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
2803 + error = -EINVAL;
2804 + goto out;
2805 + }
2806 +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
2807 +index e9c6aa3ed05b..3d0d12fbd8dd 100644
2808 +--- a/net/mac80211/ibss.c
2809 ++++ b/net/mac80211/ibss.c
2810 +@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
2811 + if (len < IEEE80211_DEAUTH_FRAME_LEN)
2812 + return;
2813 +
2814 +- ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
2815 +- mgmt->sa, mgmt->da, mgmt->bssid, reason);
2816 ++ ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2817 ++ ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
2818 + sta_info_destroy_addr(sdata, mgmt->sa);
2819 + }
2820 +
2821 +@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
2822 + auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
2823 + auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
2824 +
2825 +- ibss_dbg(sdata,
2826 +- "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
2827 +- mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
2828 ++ ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2829 ++ ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
2830 ++ mgmt->bssid, auth_transaction);
2831 +
2832 + if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
2833 + return;
2834 +@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2835 + rx_timestamp = drv_get_tsf(local, sdata);
2836 + }
2837 +
2838 +- ibss_dbg(sdata,
2839 +- "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
2840 ++ ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
2841 + mgmt->sa, mgmt->bssid,
2842 +- (unsigned long long)rx_timestamp,
2843 ++ (unsigned long long)rx_timestamp);
2844 ++ ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
2845 + (unsigned long long)beacon_timestamp,
2846 + (unsigned long long)(rx_timestamp - beacon_timestamp),
2847 + jiffies);
2848 +@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
2849 +
2850 + tx_last_beacon = drv_tx_last_beacon(local);
2851 +
2852 +- ibss_dbg(sdata,
2853 +- "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
2854 +- mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
2855 ++ ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
2856 ++ ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
2857 ++ mgmt->bssid, tx_last_beacon);
2858 +
2859 + if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
2860 + return;
2861 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
2862 +index 8aa1f5b6a051..8a51f94ec1ce 100644
2863 +--- a/net/mac80211/main.c
2864 ++++ b/net/mac80211/main.c
2865 +@@ -255,8 +255,27 @@ static void ieee80211_restart_work(struct work_struct *work)
2866 +
2867 + flush_work(&local->radar_detected_work);
2868 + rtnl_lock();
2869 +- list_for_each_entry(sdata, &local->interfaces, list)
2870 ++ list_for_each_entry(sdata, &local->interfaces, list) {
2871 ++ /*
2872 ++ * XXX: there may be more work for other vif types and even
2873 ++ * for station mode: a good thing would be to run most of
2874 ++ * the iface type's dependent _stop (ieee80211_mg_stop,
2875 ++ * ieee80211_ibss_stop) etc...
2876 ++ * For now, fix only the specific bug that was seen: race
2877 ++ * between csa_connection_drop_work and us.
2878 ++ */
2879 ++ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2880 ++ /*
2881 ++ * This worker is scheduled from the iface worker that
2882 ++ * runs on mac80211's workqueue, so we can't be
2883 ++ * scheduling this worker after the cancel right here.
2884 ++ * The exception is ieee80211_chswitch_done.
2885 ++ * Then we can have a race...
2886 ++ */
2887 ++ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
2888 ++ }
2889 + flush_delayed_work(&sdata->dec_tailroom_needed_wk);
2890 ++ }
2891 + ieee80211_scan_cancel(local);
2892 +
2893 + /* make sure any new ROC will consider local->in_reconfig */
2894 +@@ -467,10 +486,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
2895 + cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
2896 + IEEE80211_VHT_CAP_SHORT_GI_80 |
2897 + IEEE80211_VHT_CAP_SHORT_GI_160 |
2898 +- IEEE80211_VHT_CAP_RXSTBC_1 |
2899 +- IEEE80211_VHT_CAP_RXSTBC_2 |
2900 +- IEEE80211_VHT_CAP_RXSTBC_3 |
2901 +- IEEE80211_VHT_CAP_RXSTBC_4 |
2902 ++ IEEE80211_VHT_CAP_RXSTBC_MASK |
2903 + IEEE80211_VHT_CAP_TXSTBC |
2904 + IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2905 + IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2906 +@@ -1171,6 +1187,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
2907 + #if IS_ENABLED(CONFIG_IPV6)
2908 + unregister_inet6addr_notifier(&local->ifa6_notifier);
2909 + #endif
2910 ++ ieee80211_txq_teardown_flows(local);
2911 +
2912 + rtnl_lock();
2913 +
2914 +@@ -1199,7 +1216,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
2915 + skb_queue_purge(&local->skb_queue);
2916 + skb_queue_purge(&local->skb_queue_unreliable);
2917 + skb_queue_purge(&local->skb_queue_tdls_chsw);
2918 +- ieee80211_txq_teardown_flows(local);
2919 +
2920 + destroy_workqueue(local->workqueue);
2921 + wiphy_unregister(local->hw.wiphy);
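
Two notes on the main.c hunks above: the restart path now cancels csa_connection_drop_work per station interface before flushing the tailroom work, closing the race described in the comment, and ieee80211_txq_teardown_flows() moves ahead of the RTNL section so the TXQ flows are torn down before the interfaces are removed. The RXSTBC change is behavior-preserving: the Rx STBC field is three bits wide and the four named values already OR together to the full field mask. A compile-time sketch (constants restated here from include/linux/ieee80211.h):

    /* Values restated from include/linux/ieee80211.h (3-bit Rx STBC field). */
    #define IEEE80211_VHT_CAP_RXSTBC_1      0x00000100
    #define IEEE80211_VHT_CAP_RXSTBC_2      0x00000200
    #define IEEE80211_VHT_CAP_RXSTBC_3      0x00000300
    #define IEEE80211_VHT_CAP_RXSTBC_4      0x00000400
    #define IEEE80211_VHT_CAP_RXSTBC_MASK   0x00000700

    _Static_assert((IEEE80211_VHT_CAP_RXSTBC_1 |
                    IEEE80211_VHT_CAP_RXSTBC_2 |
                    IEEE80211_VHT_CAP_RXSTBC_3 |
                    IEEE80211_VHT_CAP_RXSTBC_4) ==
                   IEEE80211_VHT_CAP_RXSTBC_MASK,
                   "ORing the four named values equals the field mask");

    int main(void) { return 0; }
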
2922 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
2923 +index d6d3f316de4c..055ea36ff27b 100644
2924 +--- a/net/mac80211/mesh_hwmp.c
2925 ++++ b/net/mac80211/mesh_hwmp.c
2926 +@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
2927 + forward = false;
2928 + reply = true;
2929 + target_metric = 0;
2930 ++
2931 ++ if (SN_GT(target_sn, ifmsh->sn))
2932 ++ ifmsh->sn = target_sn;
2933 ++
2934 + if (time_after(jiffies, ifmsh->last_sn_update +
2935 + net_traversal_jiffies(sdata)) ||
2936 + time_before(jiffies, ifmsh->last_sn_update)) {
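
The SN_GT() used above is mac80211's circular (serial-number) comparison, so the mesh's own sequence number is only ever bumped forward, wraparound included, when a PREQ advertises a newer target SN. A standalone sketch of the comparison (sn_gt() mirrors the macro in net/mac80211/mesh.h; names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Circular comparison: treat the unsigned difference as signed so
     * values just past a 2^32 wrap still sort as "greater". */
    static int sn_gt(uint32_t x, uint32_t y)
    {
            return (int32_t)(y - x) < 0;
    }

    int main(void)
    {
            printf("%d\n", sn_gt(2, 1));            /* 1: plain ordering */
            printf("%d\n", sn_gt(5, 0xfffffffb));   /* 1: 5 is "after" 2^32-5 */
            return 0;
    }
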
2937 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2938 +index 052dbd4fa366..328ac10084e4 100644
2939 +--- a/net/mac80211/mlme.c
2940 ++++ b/net/mac80211/mlme.c
2941 +@@ -988,6 +988,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
2942 + */
2943 +
2944 + if (sdata->reserved_chanctx) {
2945 ++ struct ieee80211_supported_band *sband = NULL;
2946 ++ struct sta_info *mgd_sta = NULL;
2947 ++ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
2948 ++
2949 + /*
2950 + * with multi-vif csa driver may call ieee80211_csa_finish()
2951 + * many times while waiting for other interfaces to use their
2952 +@@ -996,6 +1000,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
2953 + if (sdata->reserved_ready)
2954 + goto out;
2955 +
2956 ++ if (sdata->vif.bss_conf.chandef.width !=
2957 ++ sdata->csa_chandef.width) {
2958 ++ /*
2959 ++ * For managed interface, we need to also update the AP
2960 ++ * station bandwidth and align the rate scale algorithm
2961 ++ * on the bandwidth change. Here we only consider the
2962 ++ * bandwidth of the new channel definition (as channel
2963 ++ * switch flow does not have the full HT/VHT/HE
2964 ++ * information), assuming that if additional changes are
2965 ++ * required they would be done as part of the processing
2966 ++ * of the next beacon from the AP.
2967 ++ */
2968 ++ switch (sdata->csa_chandef.width) {
2969 ++ case NL80211_CHAN_WIDTH_20_NOHT:
2970 ++ case NL80211_CHAN_WIDTH_20:
2971 ++ default:
2972 ++ bw = IEEE80211_STA_RX_BW_20;
2973 ++ break;
2974 ++ case NL80211_CHAN_WIDTH_40:
2975 ++ bw = IEEE80211_STA_RX_BW_40;
2976 ++ break;
2977 ++ case NL80211_CHAN_WIDTH_80:
2978 ++ bw = IEEE80211_STA_RX_BW_80;
2979 ++ break;
2980 ++ case NL80211_CHAN_WIDTH_80P80:
2981 ++ case NL80211_CHAN_WIDTH_160:
2982 ++ bw = IEEE80211_STA_RX_BW_160;
2983 ++ break;
2984 ++ }
2985 ++
2986 ++ mgd_sta = sta_info_get(sdata, ifmgd->bssid);
2987 ++ sband =
2988 ++ local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
2989 ++ }
2990 ++
2991 ++ if (sdata->vif.bss_conf.chandef.width >
2992 ++ sdata->csa_chandef.width) {
2993 ++ mgd_sta->sta.bandwidth = bw;
2994 ++ rate_control_rate_update(local, sband, mgd_sta,
2995 ++ IEEE80211_RC_BW_CHANGED);
2996 ++ }
2997 ++
2998 + ret = ieee80211_vif_use_reserved_context(sdata);
2999 + if (ret) {
3000 + sdata_info(sdata,
3001 +@@ -1006,6 +1052,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
3002 + goto out;
3003 + }
3004 +
3005 ++ if (sdata->vif.bss_conf.chandef.width <
3006 ++ sdata->csa_chandef.width) {
3007 ++ mgd_sta->sta.bandwidth = bw;
3008 ++ rate_control_rate_update(local, sband, mgd_sta,
3009 ++ IEEE80211_RC_BW_CHANGED);
3010 ++ }
3011 ++
3012 + goto out;
3013 + }
3014 +
3015 +@@ -1227,6 +1280,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
3016 + cbss->beacon_interval));
3017 + return;
3018 + drop_connection:
3019 ++ /*
3020 ++ * This is just so that the disconnect flow will know that
3021 ++ * we were trying to switch channel and failed. In case the
3022 ++ * mode is 1 (we are not allowed to Tx), we will know not to
3023 ++ * send a deauthentication frame. Those two fields will be
3024 ++ * reset when the disconnection worker runs.
3025 ++ */
3026 ++ sdata->vif.csa_active = true;
3027 ++ sdata->csa_block_tx = csa_ie.mode;
3028 ++
3029 + ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
3030 + mutex_unlock(&local->chanctx_mtx);
3031 + mutex_unlock(&local->mtx);
3032 +@@ -2397,6 +2460,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3033 + struct ieee80211_local *local = sdata->local;
3034 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3035 + u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
3036 ++ bool tx;
3037 +
3038 + sdata_lock(sdata);
3039 + if (!ifmgd->associated) {
3040 +@@ -2404,6 +2468,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3041 + return;
3042 + }
3043 +
3044 ++ tx = !sdata->csa_block_tx;
3045 ++
3046 + /* AP is probably out of range (or not reachable for another reason) so
3047 + * remove the bss struct for that AP.
3048 + */
3049 +@@ -2411,7 +2477,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3050 +
3051 + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
3052 + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
3053 +- true, frame_buf);
3054 ++ tx, frame_buf);
3055 + mutex_lock(&local->mtx);
3056 + sdata->vif.csa_active = false;
3057 + ifmgd->csa_waiting_bcn = false;
3058 +@@ -2422,7 +2488,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
3059 + }
3060 + mutex_unlock(&local->mtx);
3061 +
3062 +- ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
3063 ++ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
3064 + WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
3065 +
3066 + sdata_unlock(sdata);
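
The ordering in the chswitch hunks above is deliberate: when the channel narrows, the AP station's RX bandwidth is clamped before the reserved channel context is applied, and when it widens, it is raised only afterwards, so rate control never sees a peer bandwidth wider than the operating channel. A toy model of that invariant (plain integers standing in for the chandef/sta structures; not the driver API):

    #include <assert.h>

    static void switch_width(unsigned int *chan_bw, unsigned int *peer_bw,
                             unsigned int new_bw)
    {
            if (new_bw < *chan_bw)          /* narrowing: clamp the peer first */
                    *peer_bw = new_bw;

            *chan_bw = new_bw;              /* the channel context switch */

            if (new_bw > *peer_bw)          /* widening: raise the peer after */
                    *peer_bw = new_bw;
    }

    int main(void)
    {
            unsigned int chan = 80, peer = 80;

            switch_width(&chan, &peer, 40);         /* 80 MHz -> 40 MHz */
            assert(chan == 40 && peer == 40);

            switch_width(&chan, &peer, 160);        /* 40 MHz -> 160 MHz */
            assert(chan == 160 && peer == 160);
            return 0;
    }
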
3067 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3068 +index ccb65f18df5d..d8fddd88bf46 100644
3069 +--- a/net/mac80211/tx.c
3070 ++++ b/net/mac80211/tx.c
3071 +@@ -3022,27 +3022,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
3072 + }
3073 +
3074 + static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
3075 +- struct sk_buff *skb, int headroom,
3076 +- int *subframe_len)
3077 ++ struct sk_buff *skb, int headroom)
3078 + {
3079 +- int amsdu_len = *subframe_len + sizeof(struct ethhdr);
3080 +- int padding = (4 - amsdu_len) & 3;
3081 +-
3082 +- if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
3083 ++ if (skb_headroom(skb) < headroom) {
3084 + I802_DEBUG_INC(local->tx_expand_skb_head);
3085 +
3086 +- if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
3087 ++ if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
3088 + wiphy_debug(local->hw.wiphy,
3089 + "failed to reallocate TX buffer\n");
3090 + return false;
3091 + }
3092 + }
3093 +
3094 +- if (padding) {
3095 +- *subframe_len += padding;
3096 +- skb_put_zero(skb, padding);
3097 +- }
3098 +-
3099 + return true;
3100 + }
3101 +
3102 +@@ -3066,8 +3057,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
3103 + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
3104 + return true;
3105 +
3106 +- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
3107 +- &subframe_len))
3108 ++ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
3109 + return false;
3110 +
3111 + data = skb_push(skb, sizeof(*amsdu_hdr));
3112 +@@ -3133,7 +3123,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3113 + void *data;
3114 + bool ret = false;
3115 + unsigned int orig_len;
3116 +- int n = 1, nfrags;
3117 ++ int n = 2, nfrags, pad = 0;
3118 ++ u16 hdrlen;
3119 +
3120 + if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
3121 + return false;
3122 +@@ -3166,9 +3157,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3123 + if (skb->len + head->len > max_amsdu_len)
3124 + goto out;
3125 +
3126 +- if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3127 +- goto out;
3128 +-
3129 + nfrags = 1 + skb_shinfo(skb)->nr_frags;
3130 + nfrags += 1 + skb_shinfo(head)->nr_frags;
3131 + frag_tail = &skb_shinfo(head)->frag_list;
3132 +@@ -3184,10 +3172,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3133 + if (max_frags && nfrags > max_frags)
3134 + goto out;
3135 +
3136 +- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
3137 +- &subframe_len))
3138 ++ if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3139 + goto out;
3140 +
3141 ++ /*
3142 ++ * Pad out the previous subframe to a multiple of 4 by adding the
3143 ++ * padding to the next one, that's being added. Note that head->len
3144 ++ * is the length of the full A-MSDU, but that works since each time
3145 ++ * we add a new subframe we pad out the previous one to a multiple
3146 ++ * of 4 and thus it no longer matters in the next round.
3147 ++ */
3148 ++ hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
3149 ++ if ((head->len - hdrlen) & 3)
3150 ++ pad = 4 - ((head->len - hdrlen) & 3);
3151 ++
3152 ++ if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
3153 ++ 2 + pad))
3154 ++ goto out_recalc;
3155 ++
3156 + ret = true;
3157 + data = skb_push(skb, ETH_ALEN + 2);
3158 + memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
3159 +@@ -3197,15 +3199,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3160 + memcpy(data, &len, 2);
3161 + memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
3162 +
3163 ++ memset(skb_push(skb, pad), 0, pad);
3164 ++
3165 + head->len += skb->len;
3166 + head->data_len += skb->len;
3167 + *frag_tail = skb;
3168 +
3169 +- flow->backlog += head->len - orig_len;
3170 +- tin->backlog_bytes += head->len - orig_len;
3171 +-
3172 +- fq_recalc_backlog(fq, tin, flow);
3173 ++out_recalc:
3174 ++ if (head->len != orig_len) {
3175 ++ flow->backlog += head->len - orig_len;
3176 ++ tin->backlog_bytes += head->len - orig_len;
3177 +
3178 ++ fq_recalc_backlog(fq, tin, flow);
3179 ++ }
3180 + out:
3181 + spin_unlock_bh(&fq->lock);
3182 +
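
The padding arithmetic above moves each subframe's trailing pad into the headroom of the subframe that follows it. Since head->len covers the whole A-MSDU including the 802.11 header, the hunk subtracts hdrlen first; the remainder is the usual round-up-to-4 computation, sketched here on its own (helper name is ours):

    #include <assert.h>

    /* Bytes needed to bring len up to the next multiple of 4 (0..3);
     * equivalent to the hunk's "if (len & 3) pad = 4 - (len & 3)". */
    static unsigned int amsdu_pad(unsigned int len)
    {
            return (4 - (len & 3)) & 3;
    }

    int main(void)
    {
            assert(amsdu_pad(0) == 0);
            assert(amsdu_pad(1) == 3);
            assert(amsdu_pad(2) == 2);
            assert(amsdu_pad(3) == 1);
            assert(amsdu_pad(4) == 0);
            return 0;
    }
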
3183 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3184 +index 742aacb317e5..3ae365f92bff 100644
3185 +--- a/net/netfilter/nf_tables_api.c
3186 ++++ b/net/netfilter/nf_tables_api.c
3187 +@@ -4250,6 +4250,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
3188 + }
3189 + set->ndeact++;
3190 +
3191 ++ nft_set_elem_deactivate(ctx->net, set, elem);
3192 + nft_trans_elem_set(trans) = set;
3193 + nft_trans_elem(trans) = *elem;
3194 + list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3195 +diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
3196 +index 57ef175dfbfa..504d5f730f4e 100644
3197 +--- a/net/netfilter/xt_cluster.c
3198 ++++ b/net/netfilter/xt_cluster.c
3199 +@@ -133,6 +133,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
3200 + static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
3201 + {
3202 + struct xt_cluster_match_info *info = par->matchinfo;
3203 ++ int ret;
3204 +
3205 + if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
3206 + pr_info("you have exceeded the maximum "
3207 +@@ -145,7 +146,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
3208 + "higher than the total number of nodes\n");
3209 + return -EDOM;
3210 + }
3211 +- return 0;
3212 ++
3213 ++ ret = nf_ct_netns_get(par->net, par->family);
3214 ++ if (ret < 0)
3215 ++ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
3216 ++ par->family);
3217 ++ return ret;
3218 ++}
3219 ++
3220 ++static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
3221 ++{
3222 ++ nf_ct_netns_put(par->net, par->family);
3223 + }
3224 +
3225 + static struct xt_match xt_cluster_match __read_mostly = {
3226 +@@ -154,6 +165,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
3227 + .match = xt_cluster_mt,
3228 + .checkentry = xt_cluster_mt_checkentry,
3229 + .matchsize = sizeof(struct xt_cluster_match_info),
3230 ++ .destroy = xt_cluster_mt_destroy,
3231 + .me = THIS_MODULE,
3232 + };
3233 +
3234 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3235 +index 753f3e73c498..3de415bca391 100644
3236 +--- a/net/wireless/nl80211.c
3237 ++++ b/net/wireless/nl80211.c
3238 +@@ -11679,6 +11679,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
3239 + return -EOPNOTSUPP;
3240 +
3241 + if (!info->attrs[NL80211_ATTR_MDID] ||
3242 ++ !info->attrs[NL80211_ATTR_IE] ||
3243 + !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3244 + return -EINVAL;
3245 +
3246 +diff --git a/net/wireless/util.c b/net/wireless/util.c
3247 +index c1238d582fd1..ca3361a3e750 100644
3248 +--- a/net/wireless/util.c
3249 ++++ b/net/wireless/util.c
3250 +@@ -1449,7 +1449,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
3251 + u8 *op_class)
3252 + {
3253 + u8 vht_opclass;
3254 +- u16 freq = chandef->center_freq1;
3255 ++ u32 freq = chandef->center_freq1;
3256 +
3257 + if (freq >= 2412 && freq <= 2472) {
3258 + if (chandef->width > NL80211_CHAN_WIDTH_40)
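
The u16 -> u32 change above matters for the 60 GHz (DMG) band: center frequencies there run past 65535 MHz, so storing one in a u16 silently truncates it and the frequency-range tests in this function can never match. A two-line demonstration of the truncation (channel numbering per the 56160 + 2160 * ch convention):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t freq = 56160 + 2160 * 6;       /* 69120 MHz, 60 GHz band */
            uint16_t u16_freq = (uint16_t)freq;     /* the old storage type */

            printf("%u truncates to %u\n", freq, u16_freq); /* 69120 -> 3584 */
            return 0;
    }
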
3259 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3260 +index dcc9e6551b51..fe5c741fcc6a 100644
3261 +--- a/sound/pci/hda/patch_realtek.c
3262 ++++ b/sound/pci/hda/patch_realtek.c
3263 +@@ -6288,6 +6288,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3264 + SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
3265 + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
3266 + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
3267 ++ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
3268 + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
3269 + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
3270 + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
3271 +diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
3272 +index 785f4e95148c..7a1039c15e7d 100644
3273 +--- a/tools/hv/hv_fcopy_daemon.c
3274 ++++ b/tools/hv/hv_fcopy_daemon.c
3275 +@@ -233,6 +233,7 @@ int main(int argc, char *argv[])
3276 + break;
3277 +
3278 + default:
3279 ++ error = HV_E_FAIL;
3280 + syslog(LOG_ERR, "Unknown operation: %d",
3281 + buffer.hdr.operation);
3282 +
3283 +diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
3284 +index 32283d88701a..c0d653d36c0f 100755
3285 +--- a/tools/kvm/kvm_stat/kvm_stat
3286 ++++ b/tools/kvm/kvm_stat/kvm_stat
3287 +@@ -724,13 +724,20 @@ class DebugfsProvider(Provider):
3288 + if len(vms) == 0:
3289 + self.do_read = False
3290 +
3291 +- self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
3292 ++ self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
3293 +
3294 + else:
3295 + self.paths = []
3296 + self.do_read = True
3297 + self.reset()
3298 +
3299 ++ def _verify_paths(self):
3300 ++ """Remove invalid paths"""
3301 ++ for path in self.paths:
3302 ++ if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
3303 ++ self.paths.remove(path)
3304 ++ continue
3305 ++
3306 + def read(self, reset=0, by_guest=0):
3307 + """Returns a dict with format:'file name / field -> current value'.
3308 +
3309 +@@ -745,6 +752,7 @@ class DebugfsProvider(Provider):
3310 + # If no debugfs filtering support is available, then don't read.
3311 + if not self.do_read:
3312 + return results
3313 ++ self._verify_paths()
3314 +
3315 + paths = self.paths
3316 + if self._pid == 0:
3317 +@@ -1119,10 +1127,10 @@ class Tui(object):
3318 + (x, term_width) = self.screen.getmaxyx()
3319 + row = 2
3320 + for line in text:
3321 +- start = (term_width - len(line)) / 2
3322 ++ start = (term_width - len(line)) // 2
3323 + self.screen.addstr(row, start, line)
3324 + row += 1
3325 +- self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
3326 ++ self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
3327 + curses.A_STANDOUT)
3328 + self.screen.getkey()
3329 +
3330 +diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
3331 +index 20e7d74d86cd..10a44e946f77 100644
3332 +--- a/tools/perf/arch/powerpc/util/sym-handling.c
3333 ++++ b/tools/perf/arch/powerpc/util/sym-handling.c
3334 +@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
3335 +
3336 + #endif
3337 +
3338 +-#if !defined(_CALL_ELF) || _CALL_ELF != 2
3339 + int arch__choose_best_symbol(struct symbol *syma,
3340 + struct symbol *symb __maybe_unused)
3341 + {
3342 + char *sym = syma->name;
3343 +
3344 ++#if !defined(_CALL_ELF) || _CALL_ELF != 2
3345 + /* Skip over any initial dot */
3346 + if (*sym == '.')
3347 + sym++;
3348 ++#endif
3349 +
3350 + /* Avoid "SyS" kernel syscall aliases */
3351 + if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
3352 +@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
3353 + return SYMBOL_A;
3354 + }
3355 +
3356 ++#if !defined(_CALL_ELF) || _CALL_ELF != 2
3357 + /* Allow matching against dot variants */
3358 + int arch__compare_symbol_names(const char *namea, const char *nameb)
3359 + {
3360 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
3361 +index 2227ee92d8e2..44c2f62b47a3 100644
3362 +--- a/tools/perf/util/evsel.c
3363 ++++ b/tools/perf/util/evsel.c
3364 +@@ -259,8 +259,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
3365 + {
3366 + struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
3367 +
3368 +- if (evsel != NULL)
3369 +- perf_evsel__init(evsel, attr, idx);
3370 ++ if (!evsel)
3371 ++ return NULL;
3372 ++ perf_evsel__init(evsel, attr, idx);
3373 +
3374 + if (perf_evsel__is_bpf_output(evsel)) {
3375 + evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
3376 +diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
3377 +index e7d60d05596d..8f3b7ef221f2 100644
3378 +--- a/tools/perf/util/trace-event-info.c
3379 ++++ b/tools/perf/util/trace-event-info.c
3380 +@@ -379,7 +379,7 @@ out:
3381 +
3382 + static int record_saved_cmdline(void)
3383 + {
3384 +- unsigned int size;
3385 ++ unsigned long long size;
3386 + char *path;
3387 + struct stat st;
3388 + int ret, err = 0;
3389 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
3390 +index 1512086c8cb8..7a1b20ec5216 100644
3391 +--- a/tools/power/x86/turbostat/turbostat.c
3392 ++++ b/tools/power/x86/turbostat/turbostat.c
3393 +@@ -1485,7 +1485,7 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
3394 + if (get_msr(cpu, mp->msr_num, counterp))
3395 + return -1;
3396 + } else {
3397 +- char path[128];
3398 ++ char path[128 + PATH_BYTES];
3399 +
3400 + if (mp->flags & SYSFS_PERCPU) {
3401 + sprintf(path, "/sys/devices/system/cpu/cpu%d/%s",
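
The turbostat fix above enlarges a stack buffer that sprintf() fills with a fixed sysfs prefix plus a user-supplied counter path of up to PATH_BYTES characters, which could overflow the old 128-byte array. A defensive variant using snprintf() with truncation checking (the PATH_BYTES value is assumed here; the patch itself keeps the plain sprintf and simply sizes the buffer for the worst case):

    #include <stdio.h>

    #define PATH_BYTES 128  /* assumed: turbostat's cap on the counter path */

    /* Returns 0 on success, -1 if the result would not fit in buf. */
    static int build_sysfs_path(char *buf, size_t len, int cpu,
                                const char *counter_path)
    {
            int n = snprintf(buf, len, "/sys/devices/system/cpu/cpu%d/%s",
                             cpu, counter_path);

            return (n < 0 || (size_t)n >= len) ? -1 : 0;
    }

    int main(void)
    {
            char path[128 + PATH_BYTES];

            if (build_sysfs_path(path, sizeof(path), 3, "topology/core_id") == 0)
                    puts(path);
            return 0;
    }
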
3402 +diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
3403 +index e92903fc7113..6d5bcbaf6193 100644
3404 +--- a/tools/vm/page-types.c
3405 ++++ b/tools/vm/page-types.c
3406 +@@ -155,12 +155,6 @@ static const char * const page_flag_names[] = {
3407 + };
3408 +
3409 +
3410 +-static const char * const debugfs_known_mountpoints[] = {
3411 +- "/sys/kernel/debug",
3412 +- "/debug",
3413 +- 0,
3414 +-};
3415 +-
3416 + /*
3417 + * data structures
3418 + */
3419 +diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
3420 +index b0b7ef6d0de1..3fe093254385 100644
3421 +--- a/tools/vm/slabinfo.c
3422 ++++ b/tools/vm/slabinfo.c
3423 +@@ -30,8 +30,8 @@ struct slabinfo {
3424 + int alias;
3425 + int refs;
3426 + int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
3427 +- int hwcache_align, object_size, objs_per_slab;
3428 +- int sanity_checks, slab_size, store_user, trace;
3429 ++ unsigned int hwcache_align, object_size, objs_per_slab;
3430 ++ unsigned int sanity_checks, slab_size, store_user, trace;
3431 + int order, poison, reclaim_account, red_zone;
3432 + unsigned long partial, objects, slabs, objects_partial, objects_total;
3433 + unsigned long alloc_fastpath, alloc_slowpath;