From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Tue, 18 Apr 2017 10:23:11
Message-Id: 1492510979.511a2d39861de3acb2bf076ea7fd141c74cfad0b.mpagano@gentoo
1 commit: 511a2d39861de3acb2bf076ea7fd141c74cfad0b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Apr 18 10:22:59 2017 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Apr 18 10:22:59 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=511a2d39
7
8 Linux patch 4.9.23
9
10 0000_README | 4 +
11 1022_linux-4.9.23.patch | 1236 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1240 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 9eac63e..bc4b2a4 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -131,6 +131,10 @@ Patch: 1021_linux-4.9.22.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.22
21
22 +Patch: 1022_linux-4.9.23.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.23
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1022_linux-4.9.23.patch b/1022_linux-4.9.23.patch
31 new file mode 100644
32 index 0000000..05a3313
33 --- /dev/null
34 +++ b/1022_linux-4.9.23.patch
35 @@ -0,0 +1,1236 @@
36 +diff --git a/Makefile b/Makefile
37 +index 4bf4648d97db..0de75976cad5 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 22
44 ++SUBLEVEL = 23
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
49 +index 9a6e11b6f457..5a4f2eb9d0d5 100644
50 +--- a/arch/mips/Kconfig
51 ++++ b/arch/mips/Kconfig
52 +@@ -9,6 +9,7 @@ config MIPS
53 + select HAVE_CONTEXT_TRACKING
54 + select HAVE_GENERIC_DMA_COHERENT
55 + select HAVE_IDE
56 ++ select HAVE_IRQ_EXIT_ON_IRQ_STACK
57 + select HAVE_OPROFILE
58 + select HAVE_PERF_EVENTS
59 + select PERF_USE_VMALLOC
60 +diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
61 +index 6bf10e796553..956db6e201d1 100644
62 +--- a/arch/mips/include/asm/irq.h
63 ++++ b/arch/mips/include/asm/irq.h
64 +@@ -17,6 +17,18 @@
65 +
66 + #include <irq.h>
67 +
68 ++#define IRQ_STACK_SIZE THREAD_SIZE
69 ++
70 ++extern void *irq_stack[NR_CPUS];
71 ++
72 ++static inline bool on_irq_stack(int cpu, unsigned long sp)
73 ++{
74 ++ unsigned long low = (unsigned long)irq_stack[cpu];
75 ++ unsigned long high = low + IRQ_STACK_SIZE;
76 ++
77 ++ return (low <= sp && sp <= high);
78 ++}
79 ++
80 + #ifdef CONFIG_I8259
81 + static inline int irq_canonicalize(int irq)
82 + {
83 +diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
84 +index eebf39549606..2f182bdf024f 100644
85 +--- a/arch/mips/include/asm/stackframe.h
86 ++++ b/arch/mips/include/asm/stackframe.h
87 +@@ -216,12 +216,19 @@
88 + LONG_S $25, PT_R25(sp)
89 + LONG_S $28, PT_R28(sp)
90 + LONG_S $31, PT_R31(sp)
91 ++
92 ++ /* Set thread_info if we're coming from user mode */
93 ++ mfc0 k0, CP0_STATUS
94 ++ sll k0, 3 /* extract cu0 bit */
95 ++ bltz k0, 9f
96 ++
97 + ori $28, sp, _THREAD_MASK
98 + xori $28, _THREAD_MASK
99 + #ifdef CONFIG_CPU_CAVIUM_OCTEON
100 + .set mips64
101 + pref 0, 0($28) /* Prefetch the current pointer */
102 + #endif
103 ++9:
104 + .set pop
105 + .endm
106 +
107 +diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
108 +index fae2f9447792..4be2763f835d 100644
109 +--- a/arch/mips/kernel/asm-offsets.c
110 ++++ b/arch/mips/kernel/asm-offsets.c
111 +@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
112 + OFFSET(TI_REGS, thread_info, regs);
113 + DEFINE(_THREAD_SIZE, THREAD_SIZE);
114 + DEFINE(_THREAD_MASK, THREAD_MASK);
115 ++ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
116 + BLANK();
117 + }
118 +
119 +diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
120 +index 52a4fdfc8513..2ac6c2625c13 100644
121 +--- a/arch/mips/kernel/genex.S
122 ++++ b/arch/mips/kernel/genex.S
123 +@@ -187,9 +187,44 @@ NESTED(handle_int, PT_SIZE, sp)
124 +
125 + LONG_L s0, TI_REGS($28)
126 + LONG_S sp, TI_REGS($28)
127 +- PTR_LA ra, ret_from_irq
128 +- PTR_LA v0, plat_irq_dispatch
129 +- jr v0
130 ++
131 ++ /*
132 ++ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
133 ++ * Check if we are already using the IRQ stack.
134 ++ */
135 ++ move s1, sp # Preserve the sp
136 ++
137 ++ /* Get IRQ stack for this CPU */
138 ++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
139 ++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
140 ++ lui k1, %hi(irq_stack)
141 ++#else
142 ++ lui k1, %highest(irq_stack)
143 ++ daddiu k1, %higher(irq_stack)
144 ++ dsll k1, 16
145 ++ daddiu k1, %hi(irq_stack)
146 ++ dsll k1, 16
147 ++#endif
148 ++ LONG_SRL k0, SMP_CPUID_PTRSHIFT
149 ++ LONG_ADDU k1, k0
150 ++ LONG_L t0, %lo(irq_stack)(k1)
151 ++
152 ++ # Check if already on IRQ stack
153 ++ PTR_LI t1, ~(_THREAD_SIZE-1)
154 ++ and t1, t1, sp
155 ++ beq t0, t1, 2f
156 ++
157 ++ /* Switch to IRQ stack */
158 ++ li t1, _IRQ_STACK_SIZE
159 ++ PTR_ADD sp, t0, t1
160 ++
161 ++2:
162 ++ jal plat_irq_dispatch
163 ++
164 ++ /* Restore sp */
165 ++ move sp, s1
166 ++
167 ++ j ret_from_irq
168 + #ifdef CONFIG_CPU_MICROMIPS
169 + nop
170 + #endif
171 +@@ -262,8 +297,44 @@ NESTED(except_vec_vi_handler, 0, sp)
172 +
173 + LONG_L s0, TI_REGS($28)
174 + LONG_S sp, TI_REGS($28)
175 +- PTR_LA ra, ret_from_irq
176 +- jr v0
177 ++
178 ++ /*
179 ++ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
180 ++ * Check if we are already using the IRQ stack.
181 ++ */
182 ++ move s1, sp # Preserve the sp
183 ++
184 ++ /* Get IRQ stack for this CPU */
185 ++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
186 ++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
187 ++ lui k1, %hi(irq_stack)
188 ++#else
189 ++ lui k1, %highest(irq_stack)
190 ++ daddiu k1, %higher(irq_stack)
191 ++ dsll k1, 16
192 ++ daddiu k1, %hi(irq_stack)
193 ++ dsll k1, 16
194 ++#endif
195 ++ LONG_SRL k0, SMP_CPUID_PTRSHIFT
196 ++ LONG_ADDU k1, k0
197 ++ LONG_L t0, %lo(irq_stack)(k1)
198 ++
199 ++ # Check if already on IRQ stack
200 ++ PTR_LI t1, ~(_THREAD_SIZE-1)
201 ++ and t1, t1, sp
202 ++ beq t0, t1, 2f
203 ++
204 ++ /* Switch to IRQ stack */
205 ++ li t1, _IRQ_STACK_SIZE
206 ++ PTR_ADD sp, t0, t1
207 ++
208 ++2:
209 ++ jalr v0
210 ++
211 ++ /* Restore sp */
212 ++ move sp, s1
213 ++
214 ++ j ret_from_irq
215 + END(except_vec_vi_handler)
216 +
217 + /*
218 +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
219 +index f25f7eab7307..2b0a371b42af 100644
220 +--- a/arch/mips/kernel/irq.c
221 ++++ b/arch/mips/kernel/irq.c
222 +@@ -25,6 +25,8 @@
223 + #include <linux/atomic.h>
224 + #include <asm/uaccess.h>
225 +
226 ++void *irq_stack[NR_CPUS];
227 ++
228 + /*
229 + * 'what should we do if we get a hw irq event on an illegal vector'.
230 + * each architecture has to answer this themselves.
231 +@@ -58,6 +60,15 @@ void __init init_IRQ(void)
232 + clear_c0_status(ST0_IM);
233 +
234 + arch_init_irq();
235 ++
236 ++ for_each_possible_cpu(i) {
237 ++ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
238 ++ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
239 ++
240 ++ irq_stack[i] = s;
241 ++ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
242 ++ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
243 ++ }
244 + }
245 +
246 + #ifdef CONFIG_DEBUG_STACKOVERFLOW
247 +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
248 +index 1652f36acad1..fbbf5fcc695a 100644
249 +--- a/arch/mips/kernel/process.c
250 ++++ b/arch/mips/kernel/process.c
251 +@@ -33,6 +33,7 @@
252 + #include <asm/dsemul.h>
253 + #include <asm/dsp.h>
254 + #include <asm/fpu.h>
255 ++#include <asm/irq.h>
256 + #include <asm/msa.h>
257 + #include <asm/pgtable.h>
258 + #include <asm/mipsregs.h>
259 +@@ -556,7 +557,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
260 + unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
261 + unsigned long pc, unsigned long *ra)
262 + {
263 +- unsigned long stack_page = (unsigned long)task_stack_page(task);
264 ++ unsigned long stack_page = 0;
265 ++ int cpu;
266 ++
267 ++ for_each_possible_cpu(cpu) {
268 ++ if (on_irq_stack(cpu, *sp)) {
269 ++ stack_page = (unsigned long)irq_stack[cpu];
270 ++ break;
271 ++ }
272 ++ }
273 ++
274 ++ if (!stack_page)
275 ++ stack_page = (unsigned long)task_stack_page(task);
276 ++
277 + return unwind_stack_by_address(stack_page, sp, pc, ra);
278 + }
279 + #endif
280 +diff --git a/block/blk-mq.c b/block/blk-mq.c
281 +index ee54ad01f7ac..7b597ec4e9c5 100644
282 +--- a/block/blk-mq.c
283 ++++ b/block/blk-mq.c
284 +@@ -1474,7 +1474,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
285 + INIT_LIST_HEAD(&tags->page_list);
286 +
287 + tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
288 +- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
289 ++ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
290 + set->numa_node);
291 + if (!tags->rqs) {
292 + blk_mq_free_tags(tags);
293 +@@ -1500,7 +1500,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
294 +
295 + do {
296 + page = alloc_pages_node(set->numa_node,
297 +- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
298 ++ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
299 + this_order);
300 + if (page)
301 + break;
302 +@@ -1521,7 +1521,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
303 + * Allow kmemleak to scan these pages as they contain pointers
304 + * to additional allocations like via ops->init_request().
305 + */
306 +- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
307 ++ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
308 + entries_per_page = order_to_size(this_order) / rq_size;
309 + to_do = min(entries_per_page, set->queue_depth - i);
310 + left -= to_do * rq_size;
311 +diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
312 +index 851015e652b8..354a16ab5a16 100644
313 +--- a/drivers/crypto/caam/caampkc.c
314 ++++ b/drivers/crypto/caam/caampkc.c
315 +@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
316 + ctx->dev = caam_jr_alloc();
317 +
318 + if (IS_ERR(ctx->dev)) {
319 +- dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
320 ++ pr_err("Job Ring Device allocation for transform failed\n");
321 + return PTR_ERR(ctx->dev);
322 + }
323 +
324 +diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
325 +index e483b78c6343..98468b96c32f 100644
326 +--- a/drivers/crypto/caam/ctrl.c
327 ++++ b/drivers/crypto/caam/ctrl.c
328 +@@ -282,7 +282,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
329 + /* Try to run it through DECO0 */
330 + ret = run_descriptor_deco0(ctrldev, desc, &status);
331 +
332 +- if (ret || status) {
333 ++ if (ret ||
334 ++ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
335 + dev_err(ctrldev,
336 + "Failed to deinstantiate RNG4 SH%d\n",
337 + sh_idx);
338 +diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
339 +index cf04d249a6a4..6b54e02da10c 100644
340 +--- a/drivers/dma-buf/dma-buf.c
341 ++++ b/drivers/dma-buf/dma-buf.c
342 +@@ -303,6 +303,9 @@ static const struct file_operations dma_buf_fops = {
343 + .llseek = dma_buf_llseek,
344 + .poll = dma_buf_poll,
345 + .unlocked_ioctl = dma_buf_ioctl,
346 ++#ifdef CONFIG_COMPAT
347 ++ .compat_ioctl = dma_buf_ioctl,
348 ++#endif
349 + };
350 +
351 + /*
352 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
353 +index 670beebc32f6..923150de46cb 100644
354 +--- a/drivers/gpu/drm/i915/i915_drv.c
355 ++++ b/drivers/gpu/drm/i915/i915_drv.c
356 +@@ -240,6 +240,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
357 + case I915_PARAM_IRQ_ACTIVE:
358 + case I915_PARAM_ALLOW_BATCHBUFFER:
359 + case I915_PARAM_LAST_DISPATCH:
360 ++ case I915_PARAM_HAS_EXEC_CONSTANTS:
361 + /* Reject all old ums/dri params. */
362 + return -ENODEV;
363 + case I915_PARAM_CHIPSET_ID:
364 +@@ -266,9 +267,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
365 + case I915_PARAM_HAS_BSD2:
366 + value = intel_engine_initialized(&dev_priv->engine[VCS2]);
367 + break;
368 +- case I915_PARAM_HAS_EXEC_CONSTANTS:
369 +- value = INTEL_GEN(dev_priv) >= 4;
370 +- break;
371 + case I915_PARAM_HAS_LLC:
372 + value = HAS_LLC(dev_priv);
373 + break;
374 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
375 +index da832d3cdca7..e0d72457b23c 100644
376 +--- a/drivers/gpu/drm/i915/i915_drv.h
377 ++++ b/drivers/gpu/drm/i915/i915_drv.h
378 +@@ -1225,7 +1225,7 @@ struct intel_gen6_power_mgmt {
379 + unsigned boosts;
380 +
381 + /* manual wa residency calculations */
382 +- struct intel_rps_ei up_ei, down_ei;
383 ++ struct intel_rps_ei ei;
384 +
385 + /*
386 + * Protects RPS/RC6 register access and PCU communication.
387 +@@ -1751,8 +1751,6 @@ struct drm_i915_private {
388 +
389 + const struct intel_device_info info;
390 +
391 +- int relative_constants_mode;
392 +-
393 + void __iomem *regs;
394 +
395 + struct intel_uncore uncore;
396 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
397 +index 00eb4814b913..7b2030925825 100644
398 +--- a/drivers/gpu/drm/i915/i915_gem.c
399 ++++ b/drivers/gpu/drm/i915/i915_gem.c
400 +@@ -4587,8 +4587,6 @@ i915_gem_load_init(struct drm_device *dev)
401 + init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
402 + init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
403 +
404 +- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
405 +-
406 + init_waitqueue_head(&dev_priv->pending_flip_queue);
407 +
408 + dev_priv->mm.interruptible = true;
409 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
410 +index 0c400f852a76..2117f172d7a2 100644
411 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
412 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
413 +@@ -1454,10 +1454,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
414 + struct drm_i915_gem_execbuffer2 *args,
415 + struct list_head *vmas)
416 + {
417 +- struct drm_i915_private *dev_priv = params->request->i915;
418 + u64 exec_start, exec_len;
419 +- int instp_mode;
420 +- u32 instp_mask;
421 + int ret;
422 +
423 + ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
424 +@@ -1468,56 +1465,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
425 + if (ret)
426 + return ret;
427 +
428 +- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
429 +- instp_mask = I915_EXEC_CONSTANTS_MASK;
430 +- switch (instp_mode) {
431 +- case I915_EXEC_CONSTANTS_REL_GENERAL:
432 +- case I915_EXEC_CONSTANTS_ABSOLUTE:
433 +- case I915_EXEC_CONSTANTS_REL_SURFACE:
434 +- if (instp_mode != 0 && params->engine->id != RCS) {
435 +- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
436 +- return -EINVAL;
437 +- }
438 +-
439 +- if (instp_mode != dev_priv->relative_constants_mode) {
440 +- if (INTEL_INFO(dev_priv)->gen < 4) {
441 +- DRM_DEBUG("no rel constants on pre-gen4\n");
442 +- return -EINVAL;
443 +- }
444 +-
445 +- if (INTEL_INFO(dev_priv)->gen > 5 &&
446 +- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
447 +- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
448 +- return -EINVAL;
449 +- }
450 +-
451 +- /* The HW changed the meaning on this bit on gen6 */
452 +- if (INTEL_INFO(dev_priv)->gen >= 6)
453 +- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
454 +- }
455 +- break;
456 +- default:
457 +- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
458 ++ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
459 ++ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
460 + return -EINVAL;
461 + }
462 +
463 +- if (params->engine->id == RCS &&
464 +- instp_mode != dev_priv->relative_constants_mode) {
465 +- struct intel_ring *ring = params->request->ring;
466 +-
467 +- ret = intel_ring_begin(params->request, 4);
468 +- if (ret)
469 +- return ret;
470 +-
471 +- intel_ring_emit(ring, MI_NOOP);
472 +- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
473 +- intel_ring_emit_reg(ring, INSTPM);
474 +- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
475 +- intel_ring_advance(ring);
476 +-
477 +- dev_priv->relative_constants_mode = instp_mode;
478 +- }
479 +-
480 + if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
481 + ret = i915_reset_gen7_sol_offsets(params->request);
482 + if (ret)
483 +diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
484 +index 1c237d02f30b..755d78832a66 100644
485 +--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
486 ++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
487 +@@ -233,7 +233,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
488 + I915_SHRINK_BOUND |
489 + I915_SHRINK_UNBOUND |
490 + I915_SHRINK_ACTIVE);
491 +- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
492 ++ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
493 +
494 + return freed;
495 + }
496 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
497 +index 3fc286cd1157..02908e37c228 100644
498 +--- a/drivers/gpu/drm/i915/i915_irq.c
499 ++++ b/drivers/gpu/drm/i915/i915_irq.c
500 +@@ -990,68 +990,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
501 + ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
502 + }
503 +
504 +-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
505 +- const struct intel_rps_ei *old,
506 +- const struct intel_rps_ei *now,
507 +- int threshold)
508 +-{
509 +- u64 time, c0;
510 +- unsigned int mul = 100;
511 +-
512 +- if (old->cz_clock == 0)
513 +- return false;
514 +-
515 +- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
516 +- mul <<= 8;
517 +-
518 +- time = now->cz_clock - old->cz_clock;
519 +- time *= threshold * dev_priv->czclk_freq;
520 +-
521 +- /* Workload can be split between render + media, e.g. SwapBuffers
522 +- * being blitted in X after being rendered in mesa. To account for
523 +- * this we need to combine both engines into our activity counter.
524 +- */
525 +- c0 = now->render_c0 - old->render_c0;
526 +- c0 += now->media_c0 - old->media_c0;
527 +- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
528 +-
529 +- return c0 >= time;
530 +-}
531 +-
532 + void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
533 + {
534 +- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
535 +- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
536 ++ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
537 + }
538 +
539 + static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
540 + {
541 ++ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
542 + struct intel_rps_ei now;
543 + u32 events = 0;
544 +
545 +- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
546 ++ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
547 + return 0;
548 +
549 + vlv_c0_read(dev_priv, &now);
550 + if (now.cz_clock == 0)
551 + return 0;
552 +
553 +- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
554 +- if (!vlv_c0_above(dev_priv,
555 +- &dev_priv->rps.down_ei, &now,
556 +- dev_priv->rps.down_threshold))
557 +- events |= GEN6_PM_RP_DOWN_THRESHOLD;
558 +- dev_priv->rps.down_ei = now;
559 +- }
560 ++ if (prev->cz_clock) {
561 ++ u64 time, c0;
562 ++ unsigned int mul;
563 ++
564 ++ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
565 ++ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
566 ++ mul <<= 8;
567 +
568 +- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
569 +- if (vlv_c0_above(dev_priv,
570 +- &dev_priv->rps.up_ei, &now,
571 +- dev_priv->rps.up_threshold))
572 +- events |= GEN6_PM_RP_UP_THRESHOLD;
573 +- dev_priv->rps.up_ei = now;
574 ++ time = now.cz_clock - prev->cz_clock;
575 ++ time *= dev_priv->czclk_freq;
576 ++
577 ++ /* Workload can be split between render + media,
578 ++ * e.g. SwapBuffers being blitted in X after being rendered in
579 ++ * mesa. To account for this we need to combine both engines
580 ++ * into our activity counter.
581 ++ */
582 ++ c0 = now.render_c0 - prev->render_c0;
583 ++ c0 += now.media_c0 - prev->media_c0;
584 ++ c0 *= mul;
585 ++
586 ++ if (c0 > time * dev_priv->rps.up_threshold)
587 ++ events = GEN6_PM_RP_UP_THRESHOLD;
588 ++ else if (c0 < time * dev_priv->rps.down_threshold)
589 ++ events = GEN6_PM_RP_DOWN_THRESHOLD;
590 + }
591 +
592 ++ dev_priv->rps.ei = now;
593 + return events;
594 + }
595 +
596 +@@ -4490,7 +4473,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
597 + /* Let's track the enabled rps events */
598 + if (IS_VALLEYVIEW(dev_priv))
599 + /* WaGsvRC0ResidencyMethod:vlv */
600 +- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
601 ++ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
602 + else
603 + dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
604 +
605 +@@ -4531,6 +4514,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
606 + if (!IS_GEN2(dev_priv))
607 + dev->vblank_disable_immediate = true;
608 +
609 ++ /* Most platforms treat the display irq block as an always-on
610 ++ * power domain. vlv/chv can disable it at runtime and need
611 ++ * special care to avoid writing any of the display block registers
612 ++ * outside of the power domain. We defer setting up the display irqs
613 ++ * in this case to the runtime pm.
614 ++ */
615 ++ dev_priv->display_irqs_enabled = true;
616 ++ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
617 ++ dev_priv->display_irqs_enabled = false;
618 ++
619 + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
620 + dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
621 +
622 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
623 +index b9be8a6141d8..5dc6082639db 100644
624 +--- a/drivers/gpu/drm/i915/intel_display.c
625 ++++ b/drivers/gpu/drm/i915/intel_display.c
626 +@@ -3696,10 +3696,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
627 + /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
628 + crtc->base.mode = crtc->base.state->mode;
629 +
630 +- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
631 +- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
632 +- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
633 +-
634 + /*
635 + * Update pipe size and adjust fitter if needed: the reason for this is
636 + * that in compute_mode_changes we check the native mode (not the pfit
637 +@@ -4832,23 +4828,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
638 + struct intel_crtc_scaler_state *scaler_state =
639 + &crtc->config->scaler_state;
640 +
641 +- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
642 +-
643 + if (crtc->config->pch_pfit.enabled) {
644 + int id;
645 +
646 +- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
647 +- DRM_ERROR("Requesting pfit without getting a scaler first\n");
648 ++ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
649 + return;
650 +- }
651 +
652 + id = scaler_state->scaler_id;
653 + I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
654 + PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
655 + I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
656 + I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
657 +-
658 +- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
659 + }
660 + }
661 +
662 +diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
663 +index 334d47b5811a..db3afdf698ca 100644
664 +--- a/drivers/gpu/drm/i915/intel_hotplug.c
665 ++++ b/drivers/gpu/drm/i915/intel_hotplug.c
666 +@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
667 + }
668 + }
669 + }
670 +- if (dev_priv->display.hpd_irq_setup)
671 ++ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
672 + dev_priv->display.hpd_irq_setup(dev_priv);
673 + spin_unlock_irq(&dev_priv->irq_lock);
674 +
675 +@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
676 + }
677 + }
678 +
679 +- if (storm_detected)
680 ++ if (storm_detected && dev_priv->display_irqs_enabled)
681 + dev_priv->display.hpd_irq_setup(dev_priv);
682 + spin_unlock(&dev_priv->irq_lock);
683 +
684 +@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
685 + * Interrupt setup is already guaranteed to be single-threaded, this is
686 + * just to make the assert_spin_locked checks happy.
687 + */
688 +- spin_lock_irq(&dev_priv->irq_lock);
689 +- if (dev_priv->display.hpd_irq_setup)
690 +- dev_priv->display.hpd_irq_setup(dev_priv);
691 +- spin_unlock_irq(&dev_priv->irq_lock);
692 ++ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
693 ++ spin_lock_irq(&dev_priv->irq_lock);
694 ++ if (dev_priv->display_irqs_enabled)
695 ++ dev_priv->display.hpd_irq_setup(dev_priv);
696 ++ spin_unlock_irq(&dev_priv->irq_lock);
697 ++ }
698 + }
699 +
700 + static void i915_hpd_poll_init_work(struct work_struct *work)
701 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
702 +index 4147e51cf893..67db1577ee49 100644
703 +--- a/drivers/gpu/drm/i915/intel_lrc.c
704 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
705 +@@ -2152,42 +2152,30 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
706 +
707 + void intel_lr_context_resume(struct drm_i915_private *dev_priv)
708 + {
709 ++ struct i915_gem_context *ctx = dev_priv->kernel_context;
710 + struct intel_engine_cs *engine;
711 +- struct i915_gem_context *ctx;
712 +-
713 +- /* Because we emit WA_TAIL_DWORDS there may be a disparity
714 +- * between our bookkeeping in ce->ring->head and ce->ring->tail and
715 +- * that stored in context. As we only write new commands from
716 +- * ce->ring->tail onwards, everything before that is junk. If the GPU
717 +- * starts reading from its RING_HEAD from the context, it may try to
718 +- * execute that junk and die.
719 +- *
720 +- * So to avoid that we reset the context images upon resume. For
721 +- * simplicity, we just zero everything out.
722 +- */
723 +- list_for_each_entry(ctx, &dev_priv->context_list, link) {
724 +- for_each_engine(engine, dev_priv) {
725 +- struct intel_context *ce = &ctx->engine[engine->id];
726 +- u32 *reg;
727 +
728 +- if (!ce->state)
729 +- continue;
730 ++ for_each_engine(engine, dev_priv) {
731 ++ struct intel_context *ce = &ctx->engine[engine->id];
732 ++ void *vaddr;
733 ++ uint32_t *reg_state;
734 +
735 +- reg = i915_gem_object_pin_map(ce->state->obj,
736 +- I915_MAP_WB);
737 +- if (WARN_ON(IS_ERR(reg)))
738 +- continue;
739 ++ if (!ce->state)
740 ++ continue;
741 +
742 +- reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
743 +- reg[CTX_RING_HEAD+1] = 0;
744 +- reg[CTX_RING_TAIL+1] = 0;
745 ++ vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
746 ++ if (WARN_ON(IS_ERR(vaddr)))
747 ++ continue;
748 +
749 +- ce->state->obj->dirty = true;
750 +- i915_gem_object_unpin_map(ce->state->obj);
751 ++ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
752 +
753 +- ce->ring->head = ce->ring->tail = 0;
754 +- ce->ring->last_retired_head = -1;
755 +- intel_ring_update_space(ce->ring);
756 +- }
757 ++ reg_state[CTX_RING_HEAD+1] = 0;
758 ++ reg_state[CTX_RING_TAIL+1] = 0;
759 ++
760 ++ ce->state->obj->dirty = true;
761 ++ i915_gem_object_unpin_map(ce->state->obj);
762 ++
763 ++ ce->ring->head = 0;
764 ++ ce->ring->tail = 0;
765 + }
766 + }
767 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
768 +index e559a45ff1f7..2c6d59d4b6d3 100644
769 +--- a/drivers/gpu/drm/i915/intel_pm.c
770 ++++ b/drivers/gpu/drm/i915/intel_pm.c
771 +@@ -4903,6 +4903,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
772 + break;
773 + }
774 +
775 ++ /* When byt can survive without system hang with dynamic
776 ++ * sw freq adjustments, this restriction can be lifted.
777 ++ */
778 ++ if (IS_VALLEYVIEW(dev_priv))
779 ++ goto skip_hw_write;
780 ++
781 + I915_WRITE(GEN6_RP_UP_EI,
782 + GT_INTERVAL_FROM_US(dev_priv, ei_up));
783 + I915_WRITE(GEN6_RP_UP_THRESHOLD,
784 +@@ -4923,6 +4929,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
785 + GEN6_RP_UP_BUSY_AVG |
786 + GEN6_RP_DOWN_IDLE_AVG);
787 +
788 ++skip_hw_write:
789 + dev_priv->rps.power = new_power;
790 + dev_priv->rps.up_threshold = threshold_up;
791 + dev_priv->rps.down_threshold = threshold_down;
792 +@@ -4933,8 +4940,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
793 + {
794 + u32 mask = 0;
795 +
796 ++ /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
797 + if (val > dev_priv->rps.min_freq_softlimit)
798 +- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
799 ++ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
800 + if (val < dev_priv->rps.max_freq_softlimit)
801 + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
802 +
803 +@@ -5034,7 +5042,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
804 + {
805 + mutex_lock(&dev_priv->rps.hw_lock);
806 + if (dev_priv->rps.enabled) {
807 +- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
808 ++ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
809 + gen6_rps_reset_ei(dev_priv);
810 + I915_WRITE(GEN6_PMINTRMSK,
811 + gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
812 +@@ -7960,10 +7968,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
813 + * @timeout_base_ms: timeout for polling with preemption enabled
814 + *
815 + * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
816 +- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
817 ++ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
818 + * The request is acknowledged once the PCODE reply dword equals @reply after
819 + * applying @reply_mask. Polling is first attempted with preemption enabled
820 +- * for @timeout_base_ms and if this times out for another 10 ms with
821 ++ * for @timeout_base_ms and if this times out for another 50 ms with
822 + * preemption disabled.
823 + *
824 + * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
825 +@@ -7999,14 +8007,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
826 + * worst case) _and_ PCODE was busy for some reason even after a
827 + * (queued) request and @timeout_base_ms delay. As a workaround retry
828 + * the poll with preemption disabled to maximize the number of
829 +- * requests. Increase the timeout from @timeout_base_ms to 10ms to
830 ++ * requests. Increase the timeout from @timeout_base_ms to 50ms to
831 + * account for interrupts that could reduce the number of these
832 +- * requests.
833 ++ * requests, and for any quirks of the PCODE firmware that delays
834 ++ * the request completion.
835 + */
836 + DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
837 + WARN_ON_ONCE(timeout_base_ms > 3);
838 + preempt_disable();
839 +- ret = wait_for_atomic(COND, 10);
840 ++ ret = wait_for_atomic(COND, 50);
841 + preempt_enable();
842 +
843 + out:
844 +diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
845 +index d4f3239b5686..f283b714aa79 100644
846 +--- a/drivers/i2c/busses/i2c-bcm2835.c
847 ++++ b/drivers/i2c/busses/i2c-bcm2835.c
848 +@@ -64,6 +64,7 @@ struct bcm2835_i2c_dev {
849 + int irq;
850 + struct i2c_adapter adapter;
851 + struct completion completion;
852 ++ struct i2c_msg *curr_msg;
853 + u32 msg_err;
854 + u8 *msg_buf;
855 + size_t msg_buf_remaining;
856 +@@ -126,14 +127,13 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
857 + return IRQ_HANDLED;
858 + }
859 +
860 +- if (val & BCM2835_I2C_S_RXD) {
861 +- bcm2835_drain_rxfifo(i2c_dev);
862 +- if (!(val & BCM2835_I2C_S_DONE))
863 +- return IRQ_HANDLED;
864 +- }
865 +-
866 + if (val & BCM2835_I2C_S_DONE) {
867 +- if (i2c_dev->msg_buf_remaining)
868 ++ if (i2c_dev->curr_msg->flags & I2C_M_RD) {
869 ++ bcm2835_drain_rxfifo(i2c_dev);
870 ++ val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
871 ++ }
872 ++
873 ++ if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
874 + i2c_dev->msg_err = BCM2835_I2C_S_LEN;
875 + else
876 + i2c_dev->msg_err = 0;
877 +@@ -141,11 +141,16 @@ static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
878 + return IRQ_HANDLED;
879 + }
880 +
881 +- if (val & BCM2835_I2C_S_TXD) {
882 ++ if (val & BCM2835_I2C_S_TXW) {
883 + bcm2835_fill_txfifo(i2c_dev);
884 + return IRQ_HANDLED;
885 + }
886 +
887 ++ if (val & BCM2835_I2C_S_RXR) {
888 ++ bcm2835_drain_rxfifo(i2c_dev);
889 ++ return IRQ_HANDLED;
890 ++ }
891 ++
892 + return IRQ_NONE;
893 + }
894 +
895 +@@ -155,6 +160,7 @@ static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
896 + u32 c;
897 + unsigned long time_left;
898 +
899 ++ i2c_dev->curr_msg = msg;
900 + i2c_dev->msg_buf = msg->buf;
901 + i2c_dev->msg_buf_remaining = msg->len;
902 + reinit_completion(&i2c_dev->completion);
903 +diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
904 +index 377947580203..283ff7e17a0f 100644
905 +--- a/drivers/mtd/bcm47xxpart.c
906 ++++ b/drivers/mtd/bcm47xxpart.c
907 +@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
908 +
909 + last_trx_part = curr_part - 1;
910 +
911 +- /*
912 +- * We have whole TRX scanned, skip to the next part. Use
913 +- * roundown (not roundup), as the loop will increase
914 +- * offset in next step.
915 +- */
916 +- offset = rounddown(offset + trx->length, blocksize);
917 ++ /* Jump to the end of TRX */
918 ++ offset = roundup(offset + trx->length, blocksize);
919 ++ /* Next loop iteration will increase the offset */
920 ++ offset -= blocksize;
921 + continue;
922 + }
923 +
924 +diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
925 +index a849da92f857..6b8635378f1f 100644
926 +--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
927 ++++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
928 +@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
929 + {
930 + struct mlx4_cq *cq;
931 +
932 ++ rcu_read_lock();
933 + cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
934 + cqn & (dev->caps.num_cqs - 1));
935 ++ rcu_read_unlock();
936 ++
937 + if (!cq) {
938 + mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
939 + return;
940 + }
941 +
942 ++ /* Acessing the CQ outside of rcu_read_lock is safe, because
943 ++ * the CQ is freed only after interrupt handling is completed.
944 ++ */
945 + ++cq->arm_sn;
946 +
947 + cq->comp(cq);
948 +@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
949 + struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
950 + struct mlx4_cq *cq;
951 +
952 +- spin_lock(&cq_table->lock);
953 +-
954 ++ rcu_read_lock();
955 + cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
956 +- if (cq)
957 +- atomic_inc(&cq->refcount);
958 +-
959 +- spin_unlock(&cq_table->lock);
960 ++ rcu_read_unlock();
961 +
962 + if (!cq) {
963 +- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
964 ++ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
965 + return;
966 + }
967 +
968 ++ /* Acessing the CQ outside of rcu_read_lock is safe, because
969 ++ * the CQ is freed only after interrupt handling is completed.
970 ++ */
971 + cq->event(cq, event_type);
972 +-
973 +- if (atomic_dec_and_test(&cq->refcount))
974 +- complete(&cq->free);
975 + }
976 +
977 + static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
978 +@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
979 + if (err)
980 + return err;
981 +
982 +- spin_lock_irq(&cq_table->lock);
983 ++ spin_lock(&cq_table->lock);
984 + err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
985 +- spin_unlock_irq(&cq_table->lock);
986 ++ spin_unlock(&cq_table->lock);
987 + if (err)
988 + goto err_icm;
989 +
990 +@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
991 + return 0;
992 +
993 + err_radix:
994 +- spin_lock_irq(&cq_table->lock);
995 ++ spin_lock(&cq_table->lock);
996 + radix_tree_delete(&cq_table->tree, cq->cqn);
997 +- spin_unlock_irq(&cq_table->lock);
998 ++ spin_unlock(&cq_table->lock);
999 +
1000 + err_icm:
1001 + mlx4_cq_free_icm(dev, cq->cqn);
1002 +@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
1003 + if (err)
1004 + mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
1005 +
1006 ++ spin_lock(&cq_table->lock);
1007 ++ radix_tree_delete(&cq_table->tree, cq->cqn);
1008 ++ spin_unlock(&cq_table->lock);
1009 ++
1010 + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
1011 + if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
1012 + priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
1013 + synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
1014 +
1015 +- spin_lock_irq(&cq_table->lock);
1016 +- radix_tree_delete(&cq_table->tree, cq->cqn);
1017 +- spin_unlock_irq(&cq_table->lock);
1018 +-
1019 + if (atomic_dec_and_test(&cq->refcount))
1020 + complete(&cq->free);
1021 + wait_for_completion(&cq->free);
1022 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1023 +index 4d3ddc2f7e43..5d484581becd 100644
1024 +--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1025 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1026 +@@ -444,8 +444,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
1027 + ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
1028 +
1029 + ring->stride = stride;
1030 +- if (ring->stride <= TXBB_SIZE)
1031 ++ if (ring->stride <= TXBB_SIZE) {
1032 ++ /* Stamp first unused send wqe */
1033 ++ __be32 *ptr = (__be32 *)ring->buf;
1034 ++ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
1035 ++ *ptr = stamp;
1036 ++ /* Move pointer to start of rx section */
1037 + ring->buf += TXBB_SIZE;
1038 ++ }
1039 +
1040 + ring->log_stride = ffs(ring->stride) - 1;
1041 + ring->buf_size = ring->size * ring->stride;
1042 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1043 +index c548beaaf910..32f76bf018c3 100644
1044 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1045 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1046 +@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1047 + put_res(dev, slave, srqn, RES_SRQ);
1048 + qp->srq = srq;
1049 + }
1050 ++
1051 ++ /* Save param3 for dynamic changes from VST back to VGT */
1052 ++ qp->param3 = qpc->param3;
1053 + put_res(dev, slave, rcqn, RES_CQ);
1054 + put_res(dev, slave, mtt_base, RES_MTT);
1055 + res_end_move(dev, slave, RES_QP, qpn);
1056 +@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
1057 + int qpn = vhcr->in_modifier & 0x7fffff;
1058 + struct res_qp *qp;
1059 + u8 orig_sched_queue;
1060 +- __be32 orig_param3 = qpc->param3;
1061 + u8 orig_vlan_control = qpc->pri_path.vlan_control;
1062 + u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
1063 + u8 orig_pri_path_fl = qpc->pri_path.fl;
1064 +@@ -3814,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
1065 + */
1066 + if (!err) {
1067 + qp->sched_queue = orig_sched_queue;
1068 +- qp->param3 = orig_param3;
1069 + qp->vlan_control = orig_vlan_control;
1070 + qp->fvl_rx = orig_fvl_rx;
1071 + qp->pri_path_fl = orig_pri_path_fl;
1072 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
1073 +index 4e0c5653054b..b7273be9303d 100644
1074 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
1075 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
1076 +@@ -1422,7 +1422,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1077 + cancel_work_sync(&rt2x00dev->intf_work);
1078 + cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
1079 + cancel_work_sync(&rt2x00dev->sleep_work);
1080 +-#ifdef CONFIG_RT2X00_LIB_USB
1081 ++#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
1082 + if (rt2x00_is_usb(rt2x00dev)) {
1083 + usb_kill_anchored_urbs(rt2x00dev->anchor);
1084 + hrtimer_cancel(&rt2x00dev->txstatus_timer);
1085 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
1086 +index 6005e14213ca..662705e31136 100644
1087 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
1088 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
1089 +@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
1090 + entry->skb->data, length,
1091 + rt2x00usb_interrupt_txdone, entry);
1092 +
1093 +- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
1094 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
1095 + if (status) {
1096 +- usb_unanchor_urb(entry_priv->urb);
1097 + if (status == -ENODEV)
1098 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1099 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
1100 +@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
1101 + entry->skb->data, entry->skb->len,
1102 + rt2x00usb_interrupt_rxdone, entry);
1103 +
1104 +- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
1105 + status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
1106 + if (status) {
1107 +- usb_unanchor_urb(entry_priv->urb);
1108 + if (status == -ENODEV)
1109 + clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1110 + set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
1111 +@@ -824,10 +820,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
1112 + if (retval)
1113 + goto exit_free_device;
1114 +
1115 +- retval = rt2x00lib_probe_dev(rt2x00dev);
1116 +- if (retval)
1117 +- goto exit_free_reg;
1118 +-
1119 + rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
1120 + sizeof(struct usb_anchor),
1121 + GFP_KERNEL);
1122 +@@ -835,10 +827,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
1123 + retval = -ENOMEM;
1124 + goto exit_free_reg;
1125 + }
1126 +-
1127 + init_usb_anchor(rt2x00dev->anchor);
1128 ++
1129 ++ retval = rt2x00lib_probe_dev(rt2x00dev);
1130 ++ if (retval)
1131 ++ goto exit_free_anchor;
1132 ++
1133 + return 0;
1134 +
1135 ++exit_free_anchor:
1136 ++ usb_kill_anchored_urbs(rt2x00dev->anchor);
1137 ++
1138 + exit_free_reg:
1139 + rt2x00usb_free_reg(rt2x00dev);
1140 +
1141 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1142 +index c28ccf1b5a1f..35fb2bef0e45 100644
1143 +--- a/drivers/usb/core/hub.c
1144 ++++ b/drivers/usb/core/hub.c
1145 +@@ -2650,8 +2650,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
1146 + if (ret < 0)
1147 + return ret;
1148 +
1149 +- /* The port state is unknown until the reset completes. */
1150 +- if (!(portstatus & USB_PORT_STAT_RESET))
1151 ++ /*
1152 ++ * The port state is unknown until the reset completes.
1153 ++ *
1154 ++ * On top of that, some chips may require additional time
1155 ++ * to re-establish a connection after the reset is complete,
1156 ++ * so also wait for the connection to be re-established.
1157 ++ */
1158 ++ if (!(portstatus & USB_PORT_STAT_RESET) &&
1159 ++ (portstatus & USB_PORT_STAT_CONNECTION))
1160 + break;
1161 +
1162 + /* switch to the long delay after two short delay failures */
1163 +diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
1164 +index 516ffb4dc9a0..f419dd999581 100644
1165 +--- a/fs/orangefs/devorangefs-req.c
1166 ++++ b/fs/orangefs/devorangefs-req.c
1167 +@@ -402,8 +402,9 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
1168 + /* remove the op from the in progress hash table */
1169 + op = orangefs_devreq_remove_op(head.tag);
1170 + if (!op) {
1171 +- gossip_err("WARNING: No one's waiting for tag %llu\n",
1172 +- llu(head.tag));
1173 ++ gossip_debug(GOSSIP_DEV_DEBUG,
1174 ++ "%s: No one's waiting for tag %llu\n",
1175 ++ __func__, llu(head.tag));
1176 + return ret;
1177 + }
1178 +
1179 +diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
1180 +index 38887cc5577f..0748a26598fc 100644
1181 +--- a/fs/orangefs/orangefs-debugfs.c
1182 ++++ b/fs/orangefs/orangefs-debugfs.c
1183 +@@ -671,8 +671,10 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
1184 + */
1185 + cdm_element_count =
1186 + orangefs_prepare_cdm_array(client_debug_array_string);
1187 +- if (cdm_element_count <= 0)
1188 ++ if (cdm_element_count <= 0) {
1189 ++ kfree(new);
1190 + goto out;
1191 ++ }
1192 +
1193 + for (i = 0; i < cdm_element_count; i++) {
1194 + strlcat(new, "\t", string_size);
1195 +@@ -963,13 +965,13 @@ int orangefs_debugfs_new_client_string(void __user *arg)
1196 + int ret;
1197 +
1198 + ret = copy_from_user(&client_debug_array_string,
1199 +- (void __user *)arg,
1200 +- ORANGEFS_MAX_DEBUG_STRING_LEN);
1201 ++ (void __user *)arg,
1202 ++ ORANGEFS_MAX_DEBUG_STRING_LEN);
1203 +
1204 + if (ret != 0) {
1205 + pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
1206 + __func__);
1207 +- return -EIO;
1208 ++ return -EFAULT;
1209 + }
1210 +
1211 + /*
1212 +@@ -984,17 +986,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
1213 + */
1214 + client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
1215 + '\0';
1216 +-
1217 ++
1218 + pr_info("%s: client debug array string has been received.\n",
1219 + __func__);
1220 +
1221 + if (!help_string_initialized) {
1222 +
1223 + /* Build a proper debug help string. */
1224 +- if (orangefs_prepare_debugfs_help_string(0)) {
1225 ++ ret = orangefs_prepare_debugfs_help_string(0);
1226 ++ if (ret) {
1227 + gossip_err("%s: no debug help string \n",
1228 + __func__);
1229 +- return -EIO;
1230 ++ return ret;
1231 + }
1232 +
1233 + }
1234 +@@ -1007,7 +1010,7 @@ int orangefs_debugfs_new_client_string(void __user *arg)
1235 +
1236 + help_string_initialized++;
1237 +
1238 +- return ret;
1239 ++ return 0;
1240 + }
1241 +
1242 + int orangefs_debugfs_new_debug(void __user *arg)
1243 +diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
1244 +index a3d84ffee905..f380f9ed1b28 100644
1245 +--- a/fs/orangefs/orangefs-dev-proto.h
1246 ++++ b/fs/orangefs/orangefs-dev-proto.h
1247 +@@ -50,8 +50,7 @@
1248 + * Misc constants. Please retain them as multiples of 8!
1249 + * Otherwise 32-64 bit interactions will be messed up :)
1250 + */
1251 +-#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400
1252 +-#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800
1253 ++#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800
1254 +
1255 + /*
1256 + * The maximum number of directory entries in a single request is 96.
1257 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1258 +index f2b04a77258d..8ab0974f4ee2 100644
1259 +--- a/net/packet/af_packet.c
1260 ++++ b/net/packet/af_packet.c
1261 +@@ -4235,8 +4235,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
1262 + if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
1263 + goto out;
1264 + if (po->tp_version >= TPACKET_V3 &&
1265 +- (int)(req->tp_block_size -
1266 +- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
1267 ++ req->tp_block_size <=
1268 ++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
1269 + goto out;
1270 + if (unlikely(req->tp_frame_size < po->tp_hdrlen +
1271 + po->tp_reserve))