From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.10 commit in: /
Date: Tue, 18 Apr 2017 10:24:00
Message-Id: 1492511029.4e0e4f1029afd27b8bf7999371ce7d817c11d73a.mpagano@gentoo
commit: 4e0e4f1029afd27b8bf7999371ce7d817c11d73a
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 18 10:23:49 2017 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 18 10:23:49 2017 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4e0e4f10

Linux patch 4.10.11

0000_README | 4 +
1010_linux-4.10.11.patch | 1128 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1132 insertions(+)

diff --git a/0000_README b/0000_README
index abc6f43..f05d7f1 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1009_linux-4.10.10.patch
From: http://www.kernel.org
Desc: Linux 4.10.10

+Patch: 1010_linux-4.10.11.patch
+From: http://www.kernel.org
+Desc: Linux 4.10.11
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.10.11.patch b/1010_linux-4.10.11.patch
new file mode 100644
index 0000000..ac7bb4e
--- /dev/null
+++ b/1010_linux-4.10.11.patch
@@ -0,0 +1,1128 @@
+diff --git a/Makefile b/Makefile
+index 52858726495b..412f2a0a3814 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 10
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 9a6e11b6f457..5a4f2eb9d0d5 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -9,6 +9,7 @@ config MIPS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_IDE
++ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select PERF_USE_VMALLOC
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 6bf10e796553..956db6e201d1 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -17,6 +17,18 @@
+
+ #include <irq.h>
+
++#define IRQ_STACK_SIZE THREAD_SIZE
++
++extern void *irq_stack[NR_CPUS];
++
++static inline bool on_irq_stack(int cpu, unsigned long sp)
++{
++ unsigned long low = (unsigned long)irq_stack[cpu];
++ unsigned long high = low + IRQ_STACK_SIZE;
++
++ return (low <= sp && sp <= high);
++}
++
+ #ifdef CONFIG_I8259
+ static inline int irq_canonicalize(int irq)
+ {
+diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
+index eebf39549606..2f182bdf024f 100644
+--- a/arch/mips/include/asm/stackframe.h
++++ b/arch/mips/include/asm/stackframe.h
+@@ -216,12 +216,19 @@
+ LONG_S $25, PT_R25(sp)
+ LONG_S $28, PT_R28(sp)
+ LONG_S $31, PT_R31(sp)
++
++ /* Set thread_info if we're coming from user mode */
++ mfc0 k0, CP0_STATUS
++ sll k0, 3 /* extract cu0 bit */
++ bltz k0, 9f
++
+ ori $28, sp, _THREAD_MASK
+ xori $28, _THREAD_MASK
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set mips64
+ pref 0, 0($28) /* Prefetch the current pointer */
+ #endif
++9:
+ .set pop
+ .endm
+
+diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
+index 6080582a26d1..a7277698d328 100644
+--- a/arch/mips/kernel/asm-offsets.c
++++ b/arch/mips/kernel/asm-offsets.c
+@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
++ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ BLANK();
+ }
+
+diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
+index 52a4fdfc8513..2ac6c2625c13 100644
+--- a/arch/mips/kernel/genex.S
++++ b/arch/mips/kernel/genex.S
+@@ -187,9 +187,44 @@ NESTED(handle_int, PT_SIZE, sp)
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+- PTR_LA ra, ret_from_irq
+- PTR_LA v0, plat_irq_dispatch
+- jr v0
++
++ /*
++ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++ * Check if we are already using the IRQ stack.
++ */
++ move s1, sp # Preserve the sp
++
++ /* Get IRQ stack for this CPU */
++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++ lui k1, %hi(irq_stack)
++#else
++ lui k1, %highest(irq_stack)
++ daddiu k1, %higher(irq_stack)
++ dsll k1, 16
++ daddiu k1, %hi(irq_stack)
++ dsll k1, 16
++#endif
++ LONG_SRL k0, SMP_CPUID_PTRSHIFT
++ LONG_ADDU k1, k0
++ LONG_L t0, %lo(irq_stack)(k1)
++
++ # Check if already on IRQ stack
++ PTR_LI t1, ~(_THREAD_SIZE-1)
++ and t1, t1, sp
++ beq t0, t1, 2f
++
++ /* Switch to IRQ stack */
++ li t1, _IRQ_STACK_SIZE
++ PTR_ADD sp, t0, t1
++
++2:
++ jal plat_irq_dispatch
++
++ /* Restore sp */
++ move sp, s1
++
++ j ret_from_irq
+ #ifdef CONFIG_CPU_MICROMIPS
+ nop
+ #endif
+@@ -262,8 +297,44 @@ NESTED(except_vec_vi_handler, 0, sp)
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+- PTR_LA ra, ret_from_irq
+- jr v0
++
++ /*
++ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
++ * Check if we are already using the IRQ stack.
++ */
++ move s1, sp # Preserve the sp
++
++ /* Get IRQ stack for this CPU */
++ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
++#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
++ lui k1, %hi(irq_stack)
++#else
++ lui k1, %highest(irq_stack)
++ daddiu k1, %higher(irq_stack)
++ dsll k1, 16
++ daddiu k1, %hi(irq_stack)
++ dsll k1, 16
++#endif
++ LONG_SRL k0, SMP_CPUID_PTRSHIFT
++ LONG_ADDU k1, k0
++ LONG_L t0, %lo(irq_stack)(k1)
++
++ # Check if already on IRQ stack
++ PTR_LI t1, ~(_THREAD_SIZE-1)
++ and t1, t1, sp
++ beq t0, t1, 2f
++
++ /* Switch to IRQ stack */
++ li t1, _IRQ_STACK_SIZE
++ PTR_ADD sp, t0, t1
++
++2:
++ jalr v0
++
++ /* Restore sp */
++ move sp, s1
++
++ j ret_from_irq
+ END(except_vec_vi_handler)
+
+ /*
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index f8f5836eb3c1..ba150c755fcc 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -25,6 +25,8 @@
+ #include <linux/atomic.h>
+ #include <linux/uaccess.h>
+
++void *irq_stack[NR_CPUS];
++
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+@@ -58,6 +60,15 @@ void __init init_IRQ(void)
+ clear_c0_status(ST0_IM);
+
+ arch_init_irq();
++
++ for_each_possible_cpu(i) {
++ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
++ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
++
++ irq_stack[i] = s;
++ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
++ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
++ }
+ }
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 7d80447e5d03..efa1df52c616 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -33,6 +33,7 @@
+ #include <asm/dsemul.h>
+ #include <asm/dsp.h>
+ #include <asm/fpu.h>
++#include <asm/irq.h>
+ #include <asm/msa.h>
+ #include <asm/pgtable.h>
+ #include <asm/mipsregs.h>
+@@ -556,7 +557,19 @@ EXPORT_SYMBOL(unwind_stack_by_address);
+ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
+ {
+- unsigned long stack_page = (unsigned long)task_stack_page(task);
++ unsigned long stack_page = 0;
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ if (on_irq_stack(cpu, *sp)) {
++ stack_page = (unsigned long)irq_stack[cpu];
++ break;
++ }
++ }
++
++ if (!stack_page)
++ stack_page = (unsigned long)task_stack_page(task);
++
+ return unwind_stack_by_address(stack_page, sp, pc, ra);
+ }
+ #endif
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 32100c4851dd..49cbdcba7883 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+ ctx->dev = caam_jr_alloc();
+
+ if (IS_ERR(ctx->dev)) {
+- dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
++ pr_err("Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->dev);
+ }
+
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index 755109841cfd..6092252ce6ca 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -282,7 +282,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+ /* Try to run it through DECO0 */
+ ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+- if (ret || status) {
++ if (ret ||
++ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
+ dev_err(ctrldev,
+ "Failed to deinstantiate RNG4 SH%d\n",
+ sh_idx);
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index e72e64484131..686dc3e7eb0b 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -303,6 +303,9 @@ static const struct file_operations dma_buf_fops = {
+ .llseek = dma_buf_llseek,
+ .poll = dma_buf_poll,
+ .unlocked_ioctl = dma_buf_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = dma_buf_ioctl,
++#endif
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index f02da12f2860..8be958fee160 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
++ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+@@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ case I915_PARAM_HAS_BSD2:
+ value = !!dev_priv->engine[VCS2];
+ break;
+- case I915_PARAM_HAS_EXEC_CONSTANTS:
+- value = INTEL_GEN(dev_priv) >= 4;
+- break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev_priv);
+ break;
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 8493e19b563a..4a1ed776b41d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1263,7 +1263,7 @@ struct intel_gen6_power_mgmt {
+ unsigned boosts;
+
+ /* manual wa residency calculations */
+- struct intel_rps_ei up_ei, down_ei;
++ struct intel_rps_ei ei;
+
+ /*
+ * Protects RPS/RC6 register access and PCU communication.
+@@ -1805,8 +1805,6 @@ struct drm_i915_private {
+
+ const struct intel_device_info info;
+
+- int relative_constants_mode;
+-
+ void __iomem *regs;
+
+ struct intel_uncore uncore;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 7f4a54b94447..b7146494d53f 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2184,6 +2184,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+ */
+ shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
+ obj->mm.madv = __I915_MADV_PURGED;
++ obj->mm.pages = ERR_PTR(-EFAULT);
+ }
+
+ /* Try to discard unwanted pages */
+@@ -2283,7 +2284,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+
+ __i915_gem_object_reset_page_iter(obj);
+
+- obj->ops->put_pages(obj, pages);
++ if (!IS_ERR(pages))
++ obj->ops->put_pages(obj, pages);
++
+ unlock:
+ mutex_unlock(&obj->mm.lock);
+ }
+@@ -2501,7 +2504,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+ if (err)
+ return err;
+
+- if (unlikely(!obj->mm.pages)) {
++ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto unlock;
+@@ -2579,7 +2582,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+
+ pinned = true;
+ if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+- if (unlikely(!obj->mm.pages)) {
++ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+ ret = ____i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+@@ -3003,6 +3006,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
+ if (args->timeout_ns < 0)
+ args->timeout_ns = 0;
++
++ /*
++ * Apparently ktime isn't accurate enough and occasionally has a
++ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
++ * things up to make the test happy. We allow up to 1 jiffy.
++ *
++ * This is a regression from the timespec->ktime conversion.
++ */
++ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
++ args->timeout_ns = 0;
+ }
+
+ i915_gem_object_put(obj);
+@@ -4554,8 +4567,6 @@ i915_gem_load_init(struct drm_device *dev)
+ init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
+ init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+
+- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+-
+ init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+ dev_priv->mm.interruptible = true;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index b8b877c91b0a..3d37a15531ad 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1410,10 +1410,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas)
+ {
+- struct drm_i915_private *dev_priv = params->request->i915;
+ u64 exec_start, exec_len;
+- int instp_mode;
+- u32 instp_mask;
+ int ret;
+
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
+@@ -1424,56 +1421,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
+ if (ret)
+ return ret;
+
+- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+- instp_mask = I915_EXEC_CONSTANTS_MASK;
+- switch (instp_mode) {
+- case I915_EXEC_CONSTANTS_REL_GENERAL:
+- case I915_EXEC_CONSTANTS_ABSOLUTE:
+- case I915_EXEC_CONSTANTS_REL_SURFACE:
+- if (instp_mode != 0 && params->engine->id != RCS) {
+- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+- return -EINVAL;
+- }
+-
+- if (instp_mode != dev_priv->relative_constants_mode) {
+- if (INTEL_INFO(dev_priv)->gen < 4) {
+- DRM_DEBUG("no rel constants on pre-gen4\n");
+- return -EINVAL;
+- }
+-
+- if (INTEL_INFO(dev_priv)->gen > 5 &&
+- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+- return -EINVAL;
+- }
+-
+- /* The HW changed the meaning on this bit on gen6 */
+- if (INTEL_INFO(dev_priv)->gen >= 6)
+- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+- }
+- break;
+- default:
+- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
++ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
++ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
+ return -EINVAL;
+ }
+
+- if (params->engine->id == RCS &&
+- instp_mode != dev_priv->relative_constants_mode) {
+- struct intel_ring *ring = params->request->ring;
+-
+- ret = intel_ring_begin(params->request, 4);
+- if (ret)
+- return ret;
+-
+- intel_ring_emit(ring, MI_NOOP);
+- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+- intel_ring_emit_reg(ring, INSTPM);
+- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+- intel_ring_advance(ring);
+-
+- dev_priv->relative_constants_mode = instp_mode;
+- }
+-
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(params->request);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index 401006b4c6a3..d5d2b4c6ed38 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
+- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
++ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
+
+ return freed;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index f914581b1729..de6710f02d95 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
+ ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
+ }
+
+-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
+- const struct intel_rps_ei *old,
+- const struct intel_rps_ei *now,
+- int threshold)
+-{
+- u64 time, c0;
+- unsigned int mul = 100;
+-
+- if (old->cz_clock == 0)
+- return false;
+-
+- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+- mul <<= 8;
+-
+- time = now->cz_clock - old->cz_clock;
+- time *= threshold * dev_priv->czclk_freq;
+-
+- /* Workload can be split between render + media, e.g. SwapBuffers
+- * being blitted in X after being rendered in mesa. To account for
+- * this we need to combine both engines into our activity counter.
+- */
+- c0 = now->render_c0 - old->render_c0;
+- c0 += now->media_c0 - old->media_c0;
+- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
+-
+- return c0 >= time;
+-}
+-
+ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
+ {
+- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
+- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
++ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+ }
+
+ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
+ {
++ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
++ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(dev_priv, &now);
+ if (now.cz_clock == 0)
+ return 0;
+
+- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
+- if (!vlv_c0_above(dev_priv,
+- &dev_priv->rps.down_ei, &now,
+- dev_priv->rps.down_threshold))
+- events |= GEN6_PM_RP_DOWN_THRESHOLD;
+- dev_priv->rps.down_ei = now;
+- }
++ if (prev->cz_clock) {
++ u64 time, c0;
++ unsigned int mul;
++
++ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
++ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
++ mul <<= 8;
+
+- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+- if (vlv_c0_above(dev_priv,
+- &dev_priv->rps.up_ei, &now,
+- dev_priv->rps.up_threshold))
+- events |= GEN6_PM_RP_UP_THRESHOLD;
+- dev_priv->rps.up_ei = now;
++ time = now.cz_clock - prev->cz_clock;
++ time *= dev_priv->czclk_freq;
++
++ /* Workload can be split between render + media,
++ * e.g. SwapBuffers being blitted in X after being rendered in
++ * mesa. To account for this we need to combine both engines
++ * into our activity counter.
++ */
++ c0 = now.render_c0 - prev->render_c0;
++ c0 += now.media_c0 - prev->media_c0;
++ c0 *= mul;
++
++ if (c0 > time * dev_priv->rps.up_threshold)
++ events = GEN6_PM_RP_UP_THRESHOLD;
++ else if (c0 < time * dev_priv->rps.down_threshold)
++ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
++ dev_priv->rps.ei = now;
+ return events;
+ }
+
+@@ -4178,7 -4161,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+ /* Let's track the enabled rps events */
+ if (IS_VALLEYVIEW(dev_priv))
+ /* WaGsvRC0ResidencyMethod:vlv */
+- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
++ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+
+@@ -4216,6 +4199,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
+ if (!IS_GEN2(dev_priv))
+ dev->vblank_disable_immediate = true;
+
++ /* Most platforms treat the display irq block as an always-on
++ * power domain. vlv/chv can disable it at runtime and need
++ * special care to avoid writing any of the display block registers
++ * outside of the power domain. We defer setting up the display irqs
++ * in this case to the runtime pm.
++ */
++ dev_priv->display_irqs_enabled = true;
++ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
++ dev_priv->display_irqs_enabled = false;
++
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 891c86aef99d..59231312c4e0 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3677,10 +3677,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
+ /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
+ crtc->base.mode = crtc->base.state->mode;
+
+- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
+- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
+- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+-
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+@@ -4805,23 +4801,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc->config->scaler_state;
+
+- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+-
+ if (crtc->config->pch_pfit.enabled) {
+ int id;
+
+- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
+- DRM_ERROR("Requesting pfit without getting a scaler first\n");
++ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+ return;
+- }
+
+ id = scaler_state->scaler_id;
+ I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+-
+- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
+ }
+ }
+
+@@ -14895,17 +14885,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
+ to_intel_atomic_state(old_crtc_state->state);
+ bool modeset = needs_modeset(crtc->state);
+
++ if (!modeset &&
++ (intel_cstate->base.color_mgmt_changed ||
++ intel_cstate->update_pipe)) {
++ intel_color_set_csc(crtc->state);
++ intel_color_load_luts(crtc->state);
++ }
++
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(intel_crtc);
+
+ if (modeset)
+ goto out;
+
+- if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
+- intel_color_set_csc(crtc->state);
+- intel_color_load_luts(crtc->state);
+- }
+-
+ if (intel_cstate->update_pipe)
+ intel_update_pipe_config(intel_crtc, old_intel_cstate);
+ else if (INTEL_GEN(dev_priv) >= 9)
+@@ -16497,12 +16489,11 @@ int intel_modeset_init(struct drm_device *dev)
+ }
+ }
+
+- intel_update_czclk(dev_priv);
+- intel_update_cdclk(dev_priv);
+- dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+-
+ intel_shared_dpll_init(dev);
+
++ intel_update_czclk(dev_priv);
++ intel_modeset_init_hw(dev);
++
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev_priv);
+
+@@ -17057,8 +17048,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
+
+ intel_init_gt_powersave(dev_priv);
+
+- intel_modeset_init_hw(dev);
+-
+ intel_setup_overlay(dev_priv);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index f4a8c4fc57c4..c20ca8e08390 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ bool *enabled, int width, int height)
+ {
+ struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
+- unsigned long conn_configured, mask;
++ unsigned long conn_configured, conn_seq, mask;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+- int pass = 0;
+
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
+ if (!save_enabled)
+@@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ mask = BIT(count) - 1;
+ conn_configured = 0;
+ retry:
++ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+@@ -387,7 +387,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ if (conn_configured & BIT(i))
+ continue;
+
+- if (pass == 0 && !connector->has_tile)
++ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+@@ -498,10 +498,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ conn_configured |= BIT(i);
+ }
+
+- if ((conn_configured & mask) != mask) {
+- pass++;
++ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+- }
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to have the
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index fb88e32e25a3..fe8f8a4c384e 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -1293,16 +1293,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
+
+ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
+ {
+- struct drm_device *dev = crtc_state->base.crtc->dev;
++ struct drm_i915_private *dev_priv =
++ to_i915(crtc_state->base.crtc->dev);
++ struct drm_atomic_state *state = crtc_state->base.state;
++ struct drm_connector_state *connector_state;
++ struct drm_connector *connector;
++ int i;
+
+- if (HAS_GMCH_DISPLAY(to_i915(dev)))
++ if (HAS_GMCH_DISPLAY(dev_priv))
+ return false;
+
+ /*
+ * HDMI 12bpc affects the clocks, so it's only possible
+ * when not cloning with other encoder types.
+ */
+- return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
++ if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
++ return false;
++
++ for_each_connector_in_state(state, connector, connector_state, i) {
++ const struct drm_display_info *info = &connector->display_info;
++
++ if (connector_state->crtc != crtc_state->base.crtc)
++ continue;
++
++ if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
++ return false;
++ }
++
++ return true;
+ }
+
+ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
+index 3d546c019de0..b782f22856f8 100644
+--- a/drivers/gpu/drm/i915/intel_hotplug.c
++++ b/drivers/gpu/drm/i915/intel_hotplug.c
+@@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
+ }
+ }
+ }
+- if (dev_priv->display.hpd_irq_setup)
++ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+@@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ }
+ }
+
+- if (storm_detected)
++ if (storm_detected && dev_priv->display_irqs_enabled)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock(&dev_priv->irq_lock);
+
+@@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
+ * Interrupt setup is already guaranteed to be single-threaded, this is
+ * just to make the assert_spin_locked checks happy.
+ */
+- spin_lock_irq(&dev_priv->irq_lock);
+- if (dev_priv->display.hpd_irq_setup)
+- dev_priv->display.hpd_irq_setup(dev_priv);
+- spin_unlock_irq(&dev_priv->irq_lock);
++ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
++ spin_lock_irq(&dev_priv->irq_lock);
++ if (dev_priv->display_irqs_enabled)
++ dev_priv->display.hpd_irq_setup(dev_priv);
++ spin_unlock_irq(&dev_priv->irq_lock);
++ }
+ }
+
+ static void i915_hpd_poll_init_work(struct work_struct *work)
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index ae2c0bb4b2e8..3af22cf865f4 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4876,6 +4876,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+ break;
+ }
+
++ /* When byt can survive without system hang with dynamic
++ * sw freq adjustments, this restriction can be lifted.
++ */
++ if (IS_VALLEYVIEW(dev_priv))
++ goto skip_hw_write;
++
+ I915_WRITE(GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ I915_WRITE(GEN6_RP_UP_THRESHOLD,
+@@ -4896,6 +4902,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
++skip_hw_write:
+ dev_priv->rps.power = new_power;
+ dev_priv->rps.up_threshold = threshold_up;
+ dev_priv->rps.down_threshold = threshold_down;
+@@ -4906,8 +4913,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
+ {
+ u32 mask = 0;
+
++ /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
+ if (val > dev_priv->rps.min_freq_softlimit)
+- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
++ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ if (val < dev_priv->rps.max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+@@ -5007,7 +5015,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
+ {
+ mutex_lock(&dev_priv->rps.hw_lock);
+ if (dev_priv->rps.enabled) {
+- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
++ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
+ gen6_rps_reset_ei(dev_priv);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+@@ -7895,10 +7903,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
++ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+- * for @timeout_base_ms and if this times out for another 10 ms with
++ * for @timeout_base_ms and if this times out for another 50 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+@@ -7934,14 +7942,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+- * requests. Increase the timeout from @timeout_base_ms to 10ms to
++ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+- * requests.
++ * requests, and for any quirks of the PCODE firmware that delays
++ * the request completion.
+ */
+ DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+ WARN_ON_ONCE(timeout_base_ms > 3);
+ preempt_disable();
+- ret = wait_for_atomic(COND, 10);
++ ret = wait_for_atomic(COND, 50);
+ preempt_enable();
+
+ out:
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index 0bffd3f0c15d..2e4fbed3a826 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
+
+ for_each_fw_domain_masked(d, fw_domains, dev_priv)
+ fw_domain_wait_ack(d);
++
++ dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
+
+ static void
+@@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
+ fw_domain_put(d);
+ fw_domain_posting_read(d);
+ }
++
++ dev_priv->uncore.fw_domains_active &= ~fw_domains;
+ }
+
+ static void
+@@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
+ if (WARN_ON(domain->wake_count == 0))
+ domain->wake_count++;
+
+- if (--domain->wake_count == 0) {
++ if (--domain->wake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+- dev_priv->uncore.fw_domains_active &= ~domain->mask;
+- }
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+@@ -455,10 +457,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ fw_domains &= ~domain->mask;
+ }
+
+- if (fw_domains) {
++ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+- dev_priv->uncore.fw_domains_active |= fw_domains;
+- }
+ }
+
+ /**
+@@ -962,7 +962,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+ fw_domain_arm_timer(domain);
+
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+- dev_priv->uncore.fw_domains_active |= fw_domains;
+ }
+
+ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+index 6005e14213ca..662705e31136 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
+ entry->skb->data, length,
+ rt2x00usb_interrupt_txdone, entry);
+
+- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
+ status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+ if (status) {
+- usb_unanchor_urb(entry_priv->urb);
+ if (status == -ENODEV)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
+ entry->skb->data, entry->skb->len,
+ rt2x00usb_interrupt_rxdone, entry);
+
+- usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
+ status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
+ if (status) {
+- usb_unanchor_urb(entry_priv->urb);
+ if (status == -ENODEV)
+ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+@@ -824,10 +820,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
+ if (retval)
+ goto exit_free_device;
+
+- retval = rt2x00lib_probe_dev(rt2x00dev);
+- if (retval)
+- goto exit_free_reg;
+-
+ rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+ sizeof(struct usb_anchor),
+ GFP_KERNEL);
+@@ -835,10 +827,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
+ retval = -ENOMEM;
+ goto exit_free_reg;
+ }
+-
+ init_usb_anchor(rt2x00dev->anchor);
++
++ retval = rt2x00lib_probe_dev(rt2x00dev);
++ if (retval)
++ goto exit_free_anchor;
++
+ return 0;
+
++exit_free_anchor:
++ usb_kill_anchored_urbs(rt2x00dev->anchor);
++
+ exit_free_reg:
+ rt2x00usb_free_reg(rt2x00dev);
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index e5a6f248697b..15421e625a12 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -208,6 +208,10 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
+ } else
+ goto outerr;
+ }
++
++ if (IS_ERR(mirror->mirror_ds))
++ goto outerr;
++
+ if (mirror->mirror_ds->ds == NULL) {
+ struct nfs4_deviceid_node *devid;
+ devid = &mirror->mirror_ds->id_node;
+diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
+index b0ced669427e..c4ab6fdf17a0 100644
+--- a/fs/orangefs/devorangefs-req.c
++++ b/fs/orangefs/devorangefs-req.c
+@@ -400,8 +400,9 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
+ /* remove the op from the in progress hash table */
+ op = orangefs_devreq_remove_op(head.tag);
+ if (!op) {
+- gossip_err("WARNING: No one's waiting for tag %llu\n",
+- llu(head.tag));
++ gossip_debug(GOSSIP_DEV_DEBUG,
++ "%s: No one's waiting for tag %llu\n",
++ __func__, llu(head.tag));
+ return ret;
+ }
+
+diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
+index 27e75cf28b3a..791912da97d7 100644
+--- a/fs/orangefs/orangefs-debugfs.c
++++ b/fs/orangefs/orangefs-debugfs.c
+@@ -967,13 +967,13 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+ int ret;
+
+ ret = copy_from_user(&client_debug_array_string,
+- (void __user *)arg,
+- ORANGEFS_MAX_DEBUG_STRING_LEN);
++ (void __user *)arg,
++ ORANGEFS_MAX_DEBUG_STRING_LEN);
+
+ if (ret != 0) {
+ pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+ __func__);
+- return -EIO;
++ return -EFAULT;
+ }
+
+ /*
+@@ -988,17 +988,18 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+ */
+ client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN - 1] =
+ '\0';
+-
++
+ pr_info("%s: client debug array string has been received.\n",
+ __func__);
+
+ if (!help_string_initialized) {
+
+ /* Build a proper debug help string. */
+- if (orangefs_prepare_debugfs_help_string(0)) {
++ ret = orangefs_prepare_debugfs_help_string(0);
++ if (ret) {
+ gossip_err("%s: no debug help string \n",
+ __func__);
+- return -EIO;
++ return ret;
+ }
+
+ }
+@@ -1011,7 +1012,7 @@ int orangefs_debugfs_new_client_string(void __user *arg)
+
+ help_string_initialized++;
+
+- return ret;
++ return 0;
+ }
+
+ int orangefs_debugfs_new_debug(void __user *arg)
+diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
+index a3d84ffee905..f380f9ed1b28 100644
+--- a/fs/orangefs/orangefs-dev-proto.h
++++ b/fs/orangefs/orangefs-dev-proto.h
+@@ -50,8 +50,7 @@
+ * Misc constants. Please retain them as multiples of 8!
+ * Otherwise 32-64 bit interactions will be messed up :)
+ */
+-#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000400
+-#define ORANGEFS_MAX_DEBUG_ARRAY_LEN 0x00000800
++#define ORANGEFS_MAX_DEBUG_STRING_LEN 0x00000800
+
+ /*
+ * The maximum number of directory entries in a single request is 96.
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c59fcc79ba32..5c919933a39b 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4177,8 +4177,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
+ goto out;
+ if (po->tp_version >= TPACKET_V3 &&
+- (int)(req->tp_block_size -
+- BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
++ req->tp_block_size <=
++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+ goto out;
+ if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ po->tp_reserve))