From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.8 commit in: /
Date: Mon, 31 Oct 2016 12:34:18
Message-Id: 1477900886.27f3971a11c5ea990bee74d6c8fa48eed0fecadb.alicef@gentoo
commit: 27f3971a11c5ea990bee74d6c8fa48eed0fecadb
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 31 08:01:26 2016 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Oct 31 08:01:26 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=27f3971a

Linux patch 4.8.6

0000_README | 8 +-
1005_linux-4.8.6.patch | 5137 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5143 insertions(+), 2 deletions(-)

diff --git a/0000_README b/0000_README
index a5b48e4..cd373fa 100644
--- a/0000_README
+++ b/0000_README
@@ -59,9 +59,13 @@ Patch: 1003_linux-4.8.4.patch
From: http://www.kernel.org
Desc: Linux 4.8.4

-Patch: 1003_linux-4.8.4.patch
+Patch: 1004_linux-4.8.5.patch
From: http://www.kernel.org
-Desc: Linux 4.8.4
+Desc: Linux 4.8.5
+
+Patch: 1005_linux-4.8.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.8.6

Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644

diff --git a/1005_linux-4.8.6.patch b/1005_linux-4.8.6.patch
new file mode 100644
index 0000000..e2d7370
--- /dev/null
+++ b/1005_linux-4.8.6.patch
@@ -0,0 +1,5137 @@
+diff --git a/Makefile b/Makefile
+index daa3a01d2525..b249529204cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 8
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Psychotic Stoned Sheep
+
+diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
+index 1c6a040218e3..e2e9599596e2 100644
+--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
++++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
+@@ -51,14 +51,6 @@
+ regulator-boot-on;
+ };
+
+- veth: fixedregulator@0 {
+- compatible = "regulator-fixed";
+- regulator-name = "veth";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- regulator-boot-on;
+- };
+-
+ xtal24mhz: xtal24mhz@24M {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+@@ -134,16 +126,15 @@
+ bank-width = <4>;
+ };
+
+- /* SMSC 9118 ethernet with PHY and EEPROM */
++ /* SMSC LAN91C111 ethernet with PHY and EEPROM */
+ ethernet: ethernet@4e000000 {
+- compatible = "smsc,lan9118", "smsc,lan9115";
++ compatible = "smsc,lan91c111";
+ reg = <0x4e000000 0x10000>;
+- phy-mode = "mii";
+- reg-io-width = <4>;
+- smsc,irq-active-high;
+- smsc,irq-push-pull;
+- vdd33a-supply = <&veth>;
+- vddvario-supply = <&veth>;
++ /*
++ * This means the adapter can be accessed with 8, 16 or
++ * 32 bit reads/writes.
++ */
++ reg-io-width = <7>;
+ };
+
+ usb: usb@4f000000 {
+diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
+index 03b8bbeb694f..652418aa2700 100644
+--- a/arch/arm/boot/dts/bcm958625hr.dts
++++ b/arch/arm/boot/dts/bcm958625hr.dts
+@@ -47,7 +47,8 @@
+ };
+
+ memory {
+- reg = <0x60000000 0x20000000>;
++ device_type = "memory";
++ reg = <0x60000000 0x80000000>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+index ca86da68220c..854117dc0b77 100644
+--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
++++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+@@ -119,7 +119,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcspi1_pins>;
+
+- lcd0: display {
++ lcd0: display@1 {
+ compatible = "lgphilips,lb035q02";
+ label = "lcd35";
+
+diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
+index f68b3242b33a..3f528a379288 100644
+--- a/arch/arm/boot/dts/sun9i-a80.dtsi
++++ b/arch/arm/boot/dts/sun9i-a80.dtsi
+@@ -899,8 +899,7 @@
+ resets = <&apbs_rst 0>;
+ gpio-controller;
+ interrupt-controller;
+- #address-cells = <1>;
+- #size-cells = <0>;
++ #interrupt-cells = <3>;
+ #gpio-cells = <3>;
+
+ r_ir_pins: r_ir {
+diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
+index 1568cb5cd870..b88364aa149a 100644
+--- a/arch/arm/crypto/ghash-ce-glue.c
++++ b/arch/arm/crypto/ghash-ce-glue.c
+@@ -220,6 +220,27 @@ static int ghash_async_digest(struct ahash_request *req)
+ }
+ }
+
++static int ghash_async_import(struct ahash_request *req, const void *in)
++{
++ struct ahash_request *cryptd_req = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++
++ desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
++ desc->flags = req->base.flags;
++
++ return crypto_shash_import(desc, in);
++}
++
++static int ghash_async_export(struct ahash_request *req, void *out)
++{
++ struct ahash_request *cryptd_req = ahash_request_ctx(req);
++ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++
++ return crypto_shash_export(desc, out);
++}
++
+ static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+@@ -268,7 +289,10 @@ static struct ahash_alg ghash_async_alg = {
+ .final = ghash_async_final,
+ .setkey = ghash_async_setkey,
+ .digest = ghash_async_digest,
++ .import = ghash_async_import,
++ .export = ghash_async_export,
+ .halg.digestsize = GHASH_DIGEST_SIZE,
++ .halg.statesize = sizeof(struct ghash_desc_ctx),
+ .halg.base = {
+ .cra_name = "ghash",
+ .cra_driver_name = "ghash-ce",
+diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
+index d9206811be9b..c71c483f410e 100644
+--- a/arch/arm/mach-pxa/corgi_pm.c
++++ b/arch/arm/mach-pxa/corgi_pm.c
+@@ -131,16 +131,11 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
+ return is_resume;
+ }
+
+-static unsigned long corgi_charger_wakeup(void)
++static bool corgi_charger_wakeup(void)
+ {
+- unsigned long ret;
+-
+- ret = (!gpio_get_value(CORGI_GPIO_AC_IN) << GPIO_bit(CORGI_GPIO_AC_IN))
+- | (!gpio_get_value(CORGI_GPIO_KEY_INT)
+- << GPIO_bit(CORGI_GPIO_KEY_INT))
+- | (!gpio_get_value(CORGI_GPIO_WAKEUP)
+- << GPIO_bit(CORGI_GPIO_WAKEUP));
+- return ret;
++ return !gpio_get_value(CORGI_GPIO_AC_IN) ||
++ !gpio_get_value(CORGI_GPIO_KEY_INT) ||
++ !gpio_get_value(CORGI_GPIO_WAKEUP);
+ }
+
+ unsigned long corgipm_read_devdata(int type)
+diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
+index 2385052b0ce1..e362f865fcd2 100644
+--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
++++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
+@@ -41,30 +41,35 @@ static irqreturn_t cplds_irq_handler(int in_irq, void *d)
+ unsigned long pending;
+ unsigned int bit;
+
+- pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+- for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+- generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
++ do {
++ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
++ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) {
++ generic_handle_irq(irq_find_mapping(fpga->irqdomain,
++ bit));
++ }
++ } while (pending);
+
+ return IRQ_HANDLED;
+ }
+
+-static void cplds_irq_mask_ack(struct irq_data *d)
++static void cplds_irq_mask(struct irq_data *d)
+ {
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+- unsigned int set, bit = BIT(cplds_irq);
++ unsigned int bit = BIT(cplds_irq);
+
+ fpga->irq_mask &= ~bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+- set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+- writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
+ }
+
+ static void cplds_irq_unmask(struct irq_data *d)
+ {
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+- unsigned int bit = BIT(cplds_irq);
++ unsigned int set, bit = BIT(cplds_irq);
++
++ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
++ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
+
+ fpga->irq_mask |= bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+@@ -72,7 +77,8 @@ static void cplds_irq_unmask(struct irq_data *d)
+
+ static struct irq_chip cplds_irq_chip = {
+ .name = "pxa_cplds",
+- .irq_mask_ack = cplds_irq_mask_ack,
++ .irq_ack = cplds_irq_mask,
++ .irq_mask = cplds_irq_mask,
+ .irq_unmask = cplds_irq_unmask,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+ };
+diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
+index b80eab9993c5..249b7bd5fbc4 100644
+--- a/arch/arm/mach-pxa/sharpsl_pm.c
++++ b/arch/arm/mach-pxa/sharpsl_pm.c
+@@ -744,7 +744,7 @@ static int sharpsl_off_charge_battery(void)
+ time = RCNR;
+ while (1) {
+ /* Check if any wakeup event had occurred */
+- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
++ if (sharpsl_pm.machinfo->charger_wakeup())
+ return 0;
+ /* Check for timeout */
+ if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
+diff --git a/arch/arm/mach-pxa/sharpsl_pm.h b/arch/arm/mach-pxa/sharpsl_pm.h
+index 905be6755f04..fa75b6df8134 100644
+--- a/arch/arm/mach-pxa/sharpsl_pm.h
++++ b/arch/arm/mach-pxa/sharpsl_pm.h
+@@ -34,7 +34,7 @@ struct sharpsl_charger_machinfo {
+ #define SHARPSL_STATUS_LOCK 5
+ #define SHARPSL_STATUS_CHRGFULL 6
+ #define SHARPSL_STATUS_FATAL 7
+- unsigned long (*charger_wakeup)(void);
++ bool (*charger_wakeup)(void);
+ int (*should_wakeup)(unsigned int resume_on_alarm);
+ void (*backlight_limit)(int);
+ int (*backlight_get_status) (void);
+diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
+index ea9f9034cb54..4e64a140252e 100644
+--- a/arch/arm/mach-pxa/spitz_pm.c
++++ b/arch/arm/mach-pxa/spitz_pm.c
+@@ -165,13 +165,10 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm)
+ return is_resume;
+ }
+
+-static unsigned long spitz_charger_wakeup(void)
++static bool spitz_charger_wakeup(void)
+ {
+- unsigned long ret;
+- ret = ((!gpio_get_value(SPITZ_GPIO_KEY_INT)
+- << GPIO_bit(SPITZ_GPIO_KEY_INT))
+- | gpio_get_value(SPITZ_GPIO_SYNC));
+- return ret;
++ return !gpio_get_value(SPITZ_GPIO_KEY_INT) ||
++ gpio_get_value(SPITZ_GPIO_SYNC);
+ }
+
+ unsigned long spitzpm_read_devdata(int type)
+diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
+index 263bf39ced40..9bd84ba06ec4 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
+@@ -6,6 +6,8 @@
+ */
+ #define _PAGE_BIT_SWAP_TYPE 0
+
++#define _PAGE_RO 0
++
+ #define _PAGE_EXEC 0x00001 /* execute permission */
+ #define _PAGE_WRITE 0x00002 /* write access allowed */
+ #define _PAGE_READ 0x00004 /* read access allowed */
+diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
+index 64174bf95611..05a0a913ec38 100644
+--- a/arch/powerpc/kernel/nvram_64.c
++++ b/arch/powerpc/kernel/nvram_64.c
+@@ -956,7 +956,7 @@ int __init nvram_remove_partition(const char *name, int sig,
+
+ /* Make partition a free partition */
+ part->header.signature = NVRAM_SIG_FREE;
+- strncpy(part->header.name, "wwwwwwwwwwww", 12);
++ memset(part->header.name, 'w', 12);
+ part->header.checksum = nvram_checksum(&part->header);
+ rc = nvram_write_header(part);
+ if (rc <= 0) {
+@@ -974,8 +974,8 @@ int __init nvram_remove_partition(const char *name, int sig,
+ }
+ if (prev) {
+ prev->header.length += part->header.length;
+- prev->header.checksum = nvram_checksum(&part->header);
+- rc = nvram_write_header(part);
++ prev->header.checksum = nvram_checksum(&prev->header);
++ rc = nvram_write_header(prev);
+ if (rc <= 0) {
+ printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+ return rc;
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 9ee2623e0f67..ad37aa175f59 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -88,7 +88,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
+ set_thread_flag(TIF_RESTORE_TM);
+ }
+ }
++
++static inline bool msr_tm_active(unsigned long msr)
++{
++ return MSR_TM_ACTIVE(msr);
++}
+ #else
++static inline bool msr_tm_active(unsigned long msr) { return false; }
+ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+@@ -208,7 +214,7 @@ void enable_kernel_fp(void)
+ EXPORT_SYMBOL(enable_kernel_fp);
+
+ static int restore_fp(struct task_struct *tsk) {
+- if (tsk->thread.load_fp) {
++ if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
+ load_fp_state(&current->thread.fp_state);
+ current->thread.load_fp++;
+ return 1;
+@@ -278,7 +284,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+
+ static int restore_altivec(struct task_struct *tsk)
+ {
+- if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
++ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
++ (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
+ load_vr_state(&tsk->thread.vr_state);
+ tsk->thread.used_vr = 1;
+ tsk->thread.load_vec++;
+@@ -438,6 +445,7 @@ void giveup_all(struct task_struct *tsk)
+ return;
+
+ msr_check_and_set(msr_all_available);
++ check_if_tm_restore_required(tsk);
+
+ #ifdef CONFIG_PPC_FPU
+ if (usermsr & MSR_FP)
+@@ -464,7 +472,8 @@ void restore_math(struct pt_regs *regs)
+ {
+ unsigned long msr;
+
+- if (!current->thread.load_fp && !loadvec(current->thread))
++ if (!msr_tm_active(regs->msr) &&
++ !current->thread.load_fp && !loadvec(current->thread))
+ return;
+
+ msr = regs->msr;
+@@ -983,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs)
+ msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
+ msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+
++ /* Ensure that restore_math() will restore */
++ if (msr_diff & MSR_FP)
++ current->thread.load_fp = 1;
++#ifdef CONFIG_ALTIVEC
++ if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
++ current->thread.load_vec = 1;
++#endif
+ restore_math(regs);
+
+ regs->msr |= msr_diff;
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 7372ee13eb1e..a5d3ecdabc44 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -1019,8 +1019,15 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+
+ pte = READ_ONCE(*ptep);
+ mask = _PAGE_PRESENT | _PAGE_READ;
++
++ /*
++ * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
++ * as 0 and _PAGE_RO has to be set when a page is not writable
++ */
+ if (write)
+ mask |= _PAGE_WRITE;
++ else
++ mask |= _PAGE_RO;
+
+ if ((pte_val(pte) & mask) != mask)
+ return 0;
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index de7501edb21c..8b8852bc2f4a 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -317,16 +317,11 @@ static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+ static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+ size_t stolen_size)
+ {
+- u16 toud;
++ u16 toud = 0;
+
+- /*
+- * FIXME is the graphics stolen memory region
+- * always at TOUD? Ie. is it always the last
+- * one to be allocated by the BIOS?
+- */
+ toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+- return (phys_addr_t)toud << 16;
++ return (phys_addr_t)(toud << 16) + i845_tseg_size();
+ }
+
+ static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 70a892e87ccb..f624ac98c94e 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -117,7 +117,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ struct crypto_skcipher *ctr = ctx->ctr;
+ struct {
+ be128 hash;
+- u8 iv[8];
++ u8 iv[16];
+
+ struct crypto_gcm_setkey_result result;
+
+diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
+index 01d4be2c354b..f5c26a5f6875 100644
+--- a/drivers/char/hw_random/omap-rng.c
++++ b/drivers/char/hw_random/omap-rng.c
+@@ -385,7 +385,7 @@ static int omap_rng_probe(struct platform_device *pdev)
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+- if (ret) {
++ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_ioremap;
+@@ -443,7 +443,7 @@ static int __maybe_unused omap_rng_resume(struct device *dev)
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret) {
++ if (ret < 0) {
+ dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index 7a7970865c2d..0fc71cbaa440 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -1006,16 +1006,28 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
+ return 0;
+ }
+
++static bool
++bcm2835_clk_is_pllc(struct clk_hw *hw)
++{
++ if (!hw)
++ return false;
++
++ return strncmp(clk_hw_get_name(hw), "pllc", 4) == 0;
++}
++
+ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+ {
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct clk_hw *parent, *best_parent = NULL;
++ bool current_parent_is_pllc;
+ unsigned long rate, best_rate = 0;
+ unsigned long prate, best_prate = 0;
+ size_t i;
+ u32 div;
+
++ current_parent_is_pllc = bcm2835_clk_is_pllc(clk_hw_get_parent(hw));
++
+ /*
+ * Select parent clock that results in the closest but lower rate
+ */
+@@ -1023,6 +1035,17 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
++
++ /*
++ * Don't choose a PLLC-derived clock as our parent
++ * unless it had been manually set that way. PLLC's
++ * frequency gets adjusted by the firmware due to
++ * over-temp or under-voltage conditions, without
++ * prior notification to our clock consumer.
++ */
++ if (bcm2835_clk_is_pllc(parent) && !current_parent_is_pllc)
++ continue;
++
+ prate = clk_hw_get_rate(parent);
+ div = bcm2835_clock_choose_div(hw, req->rate, prate, true);
+ rate = bcm2835_clock_rate_from_divisor(clock, prate, div);
+diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
+index a0f55bc1ad3d..96386ffc8483 100644
+--- a/drivers/clk/clk-divider.c
++++ b/drivers/clk/clk-divider.c
+@@ -352,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+- bestdiv = readl(divider->reg) >> divider->shift;
++ bestdiv = clk_readl(divider->reg) >> divider->shift;
+ bestdiv &= div_mask(divider->width);
+ bestdiv = _get_div(divider->table, bestdiv, divider->flags,
+ divider->width);
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 58566a17944a..20b105584f82 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -766,7 +766,11 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+ if (!hwc)
+ return NULL;
+
+- hwc->reg = cg->regs + 0x20 * idx;
++ if (cg->info.flags & CG_VER3)
++ hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
++ else
++ hwc->reg = cg->regs + 0x20 * idx;
++
+ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
+
+ /*
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 820a939fb6bb..2877a4ddeda2 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -1908,10 +1908,6 @@ int clk_set_phase(struct clk *clk, int degrees)
+
+ clk_prepare_lock();
+
+- /* bail early if nothing to do */
+- if (degrees == clk->core->phase)
+- goto out;
+-
+ trace_clk_set_phase(clk->core, degrees);
+
+ if (clk->core->ops->set_phase)
+@@ -1922,7 +1918,6 @@ int clk_set_phase(struct clk *clk, int degrees)
+ if (!ret)
+ clk->core->phase = degrees;
+
+-out:
+ clk_prepare_unlock();
+
+ return ret;
+@@ -3186,7 +3181,7 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+ {
+ struct of_clk_provider *provider;
+ struct clk *clk = ERR_PTR(-EPROBE_DEFER);
+- struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
++ struct clk_hw *hw;
+
+ if (!clkspec)
+ return ERR_PTR(-EINVAL);
+@@ -3194,12 +3189,13 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+ /* Check if we have such a provider in our array */
+ mutex_lock(&of_clk_mutex);
+ list_for_each_entry(provider, &of_clk_providers, link) {
+- if (provider->node == clkspec->np)
++ if (provider->node == clkspec->np) {
+ hw = __of_clk_get_hw_from_provider(provider, clkspec);
+- if (!IS_ERR(hw)) {
+ clk = __clk_create_clk(hw, dev_id, con_id);
++ }
+
+- if (!IS_ERR(clk) && !__clk_get(clk)) {
++ if (!IS_ERR(clk)) {
++ if (!__clk_get(clk)) {
+ __clk_free_clk(clk);
+ clk = ERR_PTR(-ENOENT);
+ }
+diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
+index b0978d3b83e2..d302ed3b8225 100644
+--- a/drivers/clk/imx/clk-imx35.c
++++ b/drivers/clk/imx/clk-imx35.c
+@@ -115,7 +115,7 @@ static void __init _mx35_clocks_init(void)
+ }
+
+ clk[ckih] = imx_clk_fixed("ckih", 24000000);
+- clk[ckil] = imx_clk_fixed("ckih", 32768);
++ clk[ckil] = imx_clk_fixed("ckil", 32768);
+ clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
+ clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
+
+diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
+index 95e3b3e0fa1c..98909b184d44 100644
+--- a/drivers/clk/qcom/Kconfig
++++ b/drivers/clk/qcom/Kconfig
+@@ -117,6 +117,7 @@ config MSM_MMCC_8974
+
+ config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
++ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+@@ -126,6 +127,7 @@ config MSM_GCC_8996
+ config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
++ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index bbf732bbc3fd..9f643cca85d0 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -2592,9 +2592,9 @@ static struct clk_branch gcc_pcie_2_aux_clk = {
+ };
+
+ static struct clk_branch gcc_pcie_2_pipe_clk = {
+- .halt_reg = 0x6e108,
++ .halt_reg = 0x6e018,
+ .clkr = {
+- .enable_reg = 0x6e108,
++ .enable_reg = 0x6e018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index 94f77b0f9ae7..32f645ea77b8 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -650,7 +650,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
+ dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ ccp->name);
+- if (!dma_cmd_cache_name)
++ if (!dma_desc_cache_name)
+ return -ENOMEM;
+ ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
+ sizeof(struct ccp_dma_desc),
+diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
+index d64af8625d7e..37dadb2a4feb 100644
+--- a/drivers/crypto/marvell/cesa.c
++++ b/drivers/crypto/marvell/cesa.c
+@@ -166,6 +166,7 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
+ if (!req)
+ break;
+
++ ctx = crypto_tfm_ctx(req->tfm);
+ mv_cesa_complete_req(ctx, req, 0);
+ }
+ }
+diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
+index 82e0f4e6eb1c..b111e14bac1e 100644
+--- a/drivers/crypto/marvell/hash.c
++++ b/drivers/crypto/marvell/hash.c
+@@ -805,13 +805,14 @@ static int mv_cesa_md5_init(struct ahash_request *req)
+ struct mv_cesa_op_ctx tmpl = { };
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
++
++ mv_cesa_ahash_init(req, &tmpl, true);
++
+ creq->state[0] = MD5_H0;
+ creq->state[1] = MD5_H1;
+ creq->state[2] = MD5_H2;
+ creq->state[3] = MD5_H3;
+
+- mv_cesa_ahash_init(req, &tmpl, true);
+-
+ return 0;
+ }
+
+@@ -873,14 +874,15 @@ static int mv_cesa_sha1_init(struct ahash_request *req)
+ struct mv_cesa_op_ctx tmpl = { };
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
++
++ mv_cesa_ahash_init(req, &tmpl, false);
++
+ creq->state[0] = SHA1_H0;
+ creq->state[1] = SHA1_H1;
+ creq->state[2] = SHA1_H2;
+ creq->state[3] = SHA1_H3;
+ creq->state[4] = SHA1_H4;
+
+- mv_cesa_ahash_init(req, &tmpl, false);
+-
+ return 0;
+ }
+
+@@ -942,6 +944,9 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
+ struct mv_cesa_op_ctx tmpl = { };
+
+ mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
++
++ mv_cesa_ahash_init(req, &tmpl, false);
++
+ creq->state[0] = SHA256_H0;
+ creq->state[1] = SHA256_H1;
+ creq->state[2] = SHA256_H2;
+@@ -951,8 +956,6 @@ static int mv_cesa_sha256_init(struct ahash_request *req)
+ creq->state[6] = SHA256_H6;
+ creq->state[7] = SHA256_H7;
+
+- mv_cesa_ahash_init(req, &tmpl, false);
+-
+ return 0;
+ }
+
+diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
+index 2bf37e68ad0f..dd184b50e5b4 100644
+--- a/drivers/dma/ipu/ipu_irq.c
++++ b/drivers/dma/ipu/ipu_irq.c
+@@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc)
+ raw_spin_unlock(&bank_lock);
+ while ((line = ffs(status))) {
+ struct ipu_irq_map *map;
+- unsigned int irq = NO_IRQ;
++ unsigned int irq;
+
+ line--;
+ status &= ~(1UL << line);
+
+ raw_spin_lock(&bank_lock);
+ map = src2map(32 * i + line);
+- if (map)
+- irq = map->irq;
+- raw_spin_unlock(&bank_lock);
+-
+ if (!map) {
++ raw_spin_unlock(&bank_lock);
+ pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
+ line, i);
+ continue;
+ }
++ irq = map->irq;
++ raw_spin_unlock(&bank_lock);
+ generic_handle_irq(irq);
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 17e13621fae9..4e71a680e91b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+ ctx->rings[i].sequence = 1;
+ ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
+ }
++
++ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
++
+ /* create context entity for each ring */
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+index fe36caf1b7d7..14f57d9915e3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+ printk("\n");
+ }
+
++
+ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+ {
+ struct drm_device *dev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+- u32 line_time_us, vblank_lines;
++ u32 vblank_in_pixels;
+ u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+- amdgpu_crtc->hw_mode.clock;
+- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
++ vblank_in_pixels =
++ amdgpu_crtc->hw_mode.crtc_htotal *
++ (amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+- (amdgpu_crtc->v_border * 2);
+- vblank_time_us = vblank_lines * line_time_us;
++ (amdgpu_crtc->v_border * 2));
++
++ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+ break;
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index d942654a1de0..e24a8af72d90 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -292,7 +292,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ ring_mask = adev->uvd.ring.ready ? 1 : 0;
+ ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+- ib_size_alignment = 8;
++ ib_size_alignment = 16;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+index c1b04e9aab57..172bed946287 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+@@ -425,16 +425,6 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+- /* don't try to enable hpd on eDP or LVDS avoid breaking the
+- * aux dp channel on imac and help (but not completely fix)
+- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+- * also avoid interrupt storms during dpms.
+- */
+- continue;
+- }
+-
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ idx = 0;
+@@ -458,6 +448,19 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
+ continue;
+ }
+
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
++ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
++ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
++ continue;
++ }
++
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+index d4bf133908b1..67c7c05a751c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+@@ -443,16 +443,6 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+- /* don't try to enable hpd on eDP or LVDS avoid breaking the
+- * aux dp channel on imac and help (but not completely fix)
+- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+- * also avoid interrupt storms during dpms.
+- */
+- continue;
+- }
+-
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ idx = 0;
+@@ -476,6 +466,19 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
+ continue;
+ }
+
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
++ tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
++ WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
++ continue;
++ }
++
+ tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
+ tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
+ WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
+@@ -3109,6 +3112,7 @@ static int dce_v11_0_sw_fini(void *handle)
+
+ dce_v11_0_afmt_fini(adev);
+
++ drm_mode_config_cleanup(adev->ddev);
+ adev->mode_info.mode_config_initialized = false;
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+index 4fdfab1e9200..ea07c50369b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+@@ -395,15 +395,6 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
+- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+- /* don't try to enable hpd on eDP or LVDS avoid breaking the
+- * aux dp channel on imac and help (but not completely fix)
+- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+- * also avoid interrupt storms during dpms.
+- */
+- continue;
+- }
+ switch (amdgpu_connector->hpd.hpd) {
+ case AMDGPU_HPD_1:
+ WREG32(mmDC_HPD1_CONTROL, tmp);
+@@ -426,6 +417,45 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
+ default:
+ break;
+ }
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl;
++
++ switch (amdgpu_connector->hpd.hpd) {
++ case AMDGPU_HPD_1:
++ dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL;
++ break;
++ case AMDGPU_HPD_2:
++ dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL;
++ break;
++ case AMDGPU_HPD_3:
++ dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL;
++ break;
++ case AMDGPU_HPD_4:
++ dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL;
++ break;
++ case AMDGPU_HPD_5:
++ dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL;
++ break;
++ case AMDGPU_HPD_6:
++ dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL;
++ break;
++ default:
++ continue;
++ }
++
++ dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg);
++ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
++ WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl);
++ continue;
++ }
++
+ dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
+ amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+ }
+diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+index 635fc4b48184..92b117843875 100644
+--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
++++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
+ unblock_adjust_power_state_tasks,
+ set_cpu_power_state,
+ notify_hw_power_source_tasks,
++ get_2d_performance_state_tasks,
++ set_performance_state_tasks,
+ /* updateDALConfigurationTasks,
+ variBrightDisplayConfigurationChangeTasks, */
+ adjust_power_state_tasks,
+diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+index a46225c0fc01..d6bee727497c 100644
+--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
++++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+@@ -100,11 +100,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
+ if (requested == NULL)
+ return 0;
+
++ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
++
+ if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
+ equal = false;
+
+ if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
+- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+ phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
+ hwmgr->current_ps = requested;
+ }
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+index 780589b420a4..9c4387d79d11 100644
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -335,14 +335,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ * using the PRIME helpers.
+ */
+ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+- struct drm_gem_object *obj, int flags)
++ struct drm_gem_object *obj,
++ int flags)
+ {
+- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+-
+- exp_info.ops = &drm_gem_prime_dmabuf_ops;
+- exp_info.size = obj->size;
+- exp_info.flags = flags;
+- exp_info.priv = obj;
++ struct dma_buf_export_info exp_info = {
++ .exp_name = KBUILD_MODNAME, /* white lie for debug */
++ .owner = dev->driver->fops->owner,
++ .ops = &drm_gem_prime_dmabuf_ops,
++ .size = obj->size,
++ .flags = flags,
++ .priv = obj,
++ };
+
+ if (dev->driver->gem_prime_res_obj)
+ exp_info.resv = dev->driver->gem_prime_res_obj(obj);
+diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+index 7882387f9bff..5fc8ebdf40b2 100644
+--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+@@ -330,6 +330,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
+ const char *pix_clk_in_name;
+ const struct of_device_id *id;
+ int ret;
++ u8 div_ratio_shift = 0;
+
+ fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL);
+ if (!fsl_dev)
+@@ -382,11 +383,14 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
+ pix_clk_in = fsl_dev->clk;
+ }
+
++ if (of_property_read_bool(dev->of_node, "big-endian"))
++ div_ratio_shift = 24;
++
+ pix_clk_in_name = __clk_get_name(pix_clk_in);
+ snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
+ fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
+ pix_clk_in_name, 0, base + DCU_DIV_RATIO,
+- 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
++ div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+ if (IS_ERR(fsl_dev->pix_clk)) {
+ dev_err(dev, "failed to register pix clk\n");
+ ret = PTR_ERR(fsl_dev->pix_clk);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index f68c78918d63..84a00105871d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -631,6 +631,8 @@ struct drm_i915_display_funcs {
+ struct intel_crtc_state *crtc_state);
+ void (*crtc_enable)(struct drm_crtc *crtc);
+ void (*crtc_disable)(struct drm_crtc *crtc);
++ void (*update_crtcs)(struct drm_atomic_state *state,
++ unsigned int *crtc_vblank_mask);
+ void (*audio_codec_enable)(struct drm_connector *connector,
+ struct intel_encoder *encoder,
+ const struct drm_display_mode *adjusted_mode);
+@@ -1965,11 +1967,11 @@ struct drm_i915_private {
+ struct vlv_s0ix_state vlv_s0ix_state;
+
+ enum {
+- I915_SKL_SAGV_UNKNOWN = 0,
+- I915_SKL_SAGV_DISABLED,
+- I915_SKL_SAGV_ENABLED,
+- I915_SKL_SAGV_NOT_CONTROLLED
+- } skl_sagv_status;
++ I915_SAGV_UNKNOWN = 0,
++ I915_SAGV_DISABLED,
++ I915_SAGV_ENABLED,
++ I915_SAGV_NOT_CONTROLLED
++ } sagv_status;
+
+ struct {
+ /*
+@@ -2280,21 +2282,19 @@ struct drm_i915_gem_object {
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+
+- union {
+- /** for phy allocated objects */
+- struct drm_dma_handle *phys_handle;
+-
+- struct i915_gem_userptr {
+- uintptr_t ptr;
+- unsigned read_only :1;
+- unsigned workers :4;
++ struct i915_gem_userptr {
++ uintptr_t ptr;
++ unsigned read_only :1;
++ unsigned workers :4;
+ #define I915_GEM_USERPTR_MAX_WORKERS 15
+
+- struct i915_mm_struct *mm;
+- struct i915_mmu_object *mmu_object;
+- struct work_struct *work;
+- } userptr;
+- };
++ struct i915_mm_struct *mm;
++ struct i915_mmu_object *mmu_object;
++ struct work_struct *work;
++ } userptr;
++
++ /** for phys allocated objects */
++ struct drm_dma_handle *phys_handle;
+ };
+ #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index 66be299a1486..2bb69f3c5b84 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -115,17 +115,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+
+ base = bsm & INTEL_BSM_MASK;
+ } else if (IS_I865G(dev)) {
++ u32 tseg_size = 0;
+ u16 toud = 0;
++ u8 tmp;
++
++ pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
++ I845_ESMRAMC, &tmp);
++
++ if (tmp & TSEG_ENABLE) {
++ switch (tmp & I845_TSEG_SIZE_MASK) {
++ case I845_TSEG_SIZE_512K:
++ tseg_size = KB(512);
++ break;
++ case I845_TSEG_SIZE_1M:
++ tseg_size = MB(1);
++ break;
++ }
++ }
+
+- /*
+- * FIXME is the graphics stolen memory region
+- * always at TOUD? Ie. is it always the last
+- * one to be allocated by the BIOS?
+- */
+ pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
+ I865_TOUD, &toud);
+
+- base = toud << 16;
++ base = (toud << 16) + tseg_size;
+ } else if (IS_I85X(dev)) {
+ u32 tseg_size = 0;
+ u32 tom;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 175595fc3e45..e9a64fba6333 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2980,6 +2980,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
++ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride_div, stride;
+ u32 tile_height, plane_offset, plane_size;
+@@ -3031,6 +3032,9 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
+ intel_crtc->adjusted_x = x_offset;
+ intel_crtc->adjusted_y = y_offset;
+
++ if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
++ skl_write_plane_wm(intel_crtc, wm, 0);
++
+ I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
+ I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+@@ -3061,7 +3065,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+- int pipe = to_intel_crtc(crtc)->pipe;
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ int pipe = intel_crtc->pipe;
++
++ /*
++ * We only populate skl_results on watermark updates, and if the
++ * plane's visibility isn't actually changing, neither are its watermarks.
++ */
++ if (!to_intel_plane_state(crtc->primary->state)->visible)
++ skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);
+
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+@@ -8995,6 +9007,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dpll |= DPLL_SDVO_HIGH_SPEED;
+
++ /*
++ * The high speed IO clock is only really required for
++ * SDVO/HDMI/DP, but we also enable it for CRT to make it
++ * possible to share the DPLL between CRT and HDMI. Enabling
++ * the clock needlessly does no real harm, except use up a
++ * bit of power potentially.
++ *
++ * We'll limit this to IVB with 3 pipes, since it has only two
++ * DPLLs and so DPLL sharing is the only way to get three pipes
++ * driving PCH ports at the same time. On SNB we could do this,
++ * and potentially avoid enabling the second DPLL, but it's not
++ * clear if it's a win or loss power wise. No point in doing
++ * this on ILK at all since it has a fixed DPLL<->pipe mapping.
++ */
++ if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
++ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
++ dpll |= DPLL_SDVO_HIGH_SPEED;
++
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ /* also FPA1 */
+@@ -10306,9 +10336,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
+ int pipe = intel_crtc->pipe;
+ uint32_t cntl = 0;
+
++ if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc))
++ skl_write_cursor_wm(intel_crtc, wm);
++
+ if (plane_state && plane_state->visible) {
+ cntl = MCURSOR_GAMMA_ENABLE;
+ switch (plane_state->base.crtc_w) {
+@@ -12956,16 +12990,23 @@ static void verify_wm_state(struct drm_crtc *crtc,
+ hw_entry->start, hw_entry->end);
+ }
+
+- /* cursor */
+- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+-
+- if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
+- DRM_ERROR("mismatch in DDB state pipe %c cursor "
+- "(expected (%u,%u), found (%u,%u))\n",
+- pipe_name(pipe),
+- sw_entry->start, sw_entry->end,
+- hw_entry->start, hw_entry->end);
++ /*
++ * cursor
++ * If the cursor plane isn't active, we may not have updated its ddb
++ * allocation. In that case since the ddb allocation will be updated
++ * once the plane becomes visible, we can skip this check
++ */
++ if (intel_crtc->cursor_addr) {
++ hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
++ sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
++
++ if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
++ DRM_ERROR("mismatch in DDB state pipe %c cursor "
++ "(expected (%u,%u), found (%u,%u))\n",
++ pipe_name(pipe),
++ sw_entry->start, sw_entry->end,
++ hw_entry->start, hw_entry->end);
++ }
+ }
+ }
+
+@@ -13671,6 +13712,111 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
+ return false;
+ }
+
++static void intel_update_crtc(struct drm_crtc *crtc,
++ struct drm_atomic_state *state,
++ struct drm_crtc_state *old_crtc_state,
++ unsigned int *crtc_vblank_mask)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = to_i915(dev);
++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++ struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
++ bool modeset = needs_modeset(crtc->state);
++
++ if (modeset) {
++ update_scanline_offset(intel_crtc);
++ dev_priv->display.crtc_enable(crtc);
++ } else {
++ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
++ }
++
++ if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
++ intel_fbc_enable(
++ intel_crtc, pipe_config,
++ to_intel_plane_state(crtc->primary->state));
++ }
++
++ drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
++
++ if (needs_vblank_wait(pipe_config))
++ *crtc_vblank_mask |= drm_crtc_mask(crtc);
++}
++
++static void intel_update_crtcs(struct drm_atomic_state *state,
++ unsigned int *crtc_vblank_mask)
++{
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *old_crtc_state;
++ int i;
++
++ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
++ if (!crtc->state->active)
++ continue;
++
++ intel_update_crtc(crtc, state, old_crtc_state,
++ crtc_vblank_mask);
++ }
++}
++
++static void skl_update_crtcs(struct drm_atomic_state *state,
++ unsigned int *crtc_vblank_mask)
++{
++ struct drm_device *dev = state->dev;
++ struct drm_i915_private *dev_priv = to_i915(dev);
++ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
++ struct drm_crtc *crtc;
++ struct drm_crtc_state *old_crtc_state;
++ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
++ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
++ unsigned int updated = 0;
++ bool progress;
++ enum pipe pipe;
++
++ /*
++ * Whenever the number of active pipes changes, we need to make sure we
++ * update the pipes in the right order so that their ddb allocations
++ * never overlap with each other in between CRTC updates. Otherwise we'll
++ * cause pipe underruns and other bad stuff.
++ */
++ do {
++ int i;
++ progress = false;
++
++ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
++ bool vbl_wait = false;
++ unsigned int cmask = drm_crtc_mask(crtc);
++ pipe = to_intel_crtc(crtc)->pipe;
++
++ if (updated & cmask || !crtc->state->active)
++ continue;
++ if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
++ pipe))
++ continue;
++
++ updated |= cmask;
++
++ /*
++ * If this is an already active pipe, its DDB changed,
1398 ++ * and this isn't the last pipe that needs updating
1399 ++ * then we need to wait for a vblank to pass for the
1400 ++ * new ddb allocation to take effect.
1401 ++ */
1402 ++ if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
1403 ++ !crtc->state->active_changed &&
1404 ++ intel_state->wm_results.dirty_pipes != updated)
1405 ++ vbl_wait = true;
1406 ++
1407 ++ intel_update_crtc(crtc, state, old_crtc_state,
1408 ++ crtc_vblank_mask);
1409 ++
1410 ++ if (vbl_wait)
1411 ++ intel_wait_for_vblank(dev, pipe);
1412 ++
1413 ++ progress = true;
1414 ++ }
1415 ++ } while (progress);
1416 ++}
1417 ++
1418 + static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1419 + {
1420 + struct drm_device *dev = state->dev;
1421 +@@ -13763,23 +13909,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1422 + * SKL workaround: bspec recommends we disable the SAGV when we
1423 + * have more then one pipe enabled
1424 + */
1425 +- if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
1426 +- skl_disable_sagv(dev_priv);
1427 ++ if (!intel_can_enable_sagv(state))
1428 ++ intel_disable_sagv(dev_priv);
1429 +
1430 + intel_modeset_verify_disabled(dev);
1431 + }
1432 +
1433 +- /* Now enable the clocks, plane, pipe, and connectors that we set up. */
1434 ++ /* Complete the events for pipes that have now been disabled */
1435 + for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
1436 +- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1437 + bool modeset = needs_modeset(crtc->state);
1438 +- struct intel_crtc_state *pipe_config =
1439 +- to_intel_crtc_state(crtc->state);
1440 +-
1441 +- if (modeset && crtc->state->active) {
1442 +- update_scanline_offset(to_intel_crtc(crtc));
1443 +- dev_priv->display.crtc_enable(crtc);
1444 +- }
1445 +
1446 + /* Complete events for now disable pipes here. */
1447 + if (modeset && !crtc->state->active && crtc->state->event) {
1448 +@@ -13789,21 +13927,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1449 +
1450 + crtc->state->event = NULL;
1451 + }
1452 +-
1453 +- if (!modeset)
1454 +- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
1455 +-
1456 +- if (crtc->state->active &&
1457 +- drm_atomic_get_existing_plane_state(state, crtc->primary))
1458 +- intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
1459 +-
1460 +- if (crtc->state->active)
1461 +- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
1462 +-
1463 +- if (pipe_config->base.active && needs_vblank_wait(pipe_config))
1464 +- crtc_vblank_mask |= 1 << i;
1465 + }
1466 +
1467 ++ /* Now enable the clocks, plane, pipe, and connectors that we set up. */
1468 ++ dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
1469 ++
1470 + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
1471 + * already, but still need the state for the delayed optimization. To
1472 + * fix this:
1473 +@@ -13839,9 +13967,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1474 + intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
1475 + }
1476 +
1477 +- if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
1478 +- skl_can_enable_sagv(state))
1479 +- skl_enable_sagv(dev_priv);
1480 ++ if (intel_state->modeset && intel_can_enable_sagv(state))
1481 ++ intel_enable_sagv(dev_priv);
1482 +
1483 + drm_atomic_helper_commit_hw_done(state);
1484 +
1485 +@@ -14221,10 +14348,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
1486 + struct drm_crtc_state *old_crtc_state)
1487 + {
1488 + struct drm_device *dev = crtc->dev;
1489 ++ struct drm_i915_private *dev_priv = to_i915(dev);
1490 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1491 + struct intel_crtc_state *old_intel_state =
1492 + to_intel_crtc_state(old_crtc_state);
1493 + bool modeset = needs_modeset(crtc->state);
1494 ++ enum pipe pipe = intel_crtc->pipe;
1495 +
1496 + /* Perform vblank evasion around commit operation */
1497 + intel_pipe_update_start(intel_crtc);
1498 +@@ -14239,8 +14368,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
1499 +
1500 + if (to_intel_crtc_state(crtc->state)->update_pipe)
1501 + intel_update_pipe_config(intel_crtc, old_intel_state);
1502 +- else if (INTEL_INFO(dev)->gen >= 9)
1503 ++ else if (INTEL_GEN(dev_priv) >= 9) {
1504 + skl_detach_scalers(intel_crtc);
1505 ++
1506 ++ I915_WRITE(PIPE_WM_LINETIME(pipe),
1507 ++ dev_priv->wm.skl_hw.wm_linetime[pipe]);
1508 ++ }
1509 + }
1510 +
1511 + static void intel_finish_crtc_commit(struct drm_crtc *crtc,
1512 +@@ -15347,6 +15480,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
1513 + skl_modeset_calc_cdclk;
1514 + }
1515 +
1516 ++ if (dev_priv->info.gen >= 9)
1517 ++ dev_priv->display.update_crtcs = skl_update_crtcs;
1518 ++ else
1519 ++ dev_priv->display.update_crtcs = intel_update_crtcs;
1520 ++
1521 + switch (INTEL_INFO(dev_priv)->gen) {
1522 + case 2:
1523 + dev_priv->display.queue_flip = intel_gen2_queue_flip;
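
The skl_update_crtcs() path added above replaces the old three-pass DDB flush with a fixed-point loop: on every pass it commits each not-yet-updated pipe whose new DDB allocation no longer overlaps any other pipe's current allocation, then repeats until a pass makes no progress. A minimal standalone sketch of that ordering follows; the struct and helper names are hypothetical stand-ins, not the driver's own types, and it assumes the new allocations are mutually disjoint so the loop can always make progress.

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry { int start, end; };           /* [start, end) */

/* Same half-open interval test as skl_ddb_entries_overlap(). */
static bool overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

/* Commit new_alloc[] over cur[] pipe by pipe, never programming a pipe
 * whose new range still overlaps another pipe's currently programmed
 * range. */
static void commit_in_safe_order(struct ddb_entry *cur,
				 const struct ddb_entry *new_alloc, int n)
{
	bool done[8] = { false };
	bool progress;
	int i, j;

	do {
		progress = false;
		for (i = 0; i < n; i++) {
			bool safe = true;

			if (done[i])
				continue;
			for (j = 0; j < n; j++)
				if (j != i && overlaps(&new_alloc[i], &cur[j]))
					safe = false;
			if (!safe)
				continue;

			cur[i] = new_alloc[i];  /* "program" pipe i */
			done[i] = true;
			progress = true;
			printf("updated pipe %d\n", i);
		}
	} while (progress);     /* exits once nothing more can move safely */
}

int main(void)
{
	/* Pipe 0 grows into space pipe 1 is shrinking out of: pipe 1
	 * must be committed first, which the loop discovers by itself. */
	struct ddb_entry cur[2]       = { { 0, 256 }, { 256, 896 } };
	struct ddb_entry new_alloc[2] = { { 0, 448 }, { 448, 896 } };

	commit_in_safe_order(cur, new_alloc, 2);
	return 0;
}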
1524 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1525 +index 21b04c3eda41..1ca155f4d368 100644
1526 +--- a/drivers/gpu/drm/i915/intel_dp.c
1527 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1528 +@@ -4148,7 +4148,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
1529 + *
1530 + * Return %true if @port is connected, %false otherwise.
1531 + */
1532 +-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1533 ++static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1534 + struct intel_digital_port *port)
1535 + {
1536 + if (HAS_PCH_IBX(dev_priv))
1537 +@@ -4207,7 +4207,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
1538 + intel_dp->has_audio = false;
1539 + }
1540 +
1541 +-static void
1542 ++static enum drm_connector_status
1543 + intel_dp_long_pulse(struct intel_connector *intel_connector)
1544 + {
1545 + struct drm_connector *connector = &intel_connector->base;
1546 +@@ -4232,7 +4232,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
1547 + else
1548 + status = connector_status_disconnected;
1549 +
1550 +- if (status != connector_status_connected) {
1551 ++ if (status == connector_status_disconnected) {
1552 + intel_dp->compliance_test_active = 0;
1553 + intel_dp->compliance_test_type = 0;
1554 + intel_dp->compliance_test_data = 0;
1555 +@@ -4284,8 +4284,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
1556 + intel_dp->aux.i2c_defer_count = 0;
1557 +
1558 + intel_dp_set_edid(intel_dp);
1559 +-
1560 +- status = connector_status_connected;
1561 ++ if (is_edp(intel_dp) || intel_connector->detect_edid)
1562 ++ status = connector_status_connected;
1563 + intel_dp->detect_done = true;
1564 +
1565 + /* Try to read the source of the interrupt */
1566 +@@ -4303,12 +4303,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
1567 + }
1568 +
1569 + out:
1570 +- if ((status != connector_status_connected) &&
1571 +- (intel_dp->is_mst == false))
1572 ++ if (status != connector_status_connected && !intel_dp->is_mst)
1573 + intel_dp_unset_edid(intel_dp);
1574 +
1575 + intel_display_power_put(to_i915(dev), power_domain);
1576 +- return;
1577 ++ return status;
1578 + }
1579 +
1580 + static enum drm_connector_status
1581 +@@ -4317,7 +4316,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1582 + struct intel_dp *intel_dp = intel_attached_dp(connector);
1583 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1584 + struct intel_encoder *intel_encoder = &intel_dig_port->base;
1585 +- struct intel_connector *intel_connector = to_intel_connector(connector);
1586 ++ enum drm_connector_status status = connector->status;
1587 +
1588 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1589 + connector->base.id, connector->name);
1590 +@@ -4332,14 +4331,11 @@ intel_dp_detect(struct drm_connector *connector, bool force)
1591 +
1592 + /* If full detect is not performed yet, do a full detect */
1593 + if (!intel_dp->detect_done)
1594 +- intel_dp_long_pulse(intel_dp->attached_connector);
1595 ++ status = intel_dp_long_pulse(intel_dp->attached_connector);
1596 +
1597 + intel_dp->detect_done = false;
1598 +
1599 +- if (is_edp(intel_dp) || intel_connector->detect_edid)
1600 +- return connector_status_connected;
1601 +- else
1602 +- return connector_status_disconnected;
1603 ++ return status;
1604 + }
1605 +
1606 + static void
1607 +@@ -4696,36 +4692,34 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1608 + port_name(intel_dig_port->port),
1609 + long_hpd ? "long" : "short");
1610 +
1611 ++ if (long_hpd) {
1612 ++ intel_dp->detect_done = false;
1613 ++ return IRQ_NONE;
1614 ++ }
1615 ++
1616 + power_domain = intel_display_port_aux_power_domain(intel_encoder);
1617 + intel_display_power_get(dev_priv, power_domain);
1618 +
1619 +- if (long_hpd) {
1620 +- intel_dp_long_pulse(intel_dp->attached_connector);
1621 +- if (intel_dp->is_mst)
1622 +- ret = IRQ_HANDLED;
1623 +- goto put_power;
1624 +-
1625 +- } else {
1626 +- if (intel_dp->is_mst) {
1627 +- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
1628 +- /*
1629 +- * If we were in MST mode, and device is not
1630 +- * there, get out of MST mode
1631 +- */
1632 +- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
1633 +- intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
1634 +- intel_dp->is_mst = false;
1635 +- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
1636 +- intel_dp->is_mst);
1637 +- goto put_power;
1638 +- }
1639 ++ if (intel_dp->is_mst) {
1640 ++ if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
1641 ++ /*
1642 ++ * If we were in MST mode, and device is not
1643 ++ * there, get out of MST mode
1644 ++ */
1645 ++ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
1646 ++ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
1647 ++ intel_dp->is_mst = false;
1648 ++ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
1649 ++ intel_dp->is_mst);
1650 ++ intel_dp->detect_done = false;
1651 ++ goto put_power;
1652 + }
1653 ++ }
1654 +
1655 +- if (!intel_dp->is_mst) {
1656 +- if (!intel_dp_short_pulse(intel_dp)) {
1657 +- intel_dp_long_pulse(intel_dp->attached_connector);
1658 +- goto put_power;
1659 +- }
1660 ++ if (!intel_dp->is_mst) {
1661 ++ if (!intel_dp_short_pulse(intel_dp)) {
1662 ++ intel_dp->detect_done = false;
1663 ++ goto put_power;
1664 + }
1665 + }
1666 +
1667 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1668 +index ff399b9a5c1f..9a58800cba3b 100644
1669 +--- a/drivers/gpu/drm/i915/intel_drv.h
1670 ++++ b/drivers/gpu/drm/i915/intel_drv.h
1671 +@@ -236,6 +236,7 @@ struct intel_panel {
1672 + bool enabled;
1673 + bool combination_mode; /* gen 2/4 only */
1674 + bool active_low_pwm;
1675 ++ bool alternate_pwm_increment; /* lpt+ */
1676 +
1677 + /* PWM chip */
1678 + bool util_pin_active_low; /* bxt+ */
1679 +@@ -1387,8 +1388,6 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
1680 + void intel_edp_drrs_invalidate(struct drm_device *dev,
1681 + unsigned frontbuffer_bits);
1682 + void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
1683 +-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
1684 +- struct intel_digital_port *port);
1685 +
1686 + void
1687 + intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
1688 +@@ -1716,9 +1715,21 @@ void ilk_wm_get_hw_state(struct drm_device *dev);
1689 + void skl_wm_get_hw_state(struct drm_device *dev);
1690 + void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1691 + struct skl_ddb_allocation *ddb /* out */);
1692 +-bool skl_can_enable_sagv(struct drm_atomic_state *state);
1693 +-int skl_enable_sagv(struct drm_i915_private *dev_priv);
1694 +-int skl_disable_sagv(struct drm_i915_private *dev_priv);
1695 ++bool intel_can_enable_sagv(struct drm_atomic_state *state);
1696 ++int intel_enable_sagv(struct drm_i915_private *dev_priv);
1697 ++int intel_disable_sagv(struct drm_i915_private *dev_priv);
1698 ++bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
1699 ++ const struct skl_ddb_allocation *new,
1700 ++ enum pipe pipe);
1701 ++bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
1702 ++ const struct skl_ddb_allocation *old,
1703 ++ const struct skl_ddb_allocation *new,
1704 ++ enum pipe pipe);
1705 ++void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
1706 ++ const struct skl_wm_values *wm);
1707 ++void skl_write_plane_wm(struct intel_crtc *intel_crtc,
1708 ++ const struct skl_wm_values *wm,
1709 ++ int plane);
1710 + uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
1711 + bool ilk_disable_lp_wm(struct drm_device *dev);
1712 + int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
1713 +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
1714 +index 4df9f384910c..c3aa9e670d15 100644
1715 +--- a/drivers/gpu/drm/i915/intel_hdmi.c
1716 ++++ b/drivers/gpu/drm/i915/intel_hdmi.c
1717 +@@ -1422,24 +1422,22 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
1718 + }
1719 +
1720 + static bool
1721 +-intel_hdmi_set_edid(struct drm_connector *connector, bool force)
1722 ++intel_hdmi_set_edid(struct drm_connector *connector)
1723 + {
1724 + struct drm_i915_private *dev_priv = to_i915(connector->dev);
1725 + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1726 +- struct edid *edid = NULL;
1727 ++ struct edid *edid;
1728 + bool connected = false;
1729 +
1730 +- if (force) {
1731 +- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1732 ++ intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1733 +
1734 +- edid = drm_get_edid(connector,
1735 +- intel_gmbus_get_adapter(dev_priv,
1736 +- intel_hdmi->ddc_bus));
1737 ++ edid = drm_get_edid(connector,
1738 ++ intel_gmbus_get_adapter(dev_priv,
1739 ++ intel_hdmi->ddc_bus));
1740 +
1741 +- intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
1742 ++ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
1743 +
1744 +- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1745 +- }
1746 ++ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
1747 +
1748 + to_intel_connector(connector)->detect_edid = edid;
1749 + if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
1750 +@@ -1465,37 +1463,16 @@ static enum drm_connector_status
1751 + intel_hdmi_detect(struct drm_connector *connector, bool force)
1752 + {
1753 + enum drm_connector_status status;
1754 +- struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1755 + struct drm_i915_private *dev_priv = to_i915(connector->dev);
1756 +- bool live_status = false;
1757 +- unsigned int try;
1758 +
1759 + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1760 + connector->base.id, connector->name);
1761 +
1762 + intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
1763 +
1764 +- for (try = 0; !live_status && try < 9; try++) {
1765 +- if (try)
1766 +- msleep(10);
1767 +- live_status = intel_digital_port_connected(dev_priv,
1768 +- hdmi_to_dig_port(intel_hdmi));
1769 +- }
1770 +-
1771 +- if (!live_status) {
1772 +- DRM_DEBUG_KMS("HDMI live status down\n");
1773 +- /*
1774 +- * Live status register is not reliable on all intel platforms.
1775 +- * So consider live_status only for certain platforms, for
1776 +- * others, read EDID to determine presence of sink.
1777 +- */
1778 +- if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
1779 +- live_status = true;
1780 +- }
1781 +-
1782 + intel_hdmi_unset_edid(connector);
1783 +
1784 +- if (intel_hdmi_set_edid(connector, live_status)) {
1785 ++ if (intel_hdmi_set_edid(connector)) {
1786 + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1787 +
1788 + hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1789 +@@ -1521,7 +1498,7 @@ intel_hdmi_force(struct drm_connector *connector)
1790 + if (connector->status != connector_status_connected)
1791 + return;
1792 +
1793 +- intel_hdmi_set_edid(connector, true);
1794 ++ intel_hdmi_set_edid(connector);
1795 + hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1796 + }
1797 +
1798 +diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
1799 +index 96c65d77e886..9a2393a6b277 100644
1800 +--- a/drivers/gpu/drm/i915/intel_panel.c
1801 ++++ b/drivers/gpu/drm/i915/intel_panel.c
1802 +@@ -841,7 +841,7 @@ static void lpt_enable_backlight(struct intel_connector *connector)
1803 + {
1804 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1805 + struct intel_panel *panel = &connector->panel;
1806 +- u32 pch_ctl1, pch_ctl2;
1807 ++ u32 pch_ctl1, pch_ctl2, schicken;
1808 +
1809 + pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
1810 + if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
1811 +@@ -850,6 +850,22 @@ static void lpt_enable_backlight(struct intel_connector *connector)
1812 + I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
1813 + }
1814 +
1815 ++ if (HAS_PCH_LPT(dev_priv)) {
1816 ++ schicken = I915_READ(SOUTH_CHICKEN2);
1817 ++ if (panel->backlight.alternate_pwm_increment)
1818 ++ schicken |= LPT_PWM_GRANULARITY;
1819 ++ else
1820 ++ schicken &= ~LPT_PWM_GRANULARITY;
1821 ++ I915_WRITE(SOUTH_CHICKEN2, schicken);
1822 ++ } else {
1823 ++ schicken = I915_READ(SOUTH_CHICKEN1);
1824 ++ if (panel->backlight.alternate_pwm_increment)
1825 ++ schicken |= SPT_PWM_GRANULARITY;
1826 ++ else
1827 ++ schicken &= ~SPT_PWM_GRANULARITY;
1828 ++ I915_WRITE(SOUTH_CHICKEN1, schicken);
1829 ++ }
1830 ++
1831 + pch_ctl2 = panel->backlight.max << 16;
1832 + I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
1833 +
1834 +@@ -1242,10 +1258,10 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1835 + */
1836 + static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1837 + {
1838 +- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1839 ++ struct intel_panel *panel = &connector->panel;
1840 + u32 mul;
1841 +
1842 +- if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
1843 ++ if (panel->backlight.alternate_pwm_increment)
1844 + mul = 128;
1845 + else
1846 + mul = 16;
1847 +@@ -1261,9 +1277,10 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1848 + static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1849 + {
1850 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1851 ++ struct intel_panel *panel = &connector->panel;
1852 + u32 mul, clock;
1853 +
1854 +- if (I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY)
1855 ++ if (panel->backlight.alternate_pwm_increment)
1856 + mul = 16;
1857 + else
1858 + mul = 128;
1859 +@@ -1414,6 +1431,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
1860 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1861 + struct intel_panel *panel = &connector->panel;
1862 + u32 pch_ctl1, pch_ctl2, val;
1863 ++ bool alt;
1864 ++
1865 ++ if (HAS_PCH_LPT(dev_priv))
1866 ++ alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
1867 ++ else
1868 ++ alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
1869 ++ panel->backlight.alternate_pwm_increment = alt;
1870 +
1871 + pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
1872 + panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
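
The alternate_pwm_increment flag cached above feeds the Hz-to-PWM conversions later in this hunk: the granularity chicken bit scales the PWM counter's increment, so the same target frequency needs dividers a factor of 8 apart depending on the bit. A back-of-the-envelope sketch, assuming the 24 MHz reference clock the SPT path uses (the LPT path derives its clock differently, so treat the constant as an assumption):

#include <stdio.h>

/* Rough model of spt_hz_to_pwm(): max duty-cycle count for a target
 * backlight frequency. The 24 MHz reference is an assumption here. */
static unsigned int hz_to_pwm_sketch(int alternate_increment,
				     unsigned int pwm_freq_hz)
{
	const unsigned int ref_hz = 24000000;
	unsigned int mul = alternate_increment ? 128 : 16;

	/* DIV_ROUND_UP, as the driver uses */
	return (ref_hz + pwm_freq_hz * mul - 1) / (pwm_freq_hz * mul);
}

int main(void)
{
	/* 200 Hz backlight: 7500 steps vs 938 steps. Consulting a stale
	 * or mismatched granularity bit would therefore program a
	 * frequency off by a factor of 8, which is what caching the bit
	 * once at setup avoids. */
	printf("mul=16:  %u\n", hz_to_pwm_sketch(0, 200));
	printf("mul=128: %u\n", hz_to_pwm_sketch(1, 200));
	return 0;
}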
1873 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1874 +index 2d2481392824..e59a28cb3158 100644
1875 +--- a/drivers/gpu/drm/i915/intel_pm.c
1876 ++++ b/drivers/gpu/drm/i915/intel_pm.c
1877 +@@ -2119,32 +2119,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
1878 + GEN9_MEM_LATENCY_LEVEL_MASK;
1879 +
1880 + /*
1881 ++ * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
1882 ++ * need to be disabled. We make sure to sanitize the values out
1883 ++ * of the punit to satisfy this requirement.
1884 ++ */
1885 ++ for (level = 1; level <= max_level; level++) {
1886 ++ if (wm[level] == 0) {
1887 ++ for (i = level + 1; i <= max_level; i++)
1888 ++ wm[i] = 0;
1889 ++ break;
1890 ++ }
1891 ++ }
1892 ++
1893 ++ /*
1894 + * WaWmMemoryReadLatency:skl
1895 + *
1896 + * punit doesn't take into account the read latency so we need
1897 +- * to add 2us to the various latency levels we retrieve from
1898 +- * the punit.
1899 +- * - W0 is a bit special in that it's the only level that
1900 +- * can't be disabled if we want to have display working, so
1901 +- * we always add 2us there.
1902 +- * - For levels >=1, punit returns 0us latency when they are
1903 +- * disabled, so we respect that and don't add 2us then
1904 +- *
1905 +- * Additionally, if a level n (n > 1) has a 0us latency, all
1906 +- * levels m (m >= n) need to be disabled. We make sure to
1907 +- * sanitize the values out of the punit to satisfy this
1908 +- * requirement.
1909 ++ * to add 2us to the various latency levels we retrieve from the
1910 ++ * punit when level 0 response data is 0us.
1911 + */
1912 +- wm[0] += 2;
1913 +- for (level = 1; level <= max_level; level++)
1914 +- if (wm[level] != 0)
1915 ++ if (wm[0] == 0) {
1916 ++ wm[0] += 2;
1917 ++ for (level = 1; level <= max_level; level++) {
1918 ++ if (wm[level] == 0)
1919 ++ break;
1920 + wm[level] += 2;
1921 +- else {
1922 +- for (i = level + 1; i <= max_level; i++)
1923 +- wm[i] = 0;
1924 +-
1925 +- break;
1926 + }
1927 ++ }
1928 ++
1929 + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1930 + uint64_t sskpd = I915_READ64(MCH_SSKPD);
1931 +
1932 +@@ -2876,6 +2878,19 @@ skl_wm_plane_id(const struct intel_plane *plane)
1933 + }
1934 + }
1935 +
1936 ++static bool
1937 ++intel_has_sagv(struct drm_i915_private *dev_priv)
1938 ++{
1939 ++ if (IS_KABYLAKE(dev_priv))
1940 ++ return true;
1941 ++
1942 ++ if (IS_SKYLAKE(dev_priv) &&
1943 ++ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
1944 ++ return true;
1945 ++
1946 ++ return false;
1947 ++}
1948 ++
1949 + /*
1950 + * SAGV dynamically adjusts the system agent voltage and clock frequencies
1951 + * depending on power and performance requirements. The display engine access
1952 +@@ -2888,12 +2903,14 @@ skl_wm_plane_id(const struct intel_plane *plane)
1953 + * - We're not using an interlaced display configuration
1954 + */
1955 + int
1956 +-skl_enable_sagv(struct drm_i915_private *dev_priv)
1957 ++intel_enable_sagv(struct drm_i915_private *dev_priv)
1958 + {
1959 + int ret;
1960 +
1961 +- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
1962 +- dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
1963 ++ if (!intel_has_sagv(dev_priv))
1964 ++ return 0;
1965 ++
1966 ++ if (dev_priv->sagv_status == I915_SAGV_ENABLED)
1967 + return 0;
1968 +
1969 + DRM_DEBUG_KMS("Enabling the SAGV\n");
1970 +@@ -2909,21 +2926,21 @@ skl_enable_sagv(struct drm_i915_private *dev_priv)
1971 + * Some skl systems, pre-release machines in particular,
1972 + * don't actually have an SAGV.
1973 + */
1974 +- if (ret == -ENXIO) {
1975 ++ if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
1976 + DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
1977 +- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
1978 ++ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
1979 + return 0;
1980 + } else if (ret < 0) {
1981 + DRM_ERROR("Failed to enable the SAGV\n");
1982 + return ret;
1983 + }
1984 +
1985 +- dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
1986 ++ dev_priv->sagv_status = I915_SAGV_ENABLED;
1987 + return 0;
1988 + }
1989 +
1990 + static int
1991 +-skl_do_sagv_disable(struct drm_i915_private *dev_priv)
1992 ++intel_do_sagv_disable(struct drm_i915_private *dev_priv)
1993 + {
1994 + int ret;
1995 + uint32_t temp = GEN9_SAGV_DISABLE;
1996 +@@ -2937,19 +2954,21 @@ skl_do_sagv_disable(struct drm_i915_private *dev_priv)
1997 + }
1998 +
1999 + int
2000 +-skl_disable_sagv(struct drm_i915_private *dev_priv)
2001 ++intel_disable_sagv(struct drm_i915_private *dev_priv)
2002 + {
2003 + int ret, result;
2004 +
2005 +- if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
2006 +- dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
2007 ++ if (!intel_has_sagv(dev_priv))
2008 ++ return 0;
2009 ++
2010 ++ if (dev_priv->sagv_status == I915_SAGV_DISABLED)
2011 + return 0;
2012 +
2013 + DRM_DEBUG_KMS("Disabling the SAGV\n");
2014 + mutex_lock(&dev_priv->rps.hw_lock);
2015 +
2016 + /* bspec says to keep retrying for at least 1 ms */
2017 +- ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
2018 ++ ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
2019 + mutex_unlock(&dev_priv->rps.hw_lock);
2020 +
2021 + if (ret == -ETIMEDOUT) {
2022 +@@ -2961,20 +2980,20 @@ skl_disable_sagv(struct drm_i915_private *dev_priv)
2023 + * Some skl systems, pre-release machines in particular,
2024 + * don't actually have an SAGV.
2025 + */
2026 +- if (result == -ENXIO) {
2027 ++ if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
2028 + DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2029 +- dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
2030 ++ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
2031 + return 0;
2032 + } else if (result < 0) {
2033 + DRM_ERROR("Failed to disable the SAGV\n");
2034 + return result;
2035 + }
2036 +
2037 +- dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
2038 ++ dev_priv->sagv_status = I915_SAGV_DISABLED;
2039 + return 0;
2040 + }
2041 +
2042 +-bool skl_can_enable_sagv(struct drm_atomic_state *state)
2043 ++bool intel_can_enable_sagv(struct drm_atomic_state *state)
2044 + {
2045 + struct drm_device *dev = state->dev;
2046 + struct drm_i915_private *dev_priv = to_i915(dev);
2047 +@@ -2983,6 +3002,9 @@ bool skl_can_enable_sagv(struct drm_atomic_state *state)
2048 + enum pipe pipe;
2049 + int level, plane;
2050 +
2051 ++ if (!intel_has_sagv(dev_priv))
2052 ++ return false;
2053 ++
2054 + /*
2055 + * SKL workaround: bspec recommends we disable the SAGV when we have
2056 + * more than one pipe enabled
2057 +@@ -3473,29 +3495,14 @@ static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latenc
2058 + }
2059 +
2060 + static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2061 +- uint32_t horiz_pixels, uint8_t cpp,
2062 +- uint64_t tiling, uint32_t latency)
2063 ++ uint32_t latency, uint32_t plane_blocks_per_line)
2064 + {
2065 + uint32_t ret;
2066 +- uint32_t plane_bytes_per_line, plane_blocks_per_line;
2067 + uint32_t wm_intermediate_val;
2068 +
2069 + if (latency == 0)
2070 + return UINT_MAX;
2071 +
2072 +- plane_bytes_per_line = horiz_pixels * cpp;
2073 +-
2074 +- if (tiling == I915_FORMAT_MOD_Y_TILED ||
2075 +- tiling == I915_FORMAT_MOD_Yf_TILED) {
2076 +- plane_bytes_per_line *= 4;
2077 +- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2078 +- plane_blocks_per_line /= 4;
2079 +- } else if (tiling == DRM_FORMAT_MOD_NONE) {
2080 +- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
2081 +- } else {
2082 +- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2083 +- }
2084 +-
2085 + wm_intermediate_val = latency * pixel_rate;
2086 + ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
2087 + plane_blocks_per_line;
2088 +@@ -3546,6 +3553,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2089 + uint8_t cpp;
2090 + uint32_t width = 0, height = 0;
2091 + uint32_t plane_pixel_rate;
2092 ++ uint32_t y_tile_minimum, y_min_scanlines;
2093 +
2094 + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
2095 + *enabled = false;
2096 +@@ -3561,38 +3569,51 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2097 + cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2098 + plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
2099 +
2100 ++ if (intel_rotation_90_or_270(pstate->rotation)) {
2101 ++ int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
2102 ++ drm_format_plane_cpp(fb->pixel_format, 1) :
2103 ++ drm_format_plane_cpp(fb->pixel_format, 0);
2104 ++
2105 ++ switch (cpp) {
2106 ++ case 1:
2107 ++ y_min_scanlines = 16;
2108 ++ break;
2109 ++ case 2:
2110 ++ y_min_scanlines = 8;
2111 ++ break;
2112 ++ default:
2113 ++ WARN(1, "Unsupported pixel depth for rotation");
2114 ++ case 4:
2115 ++ y_min_scanlines = 4;
2116 ++ break;
2117 ++ }
2118 ++ } else {
2119 ++ y_min_scanlines = 4;
2120 ++ }
2121 ++
2122 ++ plane_bytes_per_line = width * cpp;
2123 ++ if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
2124 ++ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
2125 ++ plane_blocks_per_line =
2126 ++ DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
2127 ++ plane_blocks_per_line /= y_min_scanlines;
2128 ++ } else if (fb->modifier[0] == DRM_FORMAT_MOD_NONE) {
2129 ++ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
2130 ++ + 1;
2131 ++ } else {
2132 ++ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2133 ++ }
2134 ++
2135 + method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
2136 + method2 = skl_wm_method2(plane_pixel_rate,
2137 + cstate->base.adjusted_mode.crtc_htotal,
2138 +- width,
2139 +- cpp,
2140 +- fb->modifier[0],
2141 +- latency);
2142 ++ latency,
2143 ++ plane_blocks_per_line);
2144 +
2145 +- plane_bytes_per_line = width * cpp;
2146 +- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
2147 ++ y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
2148 +
2149 + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
2150 + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
2151 +- uint32_t min_scanlines = 4;
2152 +- uint32_t y_tile_minimum;
2153 +- if (intel_rotation_90_or_270(pstate->rotation)) {
2154 +- int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
2155 +- drm_format_plane_cpp(fb->pixel_format, 1) :
2156 +- drm_format_plane_cpp(fb->pixel_format, 0);
2157 +-
2158 +- switch (cpp) {
2159 +- case 1:
2160 +- min_scanlines = 16;
2161 +- break;
2162 +- case 2:
2163 +- min_scanlines = 8;
2164 +- break;
2165 +- case 8:
2166 +- WARN(1, "Unsupported pixel depth for rotation");
2167 +- }
2168 +- }
2169 +- y_tile_minimum = plane_blocks_per_line * min_scanlines;
2170 + selected_result = max(method2, y_tile_minimum);
2171 + } else {
2172 + if ((ddb_allocation / plane_blocks_per_line) >= 1)
2173 +@@ -3606,10 +3627,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
2174 +
2175 + if (level >= 1 && level <= 7) {
2176 + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
2177 +- fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
2178 +- res_lines += 4;
2179 +- else
2180 ++ fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
2181 ++ res_blocks += y_tile_minimum;
2182 ++ res_lines += y_min_scanlines;
2183 ++ } else {
2184 + res_blocks++;
2185 ++ }
2186 + }
2187 +
2188 + if (res_blocks >= ddb_allocation || res_lines > 31) {
2189 +@@ -3828,183 +3851,82 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
2190 + I915_WRITE(reg, 0);
2191 + }
2192 +
2193 +-static void skl_write_wm_values(struct drm_i915_private *dev_priv,
2194 +- const struct skl_wm_values *new)
2195 ++void skl_write_plane_wm(struct intel_crtc *intel_crtc,
2196 ++ const struct skl_wm_values *wm,
2197 ++ int plane)
2198 + {
2199 +- struct drm_device *dev = &dev_priv->drm;
2200 +- struct intel_crtc *crtc;
2201 +-
2202 +- for_each_intel_crtc(dev, crtc) {
2203 +- int i, level, max_level = ilk_wm_max_level(dev);
2204 +- enum pipe pipe = crtc->pipe;
2205 +-
2206 +- if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
2207 +- continue;
2208 +- if (!crtc->active)
2209 +- continue;
2210 +-
2211 +- I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
2212 +-
2213 +- for (level = 0; level <= max_level; level++) {
2214 +- for (i = 0; i < intel_num_planes(crtc); i++)
2215 +- I915_WRITE(PLANE_WM(pipe, i, level),
2216 +- new->plane[pipe][i][level]);
2217 +- I915_WRITE(CUR_WM(pipe, level),
2218 +- new->plane[pipe][PLANE_CURSOR][level]);
2219 +- }
2220 +- for (i = 0; i < intel_num_planes(crtc); i++)
2221 +- I915_WRITE(PLANE_WM_TRANS(pipe, i),
2222 +- new->plane_trans[pipe][i]);
2223 +- I915_WRITE(CUR_WM_TRANS(pipe),
2224 +- new->plane_trans[pipe][PLANE_CURSOR]);
2225 +-
2226 +- for (i = 0; i < intel_num_planes(crtc); i++) {
2227 +- skl_ddb_entry_write(dev_priv,
2228 +- PLANE_BUF_CFG(pipe, i),
2229 +- &new->ddb.plane[pipe][i]);
2230 +- skl_ddb_entry_write(dev_priv,
2231 +- PLANE_NV12_BUF_CFG(pipe, i),
2232 +- &new->ddb.y_plane[pipe][i]);
2233 +- }
2234 ++ struct drm_crtc *crtc = &intel_crtc->base;
2235 ++ struct drm_device *dev = crtc->dev;
2236 ++ struct drm_i915_private *dev_priv = to_i915(dev);
2237 ++ int level, max_level = ilk_wm_max_level(dev);
2238 ++ enum pipe pipe = intel_crtc->pipe;
2239 +
2240 +- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
2241 +- &new->ddb.plane[pipe][PLANE_CURSOR]);
2242 ++ for (level = 0; level <= max_level; level++) {
2243 ++ I915_WRITE(PLANE_WM(pipe, plane, level),
2244 ++ wm->plane[pipe][plane][level]);
2245 + }
2246 +-}
2247 ++ I915_WRITE(PLANE_WM_TRANS(pipe, plane), wm->plane_trans[pipe][plane]);
2248 +
2249 +-/*
2250 +- * When setting up a new DDB allocation arrangement, we need to correctly
2251 +- * sequence the times at which the new allocations for the pipes are taken into
2252 +- * account or we'll have pipes fetching from space previously allocated to
2253 +- * another pipe.
2254 +- *
2255 +- * Roughly the sequence looks like:
2256 +- * 1. re-allocate the pipe(s) with the allocation being reduced and not
2257 +- * overlapping with a previous light-up pipe (another way to put it is:
2258 +- * pipes with their new allocation strickly included into their old ones).
2259 +- * 2. re-allocate the other pipes that get their allocation reduced
2260 +- * 3. allocate the pipes having their allocation increased
2261 +- *
2262 +- * Steps 1. and 2. are here to take care of the following case:
2263 +- * - Initially DDB looks like this:
2264 +- * | B | C |
2265 +- * - enable pipe A.
2266 +- * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
2267 +- * allocation
2268 +- * | A | B | C |
2269 +- *
2270 +- * We need to sequence the re-allocation: C, B, A (and not B, C, A).
2271 +- */
2272 ++ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
2273 ++ &wm->ddb.plane[pipe][plane]);
2274 ++ skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
2275 ++ &wm->ddb.y_plane[pipe][plane]);
2276 ++}
2277 +
2278 +-static void
2279 +-skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
2280 ++void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
2281 ++ const struct skl_wm_values *wm)
2282 + {
2283 +- int plane;
2284 +-
2285 +- DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
2286 ++ struct drm_crtc *crtc = &intel_crtc->base;
2287 ++ struct drm_device *dev = crtc->dev;
2288 ++ struct drm_i915_private *dev_priv = to_i915(dev);
2289 ++ int level, max_level = ilk_wm_max_level(dev);
2290 ++ enum pipe pipe = intel_crtc->pipe;
2291 +
2292 +- for_each_plane(dev_priv, pipe, plane) {
2293 +- I915_WRITE(PLANE_SURF(pipe, plane),
2294 +- I915_READ(PLANE_SURF(pipe, plane)));
2295 ++ for (level = 0; level <= max_level; level++) {
2296 ++ I915_WRITE(CUR_WM(pipe, level),
2297 ++ wm->plane[pipe][PLANE_CURSOR][level]);
2298 + }
2299 +- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
2300 ++ I915_WRITE(CUR_WM_TRANS(pipe), wm->plane_trans[pipe][PLANE_CURSOR]);
2301 ++
2302 ++ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
2303 ++ &wm->ddb.plane[pipe][PLANE_CURSOR]);
2304 + }
2305 +
2306 +-static bool
2307 +-skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
2308 +- const struct skl_ddb_allocation *new,
2309 +- enum pipe pipe)
2310 ++bool skl_ddb_allocation_equals(const struct skl_ddb_allocation *old,
2311 ++ const struct skl_ddb_allocation *new,
2312 ++ enum pipe pipe)
2313 + {
2314 +- uint16_t old_size, new_size;
2315 +-
2316 +- old_size = skl_ddb_entry_size(&old->pipe[pipe]);
2317 +- new_size = skl_ddb_entry_size(&new->pipe[pipe]);
2318 +-
2319 +- return old_size != new_size &&
2320 +- new->pipe[pipe].start >= old->pipe[pipe].start &&
2321 +- new->pipe[pipe].end <= old->pipe[pipe].end;
2322 ++ return new->pipe[pipe].start == old->pipe[pipe].start &&
2323 ++ new->pipe[pipe].end == old->pipe[pipe].end;
2324 + }
2325 +
2326 +-static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
2327 +- struct skl_wm_values *new_values)
2328 ++static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
2329 ++ const struct skl_ddb_entry *b)
2330 + {
2331 +- struct drm_device *dev = &dev_priv->drm;
2332 +- struct skl_ddb_allocation *cur_ddb, *new_ddb;
2333 +- bool reallocated[I915_MAX_PIPES] = {};
2334 +- struct intel_crtc *crtc;
2335 +- enum pipe pipe;
2336 +-
2337 +- new_ddb = &new_values->ddb;
2338 +- cur_ddb = &dev_priv->wm.skl_hw.ddb;
2339 +-
2340 +- /*
2341 +- * First pass: flush the pipes with the new allocation contained into
2342 +- * the old space.
2343 +- *
2344 +- * We'll wait for the vblank on those pipes to ensure we can safely
2345 +- * re-allocate the freed space without this pipe fetching from it.
2346 +- */
2347 +- for_each_intel_crtc(dev, crtc) {
2348 +- if (!crtc->active)
2349 +- continue;
2350 +-
2351 +- pipe = crtc->pipe;
2352 +-
2353 +- if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
2354 +- continue;
2355 +-
2356 +- skl_wm_flush_pipe(dev_priv, pipe, 1);
2357 +- intel_wait_for_vblank(dev, pipe);
2358 +-
2359 +- reallocated[pipe] = true;
2360 +- }
2361 +-
2362 ++ return a->start < b->end && b->start < a->end;
2363 ++}
2364 +
2365 +- /*
2366 +- * Second pass: flush the pipes that are having their allocation
2367 +- * reduced, but overlapping with a previous allocation.
2368 +- *
2369 +- * Here as well we need to wait for the vblank to make sure the freed
2370 +- * space is not used anymore.
2371 +- */
2372 +- for_each_intel_crtc(dev, crtc) {
2373 +- if (!crtc->active)
2374 +- continue;
2375 ++bool skl_ddb_allocation_overlaps(struct drm_atomic_state *state,
2376 ++ const struct skl_ddb_allocation *old,
2377 ++ const struct skl_ddb_allocation *new,
2378 ++ enum pipe pipe)
2379 ++{
2380 ++ struct drm_device *dev = state->dev;
2381 ++ struct intel_crtc *intel_crtc;
2382 ++ enum pipe otherp;
2383 +
2384 +- pipe = crtc->pipe;
2385 ++ for_each_intel_crtc(dev, intel_crtc) {
2386 ++ otherp = intel_crtc->pipe;
2387 +
2388 +- if (reallocated[pipe])
2389 ++ if (otherp == pipe)
2390 + continue;
2391 +
2392 +- if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
2393 +- skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
2394 +- skl_wm_flush_pipe(dev_priv, pipe, 2);
2395 +- intel_wait_for_vblank(dev, pipe);
2396 +- reallocated[pipe] = true;
2397 +- }
2398 ++ if (skl_ddb_entries_overlap(&new->pipe[pipe],
2399 ++ &old->pipe[otherp]))
2400 ++ return true;
2401 + }
2402 +
2403 +- /*
2404 +- * Third pass: flush the pipes that got more space allocated.
2405 +- *
2406 +- * We don't need to actively wait for the update here, next vblank
2407 +- * will just get more DDB space with the correct WM values.
2408 +- */
2409 +- for_each_intel_crtc(dev, crtc) {
2410 +- if (!crtc->active)
2411 +- continue;
2412 +-
2413 +- pipe = crtc->pipe;
2414 +-
2415 +- /*
2416 +- * At this point, only the pipes more space than before are
2417 +- * left to re-allocate.
2418 +- */
2419 +- if (reallocated[pipe])
2420 +- continue;
2421 +-
2422 +- skl_wm_flush_pipe(dev_priv, pipe, 3);
2423 +- }
2424 ++ return false;
2425 + }
2426 +
2427 + static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
2428 +@@ -4041,6 +3963,41 @@ pipes_modified(struct drm_atomic_state *state)
2429 + return ret;
2430 + }
2431 +
2432 ++int
2433 ++skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
2434 ++{
2435 ++ struct drm_atomic_state *state = cstate->base.state;
2436 ++ struct drm_device *dev = state->dev;
2437 ++ struct drm_crtc *crtc = cstate->base.crtc;
2438 ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2439 ++ struct drm_i915_private *dev_priv = to_i915(dev);
2440 ++ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2441 ++ struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
2442 ++ struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
2443 ++ struct drm_plane_state *plane_state;
2444 ++ struct drm_plane *plane;
2445 ++ enum pipe pipe = intel_crtc->pipe;
2446 ++ int id;
2447 ++
2448 ++ WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
2449 ++
2450 ++ drm_for_each_plane_mask(plane, dev, crtc->state->plane_mask) {
2451 ++ id = skl_wm_plane_id(to_intel_plane(plane));
2452 ++
2453 ++ if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
2454 ++ &new_ddb->plane[pipe][id]) &&
2455 ++ skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
2456 ++ &new_ddb->y_plane[pipe][id]))
2457 ++ continue;
2458 ++
2459 ++ plane_state = drm_atomic_get_plane_state(state, plane);
2460 ++ if (IS_ERR(plane_state))
2461 ++ return PTR_ERR(plane_state);
2462 ++ }
2463 ++
2464 ++ return 0;
2465 ++}
2466 ++
2467 + static int
2468 + skl_compute_ddb(struct drm_atomic_state *state)
2469 + {
2470 +@@ -4105,6 +4062,10 @@ skl_compute_ddb(struct drm_atomic_state *state)
2471 + if (ret)
2472 + return ret;
2473 +
2474 ++ ret = skl_ddb_add_affected_planes(cstate);
2475 ++ if (ret)
2476 ++ return ret;
2477 ++
2478 + ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
2479 + if (ret)
2480 + return ret;
2481 +@@ -4206,7 +4167,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
2482 + struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
2483 + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
2484 + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
2485 +- int pipe;
2486 ++ enum pipe pipe = intel_crtc->pipe;
2487 +
2488 + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
2489 + return;
2490 +@@ -4215,15 +4176,22 @@ static void skl_update_wm(struct drm_crtc *crtc)
2491 +
2492 + mutex_lock(&dev_priv->wm.wm_mutex);
2493 +
2494 +- skl_write_wm_values(dev_priv, results);
2495 +- skl_flush_wm_values(dev_priv, results);
2496 +-
2497 + /*
2498 +- * Store the new configuration (but only for the pipes that have
2499 +- * changed; the other values weren't recomputed).
2500 ++ * If this pipe isn't active already, we're going to be enabling it
2501 ++ * very soon. Since it's safe to update a pipe's ddb allocation while
2502 ++ * the pipe's shut off, just do so here. Already active pipes will have
2503 ++ * their watermarks updated once we update their planes.
2504 + */
2505 +- for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
2506 +- skl_copy_wm_for_pipe(hw_vals, results, pipe);
2507 ++ if (crtc->state->active_changed) {
2508 ++ int plane;
2509 ++
2510 ++ for (plane = 0; plane < intel_num_planes(intel_crtc); plane++)
2511 ++ skl_write_plane_wm(intel_crtc, results, plane);
2512 ++
2513 ++ skl_write_cursor_wm(intel_crtc, results);
2514 ++ }
2515 ++
2516 ++ skl_copy_wm_for_pipe(hw_vals, results, pipe);
2517 +
2518 + mutex_unlock(&dev_priv->wm.wm_mutex);
2519 + }
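
The reordered latency handling earlier in this file is subtle enough to restate on its own: first sanitize the punit values (a 0us latency at any level n > 0 disables every level at or above n), and only then apply WaWmMemoryReadLatency, which after this patch bumps latencies by 2us only when the punit reported 0 for level 0, instead of unconditionally. A minimal sketch of just that logic, outside the driver's context:

/* Sketch of the sanitize-then-adjust order the hunk above establishes.
 * The 2us bump and the loop structure follow the patch; the array is a
 * stand-in for the driver's wm[] latency table. */
static void sanitize_skl_wm_latency(unsigned int wm[], int max_level)
{
	int level, i;

	/* A 0us latency at level n (n >= 1) disables levels n..max. */
	for (level = 1; level <= max_level; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i <= max_level; i++)
				wm[i] = 0;
			break;
		}
	}

	/* WaWmMemoryReadLatency: only when level 0 reads back as 0us,
	 * add the 2us punit read latency to every enabled level. */
	if (wm[0] == 0) {
		wm[0] += 2;
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0)
				break;
			wm[level] += 2;
		}
	}
}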
2520 +diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
2521 +index 7c08e4f29032..4178849631ad 100644
2522 +--- a/drivers/gpu/drm/i915/intel_sprite.c
2523 ++++ b/drivers/gpu/drm/i915/intel_sprite.c
2524 +@@ -203,6 +203,9 @@ skl_update_plane(struct drm_plane *drm_plane,
2525 + struct intel_plane *intel_plane = to_intel_plane(drm_plane);
2526 + struct drm_framebuffer *fb = plane_state->base.fb;
2527 + struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2528 ++ const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
2529 ++ struct drm_crtc *crtc = crtc_state->base.crtc;
2530 ++ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2531 + const int pipe = intel_plane->pipe;
2532 + const int plane = intel_plane->plane + 1;
2533 + u32 plane_ctl, stride_div, stride;
2534 +@@ -238,6 +241,9 @@ skl_update_plane(struct drm_plane *drm_plane,
2535 + crtc_w--;
2536 + crtc_h--;
2537 +
2538 ++ if (wm->dirty_pipes & drm_crtc_mask(crtc))
2539 ++ skl_write_plane_wm(intel_crtc, wm, plane);
2540 ++
2541 + if (key->flags) {
2542 + I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
2543 + I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
2544 +@@ -308,6 +314,14 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
2545 + const int pipe = intel_plane->pipe;
2546 + const int plane = intel_plane->plane + 1;
2547 +
2548 ++ /*
2549 ++ * We only populate skl_results on watermark updates, and if the
2550 ++ * plane's visibility isn't actually changing, neither are its watermarks.
2551 ++ */
2552 ++ if (!to_intel_plane_state(dplane->state)->visible)
2553 ++ skl_write_plane_wm(to_intel_crtc(crtc),
2554 ++ &dev_priv->wm.skl_results, plane);
2555 ++
2556 + I915_WRITE(PLANE_CTL(pipe, plane), 0);
2557 +
2558 + I915_WRITE(PLANE_SURF(pipe, plane), 0);
2559 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
2560 +index ff80a81b1a84..ec28b15f2724 100644
2561 +--- a/drivers/gpu/drm/i915/intel_uncore.c
2562 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
2563 +@@ -796,10 +796,9 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
2564 + const bool read,
2565 + const bool before)
2566 + {
2567 +- if (WARN(check_for_unclaimed_mmio(dev_priv),
2568 +- "Unclaimed register detected %s %s register 0x%x\n",
2569 +- before ? "before" : "after",
2570 +- read ? "reading" : "writing to",
2571 ++ if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
2572 ++ "Unclaimed %s register 0x%x\n",
2573 ++ read ? "read from" : "write to",
2574 + i915_mmio_reg_offset(reg)))
2575 + i915.mmio_debug--; /* Only report the first N failures */
2576 + }
2577 +diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
2578 +index 6a4b020dd0b4..5a26eb4545aa 100644
2579 +--- a/drivers/gpu/drm/radeon/r600_dpm.c
2580 ++++ b/drivers/gpu/drm/radeon/r600_dpm.c
2581 +@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
2582 + struct drm_device *dev = rdev->ddev;
2583 + struct drm_crtc *crtc;
2584 + struct radeon_crtc *radeon_crtc;
2585 +- u32 line_time_us, vblank_lines;
2586 ++ u32 vblank_in_pixels;
2587 + u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
2588 +
2589 + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
2590 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2591 + radeon_crtc = to_radeon_crtc(crtc);
2592 + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
2593 +- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
2594 +- radeon_crtc->hw_mode.clock;
2595 +- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
2596 +- radeon_crtc->hw_mode.crtc_vdisplay +
2597 +- (radeon_crtc->v_border * 2);
2598 +- vblank_time_us = vblank_lines * line_time_us;
2599 ++ vblank_in_pixels =
2600 ++ radeon_crtc->hw_mode.crtc_htotal *
2601 ++ (radeon_crtc->hw_mode.crtc_vblank_end -
2602 ++ radeon_crtc->hw_mode.crtc_vdisplay +
2603 ++ (radeon_crtc->v_border * 2));
2604 ++
2605 ++ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
2606 + break;
2607 + }
2608 + }
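
The r600_dpm change above is a pure integer-precision fix: multiplying the truncated per-line time by the line count compounds the rounding error, so the new code keeps everything in pixels until the single final division. With hypothetical 1080p-style timings (htotal 2200, 45 vblank lines, 148500 kHz pixel clock), the difference is easy to see:

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 2200, vblank_lines = 45, clock_khz = 148500;

	/* old: per-line time truncates to 14us (really ~14.81us) */
	unsigned int old_us = (htotal * 1000 / clock_khz) * vblank_lines;
	/* new: divide once, at the end */
	unsigned int new_us = (htotal * vblank_lines) * 1000 / clock_khz;

	printf("old: %u us, new: %u us\n", old_us, new_us);  /* 630 vs 666 */
	return 0;
}

A roughly 5% underestimate matters because this value feeds DPM decisions that compare the vblank window against switching thresholds.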
2609 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2610 +index a00dd2f74527..554ca7115f98 100644
2611 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2612 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2613 +@@ -661,8 +661,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
2614 + {
2615 + uint32_t reg;
2616 +
2617 +- /* for pass through, always force asic_init */
2618 +- if (radeon_device_is_virtual())
2619 ++ /* for pass through, always force asic_init for CI */
2620 ++ if (rdev->family >= CHIP_BONAIRE &&
2621 ++ radeon_device_is_virtual())
2622 + return false;
2623 +
2624 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
2625 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2626 +index 1f78ec2548ec..89bdf20344ae 100644
2627 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2628 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2629 +@@ -4112,7 +4112,7 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
2630 + &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
2631 + si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
2632 +
2633 +- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
2634 ++ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
2635 + cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
2636 +
2637 + si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
2638 +diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
2639 +index 3c779838d9ab..966e3a556011 100644
2640 +--- a/drivers/gpu/drm/radeon/sislands_smc.h
2641 ++++ b/drivers/gpu/drm/radeon/sislands_smc.h
2642 +@@ -194,6 +194,7 @@ typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
2643 + #define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
2644 + #define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
2645 + #define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
2646 ++#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3
2647 + #define SISLANDS_SMC_VOLTAGEMASK_MAX 4
2648 +
2649 + struct SISLANDS_SMC_VOLTAGEMASKTABLE
2650 +diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
2651 +index 428e24919ef1..f696b752886b 100644
2652 +--- a/drivers/gpu/drm/vc4/vc4_drv.h
2653 ++++ b/drivers/gpu/drm/vc4/vc4_drv.h
2654 +@@ -122,9 +122,16 @@ to_vc4_dev(struct drm_device *dev)
2655 + struct vc4_bo {
2656 + struct drm_gem_cma_object base;
2657 +
2658 +- /* seqno of the last job to render to this BO. */
2659 ++ /* seqno of the last job to render using this BO. */
2660 + uint64_t seqno;
2661 +
2662 ++ /* seqno of the last job to use the RCL to write to this BO.
2663 ++ *
2664 ++ * Note that this doesn't include binner overflow memory
2665 ++ * writes.
2666 ++ */
2667 ++ uint64_t write_seqno;
2668 ++
2669 + /* List entry for the BO's position in either
2670 + * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
2671 + */
2672 +@@ -216,6 +223,9 @@ struct vc4_exec_info {
2673 + /* Sequence number for this bin/render job. */
2674 + uint64_t seqno;
2675 +
2676 ++ /* Latest write_seqno of any BO that binning depends on. */
2677 ++ uint64_t bin_dep_seqno;
2678 ++
2679 + /* Last current addresses the hardware was processing when the
2680 + * hangcheck timer checked on us.
2681 + */
2682 +@@ -230,6 +240,13 @@ struct vc4_exec_info {
2683 + struct drm_gem_cma_object **bo;
2684 + uint32_t bo_count;
2685 +
2686 ++ /* List of BOs that are being written by the RCL. Other than
2687 ++ * the binner temporary storage, this is all the BOs written
2688 ++ * by the job.
2689 ++ */
2690 ++ struct drm_gem_cma_object *rcl_write_bo[4];
2691 ++ uint32_t rcl_write_bo_count;
2692 ++
2693 + /* Pointers for our position in vc4->job_list */
2694 + struct list_head head;
2695 +
2696 +diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
2697 +index b262c5c26f10..ae1609e739ef 100644
2698 +--- a/drivers/gpu/drm/vc4/vc4_gem.c
2699 ++++ b/drivers/gpu/drm/vc4/vc4_gem.c
2700 +@@ -471,6 +471,11 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
2701 + list_for_each_entry(bo, &exec->unref_list, unref_head) {
2702 + bo->seqno = seqno;
2703 + }
2704 ++
2705 ++ for (i = 0; i < exec->rcl_write_bo_count; i++) {
2706 ++ bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
2707 ++ bo->write_seqno = seqno;
2708 ++ }
2709 + }
2710 +
2711 + /* Queues a struct vc4_exec_info for execution. If no job is
2712 +@@ -673,6 +678,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
2713 + goto fail;
2714 +
2715 + ret = vc4_validate_shader_recs(dev, exec);
2716 ++ if (ret)
2717 ++ goto fail;
2718 ++
2719 ++ /* Block waiting on any previous rendering into the CS's VBO,
2720 ++ * IB, or textures, so that pixels are actually written by the
2721 ++ * time we try to read them.
2722 ++ */
2723 ++ ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
2724 +
2725 + fail:
2726 + drm_free_large(temp);
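
The vc4 changes in this and the following two files implement a small producer/consumer dependency: every BO the render job writes records the job's seqno as its write_seqno, and any later job whose binner reads that BO raises its own bin_dep_seqno to the max of what it saw; vc4_get_bcl() then blocks on that seqno before letting the binner run. A compact sketch of the bookkeeping, with stand-in types rather than the driver's own:

#include <stdint.h>

struct bo_sketch  { uint64_t write_seqno; };
struct job_sketch { uint64_t seqno, bin_dep_seqno; };

/* Binning reads this BO (VBO, IB, or CS-sampled texture): the job may
 * not start until the last render-job write into it has landed. */
static void job_reads(struct job_sketch *job, const struct bo_sketch *bo)
{
	if (bo->write_seqno > job->bin_dep_seqno)
		job->bin_dep_seqno = bo->write_seqno;
}

/* The RCL writes this BO: later readers must wait for this job. */
static void job_writes(const struct job_sketch *job, struct bo_sketch *bo)
{
	bo->write_seqno = job->seqno;
}

/* Submission order, mirroring the patch: validation calls job_reads()
 * for every binner input, vc4_get_bcl() waits on bin_dep_seqno, and
 * vc4_update_bo_seqnos() calls job_writes() for every RCL output. */
int main(void)
{
	struct bo_sketch tex = { 0 };
	struct job_sketch a = { .seqno = 1 }, b = { .seqno = 2 };

	job_writes(&a, &tex);	/* job 1 renders into tex */
	job_reads(&b, &tex);	/* job 2 samples tex: bin_dep_seqno == 1 */
	return b.bin_dep_seqno == 1 ? 0 : 1;
}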
2727 +diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
2728 +index 0f12418725e5..08886a309757 100644
2729 +--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
2730 ++++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
2731 +@@ -45,6 +45,8 @@ struct vc4_rcl_setup {
2732 +
2733 + struct drm_gem_cma_object *rcl;
2734 + u32 next_offset;
2735 ++
2736 ++ u32 next_write_bo_index;
2737 + };
2738 +
2739 + static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val)
2740 +@@ -407,6 +409,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
2741 + if (!*obj)
2742 + return -EINVAL;
2743 +
2744 ++ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
2745 ++
2746 + if (surf->offset & 0xf) {
2747 + DRM_ERROR("MSAA write must be 16b aligned.\n");
2748 + return -EINVAL;
2749 +@@ -417,7 +421,8 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
2750 +
2751 + static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
2752 + struct drm_gem_cma_object **obj,
2753 +- struct drm_vc4_submit_rcl_surface *surf)
2754 ++ struct drm_vc4_submit_rcl_surface *surf,
2755 ++ bool is_write)
2756 + {
2757 + uint8_t tiling = VC4_GET_FIELD(surf->bits,
2758 + VC4_LOADSTORE_TILE_BUFFER_TILING);
2759 +@@ -440,6 +445,9 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
2760 + if (!*obj)
2761 + return -EINVAL;
2762 +
2763 ++ if (is_write)
2764 ++ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
2765 ++
2766 + if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
2767 + if (surf == &exec->args->zs_write) {
2768 + DRM_ERROR("general zs write may not be a full-res.\n");
2769 +@@ -542,6 +550,8 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
2770 + if (!*obj)
2771 + return -EINVAL;
2772 +
2773 ++ exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
2774 ++
2775 + if (tiling > VC4_TILING_FORMAT_LT) {
2776 + DRM_ERROR("Bad tiling format\n");
2777 + return -EINVAL;
2778 +@@ -599,15 +609,18 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
2779 + if (ret)
2780 + return ret;
2781 +
2782 +- ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read);
2783 ++ ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read,
2784 ++ false);
2785 + if (ret)
2786 + return ret;
2787 +
2788 +- ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read);
2789 ++ ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read,
2790 ++ false);
2791 + if (ret)
2792 + return ret;
2793 +
2794 +- ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write);
2795 ++ ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write,
2796 ++ true);
2797 + if (ret)
2798 + return ret;
2799 +
2800 +diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
2801 +index 9ce1d0adf882..26503e307438 100644
2802 +--- a/drivers/gpu/drm/vc4/vc4_validate.c
2803 ++++ b/drivers/gpu/drm/vc4/vc4_validate.c
2804 +@@ -267,6 +267,9 @@ validate_indexed_prim_list(VALIDATE_ARGS)
2805 + if (!ib)
2806 + return -EINVAL;
2807 +
2808 ++ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
2809 ++ to_vc4_bo(&ib->base)->write_seqno);
2810 ++
2811 + if (offset > ib->base.size ||
2812 + (ib->base.size - offset) / index_size < length) {
2813 + DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
2814 +@@ -555,8 +558,7 @@ static bool
2815 + reloc_tex(struct vc4_exec_info *exec,
2816 + void *uniform_data_u,
2817 + struct vc4_texture_sample_info *sample,
2818 +- uint32_t texture_handle_index)
2819 +-
2820 ++ uint32_t texture_handle_index, bool is_cs)
2821 + {
2822 + struct drm_gem_cma_object *tex;
2823 + uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
2824 +@@ -714,6 +716,11 @@ reloc_tex(struct vc4_exec_info *exec,
2825 +
2826 + *validated_p0 = tex->paddr + p0;
2827 +
2828 ++ if (is_cs) {
2829 ++ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
2830 ++ to_vc4_bo(&tex->base)->write_seqno);
2831 ++ }
2832 ++
2833 + return true;
2834 + fail:
2835 + DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
2836 +@@ -835,7 +842,8 @@ validate_gl_shader_rec(struct drm_device *dev,
2837 + if (!reloc_tex(exec,
2838 + uniform_data_u,
2839 + &validated_shader->texture_samples[tex],
2840 +- texture_handles_u[tex])) {
2841 ++ texture_handles_u[tex],
2842 ++ i == 2)) {
2843 + return -EINVAL;
2844 + }
2845 + }
2846 +@@ -867,6 +875,9 @@ validate_gl_shader_rec(struct drm_device *dev,
2847 + uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
2848 + uint32_t max_index;
2849 +
2850 ++ exec->bin_dep_seqno = max(exec->bin_dep_seqno,
2851 ++ to_vc4_bo(&vbo->base)->write_seqno);
2852 ++
2853 + if (state->addr & 0x8)
2854 + stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;
2855 +
2856 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
2857 +index dc5beff2b4aa..8a15c4aa84c1 100644
2858 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
2859 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
2860 +@@ -34,6 +34,24 @@
2861 +
2862 + #define VMW_RES_HT_ORDER 12
2863 +
2864 ++ /**
2865 ++ * enum vmw_resource_relocation_type - Relocation type for resources
2866 ++ *
2867 ++ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
2868 ++ * command stream is replaced with the actual id after validation.
2869 ++ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
2870 ++ * with a NOP.
2871 ++ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
2872 ++ * after validation is -1, the command is replaced with a NOP. Otherwise no
2873 ++ * action.
2874 ++ */
2875 ++enum vmw_resource_relocation_type {
2876 ++ vmw_res_rel_normal,
2877 ++ vmw_res_rel_nop,
2878 ++ vmw_res_rel_cond_nop,
2879 ++ vmw_res_rel_max
2880 ++};
2881 ++
2882 + /**
2883 + * struct vmw_resource_relocation - Relocation info for resources
2884 + *
2885 +@@ -41,11 +59,13 @@
2886 + * @res: Non-ref-counted pointer to the resource.
2887 + * @offset: Offset of 4 byte entries into the command buffer where the
2888 + * id that needs fixup is located.
2889 ++ * @rel_type: Type of relocation.
2890 + */
2891 + struct vmw_resource_relocation {
2892 + struct list_head head;
2893 + const struct vmw_resource *res;
2894 +- unsigned long offset;
2895 ++ u32 offset:29;
2896 ++ enum vmw_resource_relocation_type rel_type:3;
2897 + };
2898 +
2899 + /**
2900 +@@ -410,10 +430,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
2901 + * @res: The resource.
2902 + * @offset: Offset into the command buffer currently being parsed where the
2903 + * id that needs fixup is located. Granularity is 4 bytes.
2904 ++ * @rel_type: Relocation type.
2905 + */
2906 + static int vmw_resource_relocation_add(struct list_head *list,
2907 + const struct vmw_resource *res,
2908 +- unsigned long offset)
2909 ++ unsigned long offset,
2910 ++ enum vmw_resource_relocation_type
2911 ++ rel_type)
2912 + {
2913 + struct vmw_resource_relocation *rel;
2914 +
2915 +@@ -425,6 +448,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
2916 +
2917 + rel->res = res;
2918 + rel->offset = offset;
2919 ++ rel->rel_type = rel_type;
2920 + list_add_tail(&rel->head, list);
2921 +
2922 + return 0;
2923 +@@ -459,11 +483,23 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
2924 + {
2925 + struct vmw_resource_relocation *rel;
2926 +
2927 ++ /* Validate the struct vmw_resource_relocation member size */
2928 ++ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
2929 ++ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
2930 ++
2931 + list_for_each_entry(rel, list, head) {
2932 +- if (likely(rel->res != NULL))
2933 ++ switch (rel->rel_type) {
2934 ++ case vmw_res_rel_normal:
2935 + cb[rel->offset] = rel->res->id;
2936 +- else
2937 ++ break;
2938 ++ case vmw_res_rel_nop:
2939 + cb[rel->offset] = SVGA_3D_CMD_NOP;
2940 ++ break;
2941 ++ default:
2942 ++ if (rel->res->id == -1)
2943 ++ cb[rel->offset] = SVGA_3D_CMD_NOP;
2944 ++ break;
2945 ++ }
2946 + }
2947 + }
2948 +
2949 +@@ -655,7 +691,8 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
2950 + *p_val = NULL;
2951 + ret = vmw_resource_relocation_add(&sw_context->res_relocations,
2952 + res,
2953 +- id_loc - sw_context->buf_start);
2954 ++ id_loc - sw_context->buf_start,
2955 ++ vmw_res_rel_normal);
2956 + if (unlikely(ret != 0))
2957 + return ret;
2958 +
2959 +@@ -721,7 +758,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
2960 +
2961 + return vmw_resource_relocation_add
2962 + (&sw_context->res_relocations, res,
2963 +- id_loc - sw_context->buf_start);
2964 ++ id_loc - sw_context->buf_start,
2965 ++ vmw_res_rel_normal);
2966 + }
2967 +
2968 + ret = vmw_user_resource_lookup_handle(dev_priv,
2969 +@@ -2144,7 +2182,8 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2970 +
2971 + return vmw_resource_relocation_add(&sw_context->res_relocations,
2972 + NULL, &cmd->header.id -
2973 +- sw_context->buf_start);
2974 ++ sw_context->buf_start,
2975 ++ vmw_res_rel_nop);
2976 +
2977 + return 0;
2978 + }
2979 +@@ -2189,7 +2228,8 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2980 +
2981 + return vmw_resource_relocation_add(&sw_context->res_relocations,
2982 + NULL, &cmd->header.id -
2983 +- sw_context->buf_start);
2984 ++ sw_context->buf_start,
2985 ++ vmw_res_rel_nop);
2986 +
2987 + return 0;
2988 + }
2989 +@@ -2848,8 +2888,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2990 + * @header: Pointer to the command header in the command stream.
2991 + *
2992 + * Check that the view exists, and if it was not created using this
2993 +- * command batch, make sure it's validated (present in the device) so that
2994 +- * the remove command will not confuse the device.
2995 ++ * command batch, conditionally make this command a NOP.
2996 + */
2997 + static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2998 + struct vmw_sw_context *sw_context,
2999 +@@ -2877,10 +2916,15 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
3000 + return ret;
3001 +
3002 + /*
3003 +- * Add view to the validate list iff it was not created using this
3004 +- * command batch.
3005 ++ * If the view wasn't created during this command batch, it might
3006 ++ * have been removed due to a context swapout, so add a
3007 ++ * relocation to conditionally make this command a NOP to avoid
3008 ++ * device errors.
3009 + */
3010 +- return vmw_view_res_val_add(sw_context, view);
3011 ++ return vmw_resource_relocation_add(&sw_context->res_relocations,
3012 ++ view,
3013 ++ &cmd->header.id - sw_context->buf_start,
3014 ++ vmw_res_rel_cond_nop);
3015 + }
3016 +
3017 + /**
3018 +@@ -3848,14 +3892,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3019 + int ret;
3020 +
3021 + *header = NULL;
3022 +- if (!dev_priv->cman || kernel_commands)
3023 +- return kernel_commands;
3024 +-
3025 + if (command_size > SVGA_CB_MAX_SIZE) {
3026 + DRM_ERROR("Command buffer is too large.\n");
3027 + return ERR_PTR(-EINVAL);
3028 + }
3029 +
3030 ++ if (!dev_priv->cman || kernel_commands)
3031 ++ return kernel_commands;
3032 ++
3033 + /* If possible, add a little space for fencing. */
3034 + cmdbuf_size = command_size + 512;
3035 + cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
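
The vmwgfx hunks above shrink struct vmw_resource_relocation by packing the 4-byte-granular offset into 29 bits and the new relocation type into 3 bits of one 32-bit word, with two BUILD_BUG_ON() checks proving at compile time that both values always fit. A stand-alone sketch of the same packing (constants are illustrative stand-ins, and the enum bit-field mirrors the driver's GCC-style declaration):

/* Hypothetical stand-alone illustration of the packing used above:
 * a 29-bit offset plus a 3-bit enum share one 32-bit word, and
 * compile-time assertions guarantee both values always fit. */
#include <stdint.h>
#include <stdio.h>

#define MAX_CMDBUF_SIZE (1u << 20)   /* assumed stand-in for SVGA_CB_MAX_SIZE */

enum rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP, REL_MAX };

struct relocation {
	uint32_t offset : 29;
	enum rel_type type : 3;   /* enum bit-field, as in the driver */
};

/* Mirrors the BUILD_BUG_ON() pair in vmw_resource_relocations_apply(). */
_Static_assert(MAX_CMDBUF_SIZE < (1u << 29), "offset field too small");
_Static_assert(REL_MAX < (1 << 3), "type field too small");

int main(void)
{
	struct relocation rel = { .offset = 4096, .type = REL_COND_NOP };

	printf("packed into %zu bytes\n", sizeof(rel));
	return 0;
}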
3036 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
3037 +index 4ed9a4fdfea7..e92b09d32605 100644
3038 +--- a/drivers/hid/hid-ids.h
3039 ++++ b/drivers/hid/hid-ids.h
3040 +@@ -64,6 +64,9 @@
3041 + #define USB_VENDOR_ID_AKAI 0x2011
3042 + #define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
3043 +
3044 ++#define USB_VENDOR_ID_AKAI_09E8 0x09E8
3045 ++#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
3046 ++
3047 + #define USB_VENDOR_ID_ALCOR 0x058f
3048 + #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
3049 +
3050 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
3051 +index b4b8c6abb03e..bb400081efe4 100644
3052 +--- a/drivers/hid/usbhid/hid-quirks.c
3053 ++++ b/drivers/hid/usbhid/hid-quirks.c
3054 +@@ -56,6 +56,7 @@ static const struct hid_blacklist {
3055 +
3056 + { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
3057 + { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
3058 ++ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
3059 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
3060 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
3061 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
3062 +diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
3063 +index 9e02ac963cd0..3978cbb6b038 100644
3064 +--- a/drivers/hwtracing/coresight/coresight-tmc.c
3065 ++++ b/drivers/hwtracing/coresight/coresight-tmc.c
3066 +@@ -388,9 +388,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
3067 + err_misc_register:
3068 + coresight_unregister(drvdata->csdev);
3069 + err_devm_kzalloc:
3070 +- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
3071 +- dma_free_coherent(dev, drvdata->size,
3072 +- drvdata->vaddr, drvdata->paddr);
3073 + return ret;
3074 + }
3075 +
3076 +diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
3077 +index 0fde593ec0d9..5f7968232564 100644
3078 +--- a/drivers/iio/dac/ad5755.c
3079 ++++ b/drivers/iio/dac/ad5755.c
3080 +@@ -655,7 +655,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
3081 +
3082 + devnr = 0;
3083 + for_each_child_of_node(np, pp) {
3084 +- if (devnr > AD5755_NUM_CHANNELS) {
3085 ++ if (devnr >= AD5755_NUM_CHANNELS) {
3086 + dev_err(dev,
3087 + "There is to many channels defined in DT\n");
3088 + goto error_out;
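
The ad5755 change is a one-character off-by-one fix: for an array of AD5755_NUM_CHANNELS entries the last valid index is N - 1, so the guard must reject devnr == N, not only devnr > N. A minimal sketch of the corrected bound check (names are illustrative):

#include <stdio.h>

#define NUM_CHANNELS 4   /* stand-in for AD5755_NUM_CHANNELS */

static int store_channel(int *table, int devnr, int value)
{
	if (devnr >= NUM_CHANNELS)   /* '>' here would allow a write one past the end */
		return -1;
	table[devnr] = value;
	return 0;
}

int main(void)
{
	int table[NUM_CHANNELS] = { 0 };

	/* devnr == NUM_CHANNELS must be rejected. */
	printf("%d\n", store_channel(table, NUM_CHANNELS, 42));
	return 0;
}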
3089 +diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
3090 +index 20c40f780964..18cf2e29e4d5 100644
3091 +--- a/drivers/iio/light/us5182d.c
3092 ++++ b/drivers/iio/light/us5182d.c
3093 +@@ -894,7 +894,7 @@ static int us5182d_probe(struct i2c_client *client,
3094 + goto out_err;
3095 +
3096 + if (data->default_continuous) {
3097 +- pm_runtime_set_active(&client->dev);
3098 ++ ret = pm_runtime_set_active(&client->dev);
3099 + if (ret < 0)
3100 + goto out_err;
3101 + }
3102 +diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
3103 +index 4e4d8317c281..c17c9dd7cde1 100644
3104 +--- a/drivers/infiniband/hw/hfi1/qp.c
3105 ++++ b/drivers/infiniband/hw/hfi1/qp.c
3106 +@@ -808,6 +808,13 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
3107 + kfree(priv);
3108 + return ERR_PTR(-ENOMEM);
3109 + }
3110 ++ iowait_init(
3111 ++ &priv->s_iowait,
3112 ++ 1,
3113 ++ _hfi1_do_send,
3114 ++ iowait_sleep,
3115 ++ iowait_wakeup,
3116 ++ iowait_sdma_drained);
3117 + setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
3118 + qp->s_timer.function = hfi1_rc_timeout;
3119 + return priv;
3120 +@@ -873,13 +880,6 @@ void notify_qp_reset(struct rvt_qp *qp)
3121 + {
3122 + struct hfi1_qp_priv *priv = qp->priv;
3123 +
3124 +- iowait_init(
3125 +- &priv->s_iowait,
3126 +- 1,
3127 +- _hfi1_do_send,
3128 +- iowait_sleep,
3129 +- iowait_wakeup,
3130 +- iowait_sdma_drained);
3131 + priv->r_adefered = 0;
3132 + clear_ahg(qp);
3133 + }
3134 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
3135 +index e19537cf44ab..bff8707a2f1f 100644
3136 +--- a/drivers/infiniband/hw/mlx5/main.c
3137 ++++ b/drivers/infiniband/hw/mlx5/main.c
3138 +@@ -1843,6 +1843,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
3139 + &leftovers_specs[LEFTOVERS_UC].flow_attr,
3140 + dst);
3141 + if (IS_ERR(handler_ucast)) {
3142 ++ mlx5_del_flow_rule(handler->rule);
3143 + kfree(handler);
3144 + handler = handler_ucast;
3145 + } else {
3146 +diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
3147 +index bbf0a163aeab..54bb655f5332 100644
3148 +--- a/drivers/infiniband/hw/qib/qib.h
3149 ++++ b/drivers/infiniband/hw/qib/qib.h
3150 +@@ -1131,7 +1131,6 @@ extern spinlock_t qib_devs_lock;
3151 + extern struct qib_devdata *qib_lookup(int unit);
3152 + extern u32 qib_cpulist_count;
3153 + extern unsigned long *qib_cpulist;
3154 +-extern u16 qpt_mask;
3155 + extern unsigned qib_cc_table_size;
3156 +
3157 + int qib_init(struct qib_devdata *, int);
3158 +diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
3159 +index f9b8cd2354d1..99d31efe4c2f 100644
3160 +--- a/drivers/infiniband/hw/qib/qib_qp.c
3161 ++++ b/drivers/infiniband/hw/qib/qib_qp.c
3162 +@@ -41,14 +41,6 @@
3163 +
3164 + #include "qib.h"
3165 +
3166 +-/*
3167 +- * mask field which was present in now deleted qib_qpn_table
3168 +- * is not present in rvt_qpn_table. Defining the same field
3169 +- * as qpt_mask here instead of adding the mask field to
3170 +- * rvt_qpn_table.
3171 +- */
3172 +-u16 qpt_mask;
3173 +-
3174 + static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
3175 + struct rvt_qpn_map *map, unsigned off)
3176 + {
3177 +@@ -57,7 +49,7 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
3178 +
3179 + static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
3180 + struct rvt_qpn_map *map, unsigned off,
3181 +- unsigned n)
3182 ++ unsigned n, u16 qpt_mask)
3183 + {
3184 + if (qpt_mask) {
3185 + off++;
3186 +@@ -179,6 +171,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
3187 + struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
3188 + struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
3189 + verbs_dev);
3190 ++ u16 qpt_mask = dd->qpn_mask;
3191 +
3192 + if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
3193 + unsigned n;
3194 +@@ -215,7 +208,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
3195 + goto bail;
3196 + }
3197 + offset = find_next_offset(qpt, map, offset,
3198 +- dd->n_krcv_queues);
3199 ++ dd->n_krcv_queues, qpt_mask);
3200 + qpn = mk_qpn(qpt, map, offset);
3201 + /*
3202 + * This test differs from alloc_pidmap().
3203 +diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
3204 +index fd1dfbce5539..b2b845f9f7df 100644
3205 +--- a/drivers/infiniband/hw/qib/qib_verbs.c
3206 ++++ b/drivers/infiniband/hw/qib/qib_verbs.c
3207 +@@ -1606,8 +1606,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
3208 + /* Only need to initialize non-zero fields. */
3209 + setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
3210 +
3211 +- qpt_mask = dd->qpn_mask;
3212 +-
3213 + INIT_LIST_HEAD(&dev->piowait);
3214 + INIT_LIST_HEAD(&dev->dmawait);
3215 + INIT_LIST_HEAD(&dev->txwait);
3216 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
3217 +index 870b4f212fbc..5911c534cc18 100644
3218 +--- a/drivers/infiniband/sw/rdmavt/qp.c
3219 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
3220 +@@ -501,12 +501,9 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
3221 + */
3222 + static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
3223 + enum ib_qp_type type)
3224 +- __releases(&qp->s_lock)
3225 +- __releases(&qp->s_hlock)
3226 +- __releases(&qp->r_lock)
3227 +- __acquires(&qp->r_lock)
3228 +- __acquires(&qp->s_hlock)
3229 +- __acquires(&qp->s_lock)
3230 ++ __must_hold(&qp->r_lock)
3231 ++ __must_hold(&qp->s_hlock)
3232 ++ __must_hold(&qp->s_lock)
3233 + {
3234 + if (qp->state != IB_QPS_RESET) {
3235 + qp->state = IB_QPS_RESET;
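
The rdmavt hunk only changes sparse annotations: the old __releases/__acquires pairs claimed rvt_reset_qp() drops and retakes three locks internally, while __must_hold states that the caller holds them across the entire call, which matches the actual behavior. A sketch of the distinction, with fallback macros mirroring what include/linux/compiler_types.h does for non-sparse builds:

#ifdef __CHECKER__
#define __must_hold(x)	__attribute__((context(x, 1, 1)))
#define __releases(x)	__attribute__((context(x, 1, 0)))
#define __acquires(x)	__attribute__((context(x, 0, 1)))
#else
#define __must_hold(x)
#define __releases(x)
#define __acquires(x)
#endif

struct lock;

/* Before: told sparse the lock is dropped and retaken inside the call. */
void reset_unlocked(struct lock *l) __releases(l) __acquires(l);

/* After: tells sparse the caller holds the lock throughout, which is
 * what rvt_reset_qp() actually requires. */
void reset_locked(struct lock *l) __must_hold(l);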
3236 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
3237 +index 618f18436618..c65e17fae24e 100644
3238 +--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
3239 ++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
3240 +@@ -1009,7 +1009,6 @@ int i40e_unregister_client(struct i40e_client *client)
3241 + if (!i40e_client_is_registered(client)) {
3242 + pr_info("i40e: Client %s has not been registered\n",
3243 + client->name);
3244 +- mutex_unlock(&i40e_client_mutex);
3245 + ret = -ENODEV;
3246 + goto out;
3247 + }
3248 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
3249 +index dad15b6c66dd..c74d16409941 100644
3250 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
3251 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
3252 +@@ -7990,45 +7990,34 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
3253 + static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
3254 + u8 *lut, u16 lut_size)
3255 + {
3256 +- struct i40e_aqc_get_set_rss_key_data rss_key;
3257 + struct i40e_pf *pf = vsi->back;
3258 + struct i40e_hw *hw = &pf->hw;
3259 +- bool pf_lut = false;
3260 +- u8 *rss_lut;
3261 +- int ret, i;
3262 +-
3263 +- memcpy(&rss_key, seed, sizeof(rss_key));
3264 +-
3265 +- rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
3266 +- if (!rss_lut)
3267 +- return -ENOMEM;
3268 +-
3269 +- /* Populate the LUT with max no. of queues in round robin fashion */
3270 +- for (i = 0; i < vsi->rss_table_size; i++)
3271 +- rss_lut[i] = i % vsi->rss_size;
3272 ++ int ret = 0;
3273 +
3274 +- ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
3275 +- if (ret) {
3276 +- dev_info(&pf->pdev->dev,
3277 +- "Cannot set RSS key, err %s aq_err %s\n",
3278 +- i40e_stat_str(&pf->hw, ret),
3279 +- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3280 +- goto config_rss_aq_out;
3281 ++ if (seed) {
3282 ++ struct i40e_aqc_get_set_rss_key_data *seed_dw =
3283 ++ (struct i40e_aqc_get_set_rss_key_data *)seed;
3284 ++ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
3285 ++ if (ret) {
3286 ++ dev_info(&pf->pdev->dev,
3287 ++ "Cannot set RSS key, err %s aq_err %s\n",
3288 ++ i40e_stat_str(hw, ret),
3289 ++ i40e_aq_str(hw, hw->aq.asq_last_status));
3290 ++ return ret;
3291 ++ }
3292 + }
3293 ++ if (lut) {
3294 ++ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
3295 +
3296 +- if (vsi->type == I40E_VSI_MAIN)
3297 +- pf_lut = true;
3298 +-
3299 +- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
3300 +- vsi->rss_table_size);
3301 +- if (ret)
3302 +- dev_info(&pf->pdev->dev,
3303 +- "Cannot set RSS lut, err %s aq_err %s\n",
3304 +- i40e_stat_str(&pf->hw, ret),
3305 +- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3306 +-
3307 +-config_rss_aq_out:
3308 +- kfree(rss_lut);
3309 ++ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
3310 ++ if (ret) {
3311 ++ dev_info(&pf->pdev->dev,
3312 ++ "Cannot set RSS lut, err %s aq_err %s\n",
3313 ++ i40e_stat_str(hw, ret),
3314 ++ i40e_aq_str(hw, hw->aq.asq_last_status));
3315 ++ return ret;
3316 ++ }
3317 ++ }
3318 + return ret;
3319 + }
3320 +
3321 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
3322 +index 24c8d65bcf34..09ca63466504 100644
3323 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
3324 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
3325 +@@ -2394,6 +2394,8 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
3326 + skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
3327 + spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
3328 +
3329 ++ ath10k_mac_tx_push_pending(ar);
3330 ++
3331 + spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
3332 + skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
3333 + spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
3334 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
3335 +index 0bbd0a00edcc..146365b93ff5 100644
3336 +--- a/drivers/net/wireless/ath/ath10k/mac.c
3337 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
3338 +@@ -3777,7 +3777,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3339 + enum ath10k_hw_txrx_mode txmode;
3340 + enum ath10k_mac_tx_path txpath;
3341 + struct sk_buff *skb;
3342 ++ struct ieee80211_hdr *hdr;
3343 + size_t skb_len;
3344 ++ bool is_mgmt, is_presp;
3345 + int ret;
3346 +
3347 + spin_lock_bh(&ar->htt.tx_lock);
3348 +@@ -3801,6 +3803,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3349 + skb_len = skb->len;
3350 + txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3351 + txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3352 ++ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3353 ++
3354 ++ if (is_mgmt) {
3355 ++ hdr = (struct ieee80211_hdr *)skb->data;
3356 ++ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3357 ++
3358 ++ spin_lock_bh(&ar->htt.tx_lock);
3359 ++ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3360 ++
3361 ++ if (ret) {
3362 ++ ath10k_htt_tx_dec_pending(htt);
3363 ++ spin_unlock_bh(&ar->htt.tx_lock);
3364 ++ return ret;
3365 ++ }
3366 ++ spin_unlock_bh(&ar->htt.tx_lock);
3367 ++ }
3368 +
3369 + ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3370 + if (unlikely(ret)) {
3371 +@@ -3808,6 +3826,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3372 +
3373 + spin_lock_bh(&ar->htt.tx_lock);
3374 + ath10k_htt_tx_dec_pending(htt);
3375 ++ if (is_mgmt)
3376 ++ ath10k_htt_tx_mgmt_dec_pending(htt);
3377 + spin_unlock_bh(&ar->htt.tx_lock);
3378 +
3379 + return ret;
3380 +@@ -6538,7 +6558,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
3381 + goto exit;
3382 + }
3383 +
3384 +- ath10k_mac_update_bss_chan_survey(ar, survey->channel);
3385 ++ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
3386 +
3387 + spin_lock_bh(&ar->data_lock);
3388 + memcpy(survey, ar_survey, sizeof(*survey));
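
The ath10k mac.c hunk adds a second, management-only pending counter next to the generic one, and on a failed send rolls back exactly what was taken, under the same lock that protected the increment. A generic userspace sketch of that take/roll-back shape (names are illustrative, not the driver's):

#include <pthread.h>

struct txq {
	pthread_mutex_t lock;
	int pending;         /* all frames in flight */
	int mgmt_pending;    /* management frames in flight */
};

int tx_frame(struct txq *q, int is_mgmt, int (*hw_send)(void))
{
	int ret;

	pthread_mutex_lock(&q->lock);
	q->pending++;
	if (is_mgmt)
		q->mgmt_pending++;
	pthread_mutex_unlock(&q->lock);

	ret = hw_send();
	if (ret) {
		/* Roll back both counters so the queue does not leak slots. */
		pthread_mutex_lock(&q->lock);
		q->pending--;
		if (is_mgmt)
			q->mgmt_pending--;
		pthread_mutex_unlock(&q->lock);
	}
	return ret;
}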
3389 +diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
3390 +index b29a86a26c13..28ff5cb4ec28 100644
3391 +--- a/drivers/net/wireless/ath/ath10k/txrx.c
3392 ++++ b/drivers/net/wireless/ath/ath10k/txrx.c
3393 +@@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
3394 + ieee80211_tx_status(htt->ar->hw, msdu);
3395 + /* we do not own the msdu anymore */
3396 +
3397 +- ath10k_mac_tx_push_pending(ar);
3398 +-
3399 + return 0;
3400 + }
3401 +
3402 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
3403 +index 3ef468893b3f..f67cc198bc0e 100644
3404 +--- a/drivers/net/wireless/ath/ath10k/wmi.h
3405 ++++ b/drivers/net/wireless/ath/ath10k/wmi.h
3406 +@@ -180,6 +180,7 @@ enum wmi_service {
3407 + WMI_SERVICE_MESH_NON_11S,
3408 + WMI_SERVICE_PEER_STATS,
3409 + WMI_SERVICE_RESTRT_CHNL_SUPPORT,
3410 ++ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
3411 + WMI_SERVICE_TX_MODE_PUSH_ONLY,
3412 + WMI_SERVICE_TX_MODE_PUSH_PULL,
3413 + WMI_SERVICE_TX_MODE_DYNAMIC,
3414 +@@ -305,6 +306,7 @@ enum wmi_10_4_service {
3415 + WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
3416 + WMI_10_4_SERVICE_PEER_STATS,
3417 + WMI_10_4_SERVICE_MESH_11S,
3418 ++ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
3419 + WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
3420 + WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
3421 + WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
3422 +@@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
3423 + SVCSTR(WMI_SERVICE_MESH_NON_11S);
3424 + SVCSTR(WMI_SERVICE_PEER_STATS);
3425 + SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
3426 ++ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
3427 + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
3428 + SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
3429 + SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
3430 +@@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
3431 + WMI_SERVICE_PEER_STATS, len);
3432 + SVCMAP(WMI_10_4_SERVICE_MESH_11S,
3433 + WMI_SERVICE_MESH_11S, len);
3434 ++ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
3435 ++ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
3436 + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
3437 + WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
3438 + SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
3439 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
3440 +index 43f8f7d45ddb..adba3b003f55 100644
3441 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
3442 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
3443 +@@ -564,11 +564,16 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
3444 + __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
3445 + __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
3446 +
3447 +- /* If OEM did not fuse address - get it from OTP */
3448 +- if (!mac_addr0 && !mac_addr1) {
3449 +- mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
3450 +- mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
3451 +- }
3452 ++ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3453 ++ /*
3454 ++ * If the OEM fused a valid address, use it instead of the one in the
3455 ++ * OTP
3456 ++ */
3457 ++ if (is_valid_ether_addr(data->hw_addr))
3458 ++ return;
3459 ++
3460 ++ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
3461 ++ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
3462 +
3463 + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3464 + }
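
The iwlwifi change strengthens the fallback condition: the old code only read the OTP address when both strap words were all-zero, while the new code derives the address first and falls back whenever the result is not a valid unicast MAC, which also catches all-ones and multicast patterns. A stand-alone sketch of the logic, with a simplified validity test standing in for the kernel's is_valid_ether_addr():

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool valid_unicast(const uint8_t a[6])
{
	static const uint8_t zero[6];

	return memcmp(a, zero, 6) != 0 &&   /* not all zeros */
	       !(a[0] & 0x01);              /* not multicast/broadcast */
}

void set_hw_address(const uint8_t fused[6], const uint8_t otp[6],
		    uint8_t out[6])
{
	memcpy(out, fused, 6);
	if (valid_unicast(out))
		return;              /* OEM fused a usable address */
	memcpy(out, otp, 6);         /* otherwise fall back to the OTP copy */
}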
3465 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3466 +index 7e0cdbf8bf74..794c57486e02 100644
3467 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3468 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3469 +@@ -1214,9 +1214,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
3470 + }
3471 +
3472 + /* TODO: read the budget from BIOS / Platform NVM */
3473 +- if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
3474 ++ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
3475 + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
3476 + mvm->cooling_dev.cur_state);
3477 ++ if (ret)
3478 ++ goto error;
3479 ++ }
3480 + #else
3481 + /* Initialize tx backoffs to the minimal possible */
3482 + iwl_mvm_tt_tx_backoff(mvm, 0);
3483 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3484 +index 69c42ce45b8a..d742d27d8de0 100644
3485 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3486 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
3487 +@@ -539,6 +539,11 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3488 + iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
3489 + IWL_MVM_OFFCHANNEL_QUEUE,
3490 + IWL_MAX_TID_COUNT, 0);
3491 ++ else
3492 ++ iwl_mvm_disable_txq(mvm,
3493 ++ IWL_MVM_DQA_P2P_DEVICE_QUEUE,
3494 ++ vif->hw_queue[0], IWL_MAX_TID_COUNT,
3495 ++ 0);
3496 +
3497 + break;
3498 + case NL80211_IFTYPE_AP:
3499 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3500 +index df6c32caa5f0..afb7eb60e454 100644
3501 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3502 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
3503 +@@ -598,9 +598,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
3504 +
3505 + mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3506 +
3507 +- /* not a data packet */
3508 +- if (!ieee80211_is_data_qos(hdr->frame_control) ||
3509 +- is_multicast_ether_addr(hdr->addr1))
3510 ++ /* not a data packet or a bar */
3511 ++ if (!ieee80211_is_back_req(hdr->frame_control) &&
3512 ++ (!ieee80211_is_data_qos(hdr->frame_control) ||
3513 ++ is_multicast_ether_addr(hdr->addr1)))
3514 + return false;
3515 +
3516 + if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3517 +@@ -624,6 +625,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
3518 +
3519 + spin_lock_bh(&buffer->lock);
3520 +
3521 ++ if (ieee80211_is_back_req(hdr->frame_control)) {
3522 ++ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
3523 ++ goto drop;
3524 ++ }
3525 ++
3526 + /*
3527 + * If there was a significant jump in the nssn - adjust.
3528 + * If the SN is smaller than the NSSN it might need to first go into
3529 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3530 +index 3130b9c68a74..e933c12d80aa 100644
3531 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3532 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
3533 +@@ -576,9 +576,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
3534 + ret);
3535 +
3536 + /* Make sure the SCD wrptr is correctly set before reconfiguring */
3537 +- iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
3538 +- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
3539 +- ssn, wdg_timeout);
3540 ++ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
3541 +
3542 + /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
3543 +
3544 +@@ -1270,9 +1268,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
3545 + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
3546 +
3547 + /* If DQA is supported - the queues can be disabled now */
3548 +- if (iwl_mvm_is_dqa_supported(mvm))
3549 ++ if (iwl_mvm_is_dqa_supported(mvm)) {
3550 ++ u8 reserved_txq = mvm_sta->reserved_queue;
3551 ++ enum iwl_mvm_queue_status *status;
3552 ++
3553 + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
3554 +
3555 ++ /*
3556 ++ * If no traffic has gone through the reserved TXQ - it
3557 ++ * is still marked as IWL_MVM_QUEUE_RESERVED, and
3558 ++ * should be manually marked as free again
3559 ++ */
3560 ++ spin_lock_bh(&mvm->queue_info_lock);
3561 ++ status = &mvm->queue_info[reserved_txq].status;
3562 ++ if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
3563 ++ (*status != IWL_MVM_QUEUE_FREE),
3564 ++ "sta_id %d reserved txq %d status %d",
3565 ++ mvm_sta->sta_id, reserved_txq, *status)) {
3566 ++ spin_unlock_bh(&mvm->queue_info_lock);
3567 ++ return -EINVAL;
3568 ++ }
3569 ++
3570 ++ *status = IWL_MVM_QUEUE_FREE;
3571 ++ spin_unlock_bh(&mvm->queue_info_lock);
3572 ++ }
3573 ++
3574 + if (vif->type == NL80211_IFTYPE_STATION &&
3575 + mvmvif->ap_sta_id == mvm_sta->sta_id) {
3576 + /* if associated - we can't remove the AP STA now */
3577 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3578 +index b3a87a31de30..a0c1e3d07db5 100644
3579 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3580 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
3581 +@@ -903,9 +903,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
3582 + tid = IWL_MAX_TID_COUNT;
3583 + }
3584 +
3585 +- if (iwl_mvm_is_dqa_supported(mvm))
3586 ++ if (iwl_mvm_is_dqa_supported(mvm)) {
3587 + txq_id = mvmsta->tid_data[tid].txq_id;
3588 +
3589 ++ if (ieee80211_is_mgmt(fc))
3590 ++ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
3591 ++ }
3592 ++
3593 + /* Copy MAC header from skb into command buffer */
3594 + memcpy(tx_cmd->hdr, hdr, hdrlen);
3595 +
3596 +diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
3597 +index 1c7b00630b90..b89596c18b41 100644
3598 +--- a/drivers/net/wireless/marvell/mwifiex/join.c
3599 ++++ b/drivers/net/wireless/marvell/mwifiex/join.c
3600 +@@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
3601 + priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
3602 + sizeof(priv->assoc_rsp_buf));
3603 +
3604 +- memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
3605 +-
3606 + assoc_rsp->a_id = cpu_to_le16(aid);
3607 ++ memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
3608 +
3609 + if (status_code) {
3610 + priv->adapter->dbg.num_cmd_assoc_failure++;
3611 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
3612 +index a422f3306d4d..7e394d485f54 100644
3613 +--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
3614 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
3615 +@@ -708,7 +708,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
3616 +
3617 + case EVENT_EXT_SCAN_REPORT:
3618 + mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
3619 +- if (adapter->ext_scan && !priv->scan_aborting)
3620 ++ /* We intend to skip this event during suspend, but handle
3621 ++ * it in interface disabled case
3622 ++ */
3623 ++ if (adapter->ext_scan && (!priv->scan_aborting ||
3624 ++ !netif_running(priv->netdev)))
3625 + ret = mwifiex_handle_event_ext_scan_report(priv,
3626 + adapter->event_skb->data);
3627 +
3628 +diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
3629 +index 7cf26c6124d1..6005e14213ca 100644
3630 +--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
3631 ++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
3632 +@@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
3633 + rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
3634 + sizeof(struct usb_anchor),
3635 + GFP_KERNEL);
3636 +- if (!rt2x00dev->anchor)
3637 ++ if (!rt2x00dev->anchor) {
3638 ++ retval = -ENOMEM;
3639 + goto exit_free_reg;
3640 ++ }
3641 +
3642 + init_usb_anchor(rt2x00dev->anchor);
3643 + return 0;
3644 +diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
3645 +index 935866fe5ec2..a8b6949a8778 100644
3646 +--- a/drivers/nvdimm/bus.c
3647 ++++ b/drivers/nvdimm/bus.c
3648 +@@ -217,6 +217,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
3649 + return rc;
3650 + if (cmd_rc < 0)
3651 + return cmd_rc;
3652 ++
3653 ++ nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
3654 + return clear_err.cleared;
3655 + }
3656 + EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
3657 +diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
3658 +index 4d7bbd2df5c0..7ceba08774b6 100644
3659 +--- a/drivers/nvdimm/core.c
3660 ++++ b/drivers/nvdimm/core.c
3661 +@@ -547,11 +547,12 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
3662 + }
3663 + EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
3664 +
3665 +-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
3666 ++static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
3667 ++ gfp_t flags)
3668 + {
3669 + struct nd_poison *pl;
3670 +
3671 +- pl = kzalloc(sizeof(*pl), GFP_KERNEL);
3672 ++ pl = kzalloc(sizeof(*pl), flags);
3673 + if (!pl)
3674 + return -ENOMEM;
3675 +
3676 +@@ -567,7 +568,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
3677 + struct nd_poison *pl;
3678 +
3679 + if (list_empty(&nvdimm_bus->poison_list))
3680 +- return add_poison(nvdimm_bus, addr, length);
3681 ++ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
3682 +
3683 + /*
3684 + * There is a chance this is a duplicate, check for those first.
3685 +@@ -587,7 +588,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
3686 + * as any overlapping ranges will get resolved when the list is consumed
3687 + * and converted to badblocks
3688 + */
3689 +- return add_poison(nvdimm_bus, addr, length);
3690 ++ return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
3691 + }
3692 +
3693 + int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
3694 +@@ -602,6 +603,70 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
3695 + }
3696 + EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
3697 +
3698 ++void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
3699 ++ phys_addr_t start, unsigned int len)
3700 ++{
3701 ++ struct list_head *poison_list = &nvdimm_bus->poison_list;
3702 ++ u64 clr_end = start + len - 1;
3703 ++ struct nd_poison *pl, *next;
3704 ++
3705 ++ nvdimm_bus_lock(&nvdimm_bus->dev);
3706 ++ WARN_ON_ONCE(list_empty(poison_list));
3707 ++
3708 ++ /*
3709 ++ * [start, clr_end] is the poison interval being cleared.
3710 ++ * [pl->start, pl_end] is the poison_list entry we're comparing
3711 ++ * the above interval against. The poison list entry may need
3712 ++ * to be modified (update either start or length), deleted, or
3713 ++ * split into two based on the overlap characteristics
3714 ++ */
3715 ++
3716 ++ list_for_each_entry_safe(pl, next, poison_list, list) {
3717 ++ u64 pl_end = pl->start + pl->length - 1;
3718 ++
3719 ++ /* Skip intervals with no intersection */
3720 ++ if (pl_end < start)
3721 ++ continue;
3722 ++ if (pl->start > clr_end)
3723 ++ continue;
3724 ++ /* Delete completely overlapped poison entries */
3725 ++ if ((pl->start >= start) && (pl_end <= clr_end)) {
3726 ++ list_del(&pl->list);
3727 ++ kfree(pl);
3728 ++ continue;
3729 ++ }
3730 ++ /* Adjust start point of partially cleared entries */
3731 ++ if ((start <= pl->start) && (clr_end > pl->start)) {
3732 ++ pl->length -= clr_end - pl->start + 1;
3733 ++ pl->start = clr_end + 1;
3734 ++ continue;
3735 ++ }
3736 ++ /* Adjust pl->length for partial clearing at the tail end */
3737 ++ if ((pl->start < start) && (pl_end <= clr_end)) {
3738 ++ /* pl->start remains the same */
3739 ++ pl->length = start - pl->start;
3740 ++ continue;
3741 ++ }
3742 ++ /*
3743 ++ * If clearing in the middle of an entry, we split it into
3744 ++ * two by modifying the current entry to represent one half of
3745 ++ * the split, and adding a new entry for the second half.
3746 ++ */
3747 ++ if ((pl->start < start) && (pl_end > clr_end)) {
3748 ++ u64 new_start = clr_end + 1;
3749 ++ u64 new_len = pl_end - new_start + 1;
3750 ++
3751 ++ /* Add new entry covering the right half */
3752 ++ add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
3753 ++ /* Adjust this entry to cover the left half */
3754 ++ pl->length = start - pl->start;
3755 ++ continue;
3756 ++ }
3757 ++ }
3758 ++ nvdimm_bus_unlock(&nvdimm_bus->dev);
3759 ++}
3760 ++EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
3761 ++
3762 + #ifdef CONFIG_BLK_DEV_INTEGRITY
3763 + int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
3764 + {
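
The new nvdimm_clear_from_poison_list() walks the list and handles the four ways the cleared interval can overlap an entry: full cover (delete), clipped head, clipped tail, and strictly inside (split into two entries). A userspace sketch of the same interval arithmetic, with the list reduced to a fixed array so the four cases stand alone:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, len; int live; };

void clear_range(struct range *r, size_t n, size_t *n_used,
		 uint64_t start, uint64_t len)
{
	uint64_t clr_end = start + len - 1;

	for (size_t i = 0, orig = *n_used; i < orig; i++) {
		uint64_t end = r[i].start + r[i].len - 1;

		if (!r[i].live || end < start || r[i].start > clr_end)
			continue;                       /* no intersection */
		if (r[i].start >= start && end <= clr_end) {
			r[i].live = 0;                  /* fully covered: delete */
		} else if (start <= r[i].start) {
			r[i].len -= clr_end - r[i].start + 1;
			r[i].start = clr_end + 1;       /* clip the head */
		} else if (end <= clr_end) {
			r[i].len = start - r[i].start;  /* clip the tail */
		} else if (*n_used < n) {               /* strictly inside: split */
			r[*n_used] = (struct range){ clr_end + 1, end - clr_end, 1 };
			(*n_used)++;
			r[i].len = start - r[i].start;
		}
	}
}

int main(void)
{
	struct range r[4] = { { 0, 100, 1 } };
	size_t used = 1;

	clear_range(r, 4, &used, 40, 20);   /* splits [0,99] around [40,59] */
	for (size_t i = 0; i < used; i++)
		if (r[i].live)
			printf("[%llu, %llu]\n",
			       (unsigned long long)r[i].start,
			       (unsigned long long)(r[i].start + r[i].len - 1));
	return 0;
}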
3765 +diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
3766 +index ef9893fa3176..4f5e567fd7e0 100644
3767 +--- a/drivers/pci/host/pci-aardvark.c
3768 ++++ b/drivers/pci/host/pci-aardvark.c
3769 +@@ -848,7 +848,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
3770 + int err, res_valid = 0;
3771 + struct device *dev = &pcie->pdev->dev;
3772 + struct device_node *np = dev->of_node;
3773 +- struct resource_entry *win;
3774 ++ struct resource_entry *win, *tmp;
3775 + resource_size_t iobase;
3776 +
3777 + INIT_LIST_HEAD(&pcie->resources);
3778 +@@ -862,7 +862,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
3779 + if (err)
3780 + goto out_release_res;
3781 +
3782 +- resource_list_for_each_entry(win, &pcie->resources) {
3783 ++ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
3784 + struct resource *res = win->res;
3785 +
3786 + switch (resource_type(res)) {
3787 +@@ -874,9 +874,11 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
3788 + lower_32_bits(res->start),
3789 + OB_PCIE_IO);
3790 + err = pci_remap_iospace(res, iobase);
3791 +- if (err)
3792 ++ if (err) {
3793 + dev_warn(dev, "error %d: failed to map resource %pR\n",
3794 + err, res);
3795 ++ resource_list_destroy_entry(win);
3796 ++ }
3797 + break;
3798 + case IORESOURCE_MEM:
3799 + advk_pcie_set_ob_win(pcie, 0,
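
This aardvark hunk and the pci-host-common, pci-versatile, pcie-designware and pcie-rcar hunks below are the same change: when pci_remap_iospace() fails, the I/O entry is now destroyed rather than left in the resource list, and because the loop body deletes the node it is standing on, the iteration must switch to the _safe variant that caches the successor first (pci-tegra instead moves the remap before the resource is published). A minimal sketch of why the safe traversal is required:

#include <stdlib.h>

struct node { struct node *next; int bad; };

void prune(struct node **head)
{
	struct node **link = head;
	struct node *n, *tmp;

	/* "Safe" traversal: remember the successor before freeing, since
	 * a plain 'n = n->next' after free(n) would chase freed memory. */
	for (n = *head; n; n = tmp) {
		tmp = n->next;
		if (n->bad) {
			*link = tmp;     /* unlink the current node */
			free(n);
		} else {
			link = &n->next;
		}
	}
}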
3800 +diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
3801 +index 9d9d34e959b6..61eb4d46eb50 100644
3802 +--- a/drivers/pci/host/pci-host-common.c
3803 ++++ b/drivers/pci/host/pci-host-common.c
3804 +@@ -29,7 +29,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
3805 + int err, res_valid = 0;
3806 + struct device_node *np = dev->of_node;
3807 + resource_size_t iobase;
3808 +- struct resource_entry *win;
3809 ++ struct resource_entry *win, *tmp;
3810 +
3811 + err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
3812 + if (err)
3813 +@@ -39,15 +39,17 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
3814 + if (err)
3815 + return err;
3816 +
3817 +- resource_list_for_each_entry(win, resources) {
3818 ++ resource_list_for_each_entry_safe(win, tmp, resources) {
3819 + struct resource *res = win->res;
3820 +
3821 + switch (resource_type(res)) {
3822 + case IORESOURCE_IO:
3823 + err = pci_remap_iospace(res, iobase);
3824 +- if (err)
3825 ++ if (err) {
3826 + dev_warn(dev, "error %d: failed to map resource %pR\n",
3827 + err, res);
3828 ++ resource_list_destroy_entry(win);
3829 ++ }
3830 + break;
3831 + case IORESOURCE_MEM:
3832 + res_valid |= !(res->flags & IORESOURCE_PREFETCH);
3833 +diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
3834 +index 84d650d892e7..7ec1e800096a 100644
3835 +--- a/drivers/pci/host/pci-tegra.c
3836 ++++ b/drivers/pci/host/pci-tegra.c
3837 +@@ -621,7 +621,11 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
3838 + if (err < 0)
3839 + return err;
3840 +
3841 +- pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
3842 ++ err = pci_remap_iospace(&pcie->pio, pcie->io.start);
3843 ++ if (!err)
3844 ++ pci_add_resource_offset(&sys->resources, &pcie->pio,
3845 ++ sys->io_offset);
3846 ++
3847 + pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
3848 + pci_add_resource_offset(&sys->resources, &pcie->prefetch,
3849 + sys->mem_offset);
3850 +@@ -631,7 +635,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
3851 + if (err < 0)
3852 + return err;
3853 +
3854 +- pci_remap_iospace(&pcie->pio, pcie->io.start);
3855 + return 1;
3856 + }
3857 +
3858 +diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
3859 +index f234405770ab..b7dc07002f13 100644
3860 +--- a/drivers/pci/host/pci-versatile.c
3861 ++++ b/drivers/pci/host/pci-versatile.c
3862 +@@ -74,7 +74,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
3863 + int err, mem = 1, res_valid = 0;
3864 + struct device_node *np = dev->of_node;
3865 + resource_size_t iobase;
3866 +- struct resource_entry *win;
3867 ++ struct resource_entry *win, *tmp;
3868 +
3869 + err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
3870 + if (err)
3871 +@@ -84,15 +84,17 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
3872 + if (err)
3873 + goto out_release_res;
3874 +
3875 +- resource_list_for_each_entry(win, res) {
3876 ++ resource_list_for_each_entry_safe(win, tmp, res) {
3877 + struct resource *res = win->res;
3878 +
3879 + switch (resource_type(res)) {
3880 + case IORESOURCE_IO:
3881 + err = pci_remap_iospace(res, iobase);
3882 +- if (err)
3883 ++ if (err) {
3884 + dev_warn(dev, "error %d: failed to map resource %pR\n",
3885 + err, res);
3886 ++ resource_list_destroy_entry(win);
3887 ++ }
3888 + break;
3889 + case IORESOURCE_MEM:
3890 + res_valid |= !(res->flags & IORESOURCE_PREFETCH);
3891 +diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
3892 +index 12afce19890b..2a500f270c01 100644
3893 +--- a/drivers/pci/host/pcie-designware.c
3894 ++++ b/drivers/pci/host/pcie-designware.c
3895 +@@ -436,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
3896 + struct resource *cfg_res;
3897 + int i, ret;
3898 + LIST_HEAD(res);
3899 +- struct resource_entry *win;
3900 ++ struct resource_entry *win, *tmp;
3901 +
3902 + cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
3903 + if (cfg_res) {
3904 +@@ -457,17 +457,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
3905 + goto error;
3906 +
3907 + /* Get the I/O and memory ranges from DT */
3908 +- resource_list_for_each_entry(win, &res) {
3909 ++ resource_list_for_each_entry_safe(win, tmp, &res) {
3910 + switch (resource_type(win->res)) {
3911 + case IORESOURCE_IO:
3912 +- pp->io = win->res;
3913 +- pp->io->name = "I/O";
3914 +- pp->io_size = resource_size(pp->io);
3915 +- pp->io_bus_addr = pp->io->start - win->offset;
3916 +- ret = pci_remap_iospace(pp->io, pp->io_base);
3917 +- if (ret)
3918 ++ ret = pci_remap_iospace(win->res, pp->io_base);
3919 ++ if (ret) {
3920 + dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
3921 +- ret, pp->io);
3922 ++ ret, win->res);
3923 ++ resource_list_destroy_entry(win);
3924 ++ } else {
3925 ++ pp->io = win->res;
3926 ++ pp->io->name = "I/O";
3927 ++ pp->io_size = resource_size(pp->io);
3928 ++ pp->io_bus_addr = pp->io->start - win->offset;
3929 ++ }
3930 + break;
3931 + case IORESOURCE_MEM:
3932 + pp->mem = win->res;
3933 +diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
3934 +index 65db7a221509..5f7fcc971cae 100644
3935 +--- a/drivers/pci/host/pcie-rcar.c
3936 ++++ b/drivers/pci/host/pcie-rcar.c
3937 +@@ -945,7 +945,7 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
3938 + struct device *dev = pci->dev;
3939 + struct device_node *np = dev->of_node;
3940 + resource_size_t iobase;
3941 +- struct resource_entry *win;
3942 ++ struct resource_entry *win, *tmp;
3943 +
3944 + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
3945 + if (err)
3946 +@@ -955,14 +955,17 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
3947 + if (err)
3948 + goto out_release_res;
3949 +
3950 +- resource_list_for_each_entry(win, &pci->resources) {
3951 ++ resource_list_for_each_entry_safe(win, tmp, &pci->resources) {
3952 + struct resource *res = win->res;
3953 +
3954 + if (resource_type(res) == IORESOURCE_IO) {
3955 + err = pci_remap_iospace(res, iobase);
3956 +- if (err)
3957 ++ if (err) {
3958 + dev_warn(dev, "error %d: failed to map resource %pR\n",
3959 + err, res);
3960 ++
3961 ++ resource_list_destroy_entry(win);
3962 ++ }
3963 + }
3964 + }
3965 +
3966 +diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
3967 +index 51c42d746883..775c88303017 100644
3968 +--- a/drivers/pinctrl/qcom/pinctrl-msm.c
3969 ++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
3970 +@@ -156,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
3971 + spin_lock_irqsave(&pctrl->lock, flags);
3972 +
3973 + val = readl(pctrl->regs + g->ctl_reg);
3974 +- val &= mask;
3975 ++ val &= ~mask;
3976 + val |= i << g->mux_bit;
3977 + writel(val, pctrl->regs + g->ctl_reg);
3978 +
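
The pinctrl-msm fix is a classic read-modify-write bug in one character: 'val &= mask' keeps only the field's bits and wipes every other bit in the register, whereas the intended 'val &= ~mask' clears just the target field before the new value is ORed in. The same update in miniature:

#include <assert.h>
#include <stdint.h>

static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned shift,
			  uint32_t value)
{
	reg &= ~mask;            /* clear only the target field */
	reg |= value << shift;   /* insert the new value */
	return reg;
}

int main(void)
{
	/* Field is bits 4..6; all other bits must survive the update. */
	uint32_t reg = 0xABCD0081;

	assert(set_field(reg, 0x7 << 4, 4, 0x5) == 0xABCD00D1);
	return 0;
}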
3979 +diff --git a/drivers/power/bq24257_charger.c b/drivers/power/bq24257_charger.c
3980 +index 1fea2c7ef97f..6fc31bdc639b 100644
3981 +--- a/drivers/power/bq24257_charger.c
3982 ++++ b/drivers/power/bq24257_charger.c
3983 +@@ -1068,6 +1068,12 @@ static int bq24257_probe(struct i2c_client *client,
3984 + return ret;
3985 + }
3986 +
3987 ++ ret = bq24257_power_supply_init(bq);
3988 ++ if (ret < 0) {
3989 ++ dev_err(dev, "Failed to register power supply\n");
3990 ++ return ret;
3991 ++ }
3992 ++
3993 + ret = devm_request_threaded_irq(dev, client->irq, NULL,
3994 + bq24257_irq_handler_thread,
3995 + IRQF_TRIGGER_FALLING |
3996 +@@ -1078,12 +1084,6 @@ static int bq24257_probe(struct i2c_client *client,
3997 + return ret;
3998 + }
3999 +
4000 +- ret = bq24257_power_supply_init(bq);
4001 +- if (ret < 0) {
4002 +- dev_err(dev, "Failed to register power supply\n");
4003 +- return ret;
4004 +- }
4005 +-
4006 + ret = sysfs_create_group(&bq->charger->dev.kobj, &bq24257_attr_group);
4007 + if (ret < 0) {
4008 + dev_err(dev, "Can't create sysfs entries\n");
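
The bq24257 change is a probe-ordering fix: a threaded IRQ handler may run the instant devm_request_threaded_irq() returns, and this one dereferences the power-supply object, so that object must be registered first. A schematic sketch of the corrected order (names are illustrative):

struct charger { void *psy; /* power-supply handle the IRQ handler uses */ };

int probe(struct charger *bq,
	  int (*register_psy)(struct charger *),
	  int (*request_irq)(struct charger *))
{
	int ret;

	ret = register_psy(bq);   /* 1: create what the handler touches */
	if (ret)
		return ret;
	return request_irq(bq);   /* 2: only now can interrupts fire */
}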
4009 +diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
4010 +index 6b1577c73fe7..285b4006f44b 100644
4011 +--- a/drivers/s390/char/con3270.c
4012 ++++ b/drivers/s390/char/con3270.c
4013 +@@ -124,7 +124,12 @@ con3270_create_status(struct con3270 *cp)
4014 + static void
4015 + con3270_update_string(struct con3270 *cp, struct string *s, int nr)
4016 + {
4017 +- if (s->len >= cp->view.cols - 5)
4018 ++ if (s->len < 4) {
4019 ++ /* This indicates a bug, but printing a warning would
4020 ++ * cause a deadlock. */
4021 ++ return;
4022 ++ }
4023 ++ if (s->string[s->len - 4] != TO_RA)
4024 + return;
4025 + raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
4026 + cp->view.cols * (nr + 1));
4027 +@@ -460,11 +465,11 @@ con3270_cline_end(struct con3270 *cp)
4028 + cp->cline->len + 4 : cp->view.cols;
4029 + s = con3270_alloc_string(cp, size);
4030 + memcpy(s->string, cp->cline->string, cp->cline->len);
4031 +- if (s->len < cp->view.cols - 5) {
4032 ++ if (cp->cline->len < cp->view.cols - 5) {
4033 + s->string[s->len - 4] = TO_RA;
4034 + s->string[s->len - 1] = 0;
4035 + } else {
4036 +- while (--size > cp->cline->len)
4037 ++ while (--size >= cp->cline->len)
4038 + s->string[size] = cp->view.ascebc[' '];
4039 + }
4040 + /* Replace cline with allocated line s and reset cline. */
4041 +diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
4042 +index 940e725bde1e..11674698b36d 100644
4043 +--- a/drivers/s390/cio/chsc.c
4044 ++++ b/drivers/s390/cio/chsc.c
4045 +@@ -95,12 +95,13 @@ struct chsc_ssd_area {
4046 + int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
4047 + {
4048 + struct chsc_ssd_area *ssd_area;
4049 ++ unsigned long flags;
4050 + int ccode;
4051 + int ret;
4052 + int i;
4053 + int mask;
4054 +
4055 +- spin_lock_irq(&chsc_page_lock);
4056 ++ spin_lock_irqsave(&chsc_page_lock, flags);
4057 + memset(chsc_page, 0, PAGE_SIZE);
4058 + ssd_area = chsc_page;
4059 + ssd_area->request.length = 0x0010;
4060 +@@ -144,7 +145,7 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
4061 + ssd->fla[i] = ssd_area->fla[i];
4062 + }
4063 + out:
4064 +- spin_unlock_irq(&chsc_page_lock);
4065 ++ spin_unlock_irqrestore(&chsc_page_lock, flags);
4066 + return ret;
4067 + }
4068 +
4069 +@@ -832,9 +833,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
4070 + u32 fmt : 4;
4071 + u32 : 16;
4072 + } __attribute__ ((packed)) *secm_area;
4073 ++ unsigned long flags;
4074 + int ret, ccode;
4075 +
4076 +- spin_lock_irq(&chsc_page_lock);
4077 ++ spin_lock_irqsave(&chsc_page_lock, flags);
4078 + memset(chsc_page, 0, PAGE_SIZE);
4079 + secm_area = chsc_page;
4080 + secm_area->request.length = 0x0050;
4081 +@@ -864,7 +866,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
4082 + CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
4083 + secm_area->response.code);
4084 + out:
4085 +- spin_unlock_irq(&chsc_page_lock);
4086 ++ spin_unlock_irqrestore(&chsc_page_lock, flags);
4087 + return ret;
4088 + }
4089 +
4090 +@@ -992,6 +994,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
4091 +
4092 + int chsc_get_channel_measurement_chars(struct channel_path *chp)
4093 + {
4094 ++ unsigned long flags;
4095 + int ccode, ret;
4096 +
4097 + struct {
4098 +@@ -1021,7 +1024,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
4099 + if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
4100 + return -EINVAL;
4101 +
4102 +- spin_lock_irq(&chsc_page_lock);
4103 ++ spin_lock_irqsave(&chsc_page_lock, flags);
4104 + memset(chsc_page, 0, PAGE_SIZE);
4105 + scmc_area = chsc_page;
4106 + scmc_area->request.length = 0x0010;
4107 +@@ -1053,7 +1056,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
4108 + chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
4109 + (struct cmg_chars *) &scmc_area->data);
4110 + out:
4111 +- spin_unlock_irq(&chsc_page_lock);
4112 ++ spin_unlock_irqrestore(&chsc_page_lock, flags);
4113 + return ret;
4114 + }
4115 +
4116 +@@ -1134,6 +1137,7 @@ struct css_chsc_char css_chsc_characteristics;
4117 + int __init
4118 + chsc_determine_css_characteristics(void)
4119 + {
4120 ++ unsigned long flags;
4121 + int result;
4122 + struct {
4123 + struct chsc_header request;
4124 +@@ -1146,7 +1150,7 @@ chsc_determine_css_characteristics(void)
4125 + u32 chsc_char[508];
4126 + } __attribute__ ((packed)) *scsc_area;
4127 +
4128 +- spin_lock_irq(&chsc_page_lock);
4129 ++ spin_lock_irqsave(&chsc_page_lock, flags);
4130 + memset(chsc_page, 0, PAGE_SIZE);
4131 + scsc_area = chsc_page;
4132 + scsc_area->request.length = 0x0010;
4133 +@@ -1168,7 +1172,7 @@ chsc_determine_css_characteristics(void)
4134 + CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
4135 + scsc_area->response.code);
4136 + exit:
4137 +- spin_unlock_irq(&chsc_page_lock);
4138 ++ spin_unlock_irqrestore(&chsc_page_lock, flags);
4139 + return result;
4140 + }
4141 +
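
All four chsc hunks make the same conversion: spin_unlock_irq() unconditionally re-enables interrupts, which corrupts state if the caller was already running with interrupts disabled, while the irqsave/irqrestore pair records the previous interrupt state and puts it back. A kernel-style sketch, assuming the usual <linux/spinlock.h> API:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(page_lock);

static void touch_shared_page(void)
{
	unsigned long flags;

	/*
	 * spin_lock_irq()/spin_unlock_irq() would blindly re-enable
	 * interrupts on exit; irqsave/irqrestore restore whatever state
	 * the caller had, so this helper is safe to call from paths
	 * that already run with interrupts disabled.
	 */
	spin_lock_irqsave(&page_lock, flags);
	/* ... use the shared page ... */
	spin_unlock_irqrestore(&page_lock, flags);
}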
4142 +diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
4143 +index 661bb94e2548..228b99ee0483 100644
4144 +--- a/drivers/scsi/cxlflash/main.c
4145 ++++ b/drivers/scsi/cxlflash/main.c
4146 +@@ -823,17 +823,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
4147 + }
4148 +
4149 + /**
4150 +- * cxlflash_shutdown() - shutdown handler
4151 +- * @pdev: PCI device associated with the host.
4152 +- */
4153 +-static void cxlflash_shutdown(struct pci_dev *pdev)
4154 +-{
4155 +- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
4156 +-
4157 +- notify_shutdown(cfg, false);
4158 +-}
4159 +-
4160 +-/**
4161 + * cxlflash_remove() - PCI entry point to tear down host
4162 + * @pdev: PCI device associated with the host.
4163 + *
4164 +@@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev)
4165 + struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
4166 + ulong lock_flags;
4167 +
4168 ++ if (!pci_is_enabled(pdev)) {
4169 ++ pr_debug("%s: Device is disabled\n", __func__);
4170 ++ return;
4171 ++ }
4172 ++
4173 + /* If a Task Management Function is active, wait for it to complete
4174 + * before continuing with remove.
4175 + */
4176 +@@ -2685,7 +2679,7 @@ static struct pci_driver cxlflash_driver = {
4177 + .id_table = cxlflash_pci_table,
4178 + .probe = cxlflash_probe,
4179 + .remove = cxlflash_remove,
4180 +- .shutdown = cxlflash_shutdown,
4181 ++ .shutdown = cxlflash_remove,
4182 + .err_handler = &cxlflash_err_handler,
4183 + };
4184 +
4185 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
4186 +index cd91a684c945..4cb79902e7a8 100644
4187 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
4188 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
4189 +@@ -4701,7 +4701,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4190 + le16_to_cpu(mpi_reply->DevHandle));
4191 + mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
4192 +
4193 +- if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
4194 ++ if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
4195 + ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
4196 + (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
4197 + (scmd->sense_buffer[2] == HARDWARE_ERROR)))
4198 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
4199 +index 9e9dadb52b3d..eec5e3f6e06b 100644
4200 +--- a/drivers/spi/spi-fsl-dspi.c
4201 ++++ b/drivers/spi/spi-fsl-dspi.c
4202 +@@ -760,7 +760,6 @@ static int dspi_remove(struct platform_device *pdev)
4203 + /* Disconnect from the SPI framework */
4204 + clk_disable_unprepare(dspi->clk);
4205 + spi_unregister_master(dspi->master);
4206 +- spi_master_put(dspi->master);
4207 +
4208 + return 0;
4209 + }
4210 +diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
4211 +index 19c1572f1525..800245eac390 100644
4212 +--- a/drivers/staging/android/ion/Kconfig
4213 ++++ b/drivers/staging/android/ion/Kconfig
4214 +@@ -36,6 +36,7 @@ config ION_TEGRA
4215 + config ION_HISI
4216 + tristate "Ion for Hisilicon"
4217 + depends on ARCH_HISI && ION
4218 ++ select ION_OF
4219 + help
4220 + Choose this option if you wish to use ion on Hisilicon Platform.
4221 +
4222 +diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
4223 +index a8822fe2bd60..f4cee811cabd 100644
4224 +--- a/drivers/staging/ks7010/ks_hostif.c
4225 ++++ b/drivers/staging/ks7010/ks_hostif.c
4226 +@@ -69,16 +69,20 @@ inline u32 get_DWORD(struct ks_wlan_private *priv)
4227 + return data;
4228 + }
4229 +
4230 +-void ks_wlan_hw_wakeup_task(struct work_struct *work)
4231 ++static void ks_wlan_hw_wakeup_task(struct work_struct *work)
4232 + {
4233 + struct ks_wlan_private *priv =
4234 + container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
4235 + int ps_status = atomic_read(&priv->psstatus.status);
4236 ++ long time_left;
4237 +
4238 + if (ps_status == PS_SNOOZE) {
4239 + ks_wlan_hw_wakeup_request(priv);
4240 +- if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
4241 +- DPRINTK(1, "wake up timeout !!!\n");
4242 ++ time_left = wait_for_completion_interruptible_timeout(
4243 ++ &priv->psstatus.wakeup_wait,
4244 ++ msecs_to_jiffies(20));
4245 ++ if (time_left <= 0) {
4246 ++ DPRINTK(1, "wake up timeout or interrupted !!!\n");
4247 + schedule_work(&priv->ks_wlan_wakeup_task);
4248 + return;
4249 + }
4250 +@@ -1505,7 +1509,7 @@ void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
4251 + ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
4252 + }
4253 +
4254 +-void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
4255 ++static void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
4256 + {
4257 + struct hostif_infrastructure_set2_request_t *pp;
4258 + uint16_t capability;
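
The ks7010 wakeup change fixes the return-value handling: wait_for_completion_interruptible_timeout() returns a long that is positive on completion, 0 on timeout, and negative when a signal interrupts the sleep, so testing '<= 0' covers both failure modes where the old boolean '!...' missed the signal case; msecs_to_jiffies(20) also states the 20 ms timeout without depending on HZ. A kernel-style sketch of the corrected wait, assuming the standard <linux/completion.h> API:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_wakeup(struct completion *done)
{
	long time_left;

	time_left = wait_for_completion_interruptible_timeout(
			done, msecs_to_jiffies(20));
	if (time_left <= 0)
		return -ETIMEDOUT;   /* timed out or interrupted: retry later */
	return 0;
}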
4259 +diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
4260 +index 77485235c615..32d3a9c07aa3 100644
4261 +--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
4262 ++++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
4263 +@@ -670,13 +670,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
4264 + u8 res = _SUCCESS;
4265 +
4266 +
4267 +- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
4268 ++ ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4269 + if (!ph2c) {
4270 + res = _FAIL;
4271 + goto exit;
4272 + }
4273 +
4274 +- paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
4275 ++ paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_ATOMIC);
4276 + if (!paddbareq_parm) {
4277 + kfree(ph2c);
4278 + res = _FAIL;
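
The rtl8188eu change follows the allocation-context rule: GFP_KERNEL may sleep to reclaim memory and is therefore illegal in atomic context (under a spinlock or in softirq), from which rtw_addbareq_cmd() can be reached; GFP_ATOMIC never sleeps but fails more readily, which is why both call sites already check for NULL. A kernel-style sketch of the rule, assuming the standard <linux/slab.h> API:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cmd_lock);

static int queue_cmd_atomic(size_t len)
{
	void *cmd;

	spin_lock_bh(&cmd_lock);
	cmd = kzalloc(len, GFP_ATOMIC);   /* GFP_KERNEL here could sleep while atomic */
	if (!cmd) {
		spin_unlock_bh(&cmd_lock);
		return -ENOMEM;
	}
	/* ... fill and enqueue cmd; the queue now owns it ... */
	spin_unlock_bh(&cmd_lock);
	return 0;
}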
4279 +diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
4280 +index ccb4e067661a..e29d4bd5dcec 100644
4281 +--- a/drivers/staging/sm750fb/ddk750_mode.c
4282 ++++ b/drivers/staging/sm750fb/ddk750_mode.c
4283 +@@ -63,7 +63,7 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
4284 + dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
4285 +
4286 + /* Set bit 14 of display controller */
4287 +- dispControl = DISPLAY_CTRL_CLOCK_PHASE;
4288 ++ dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
4289 +
4290 + POKE32(CRT_DISPLAY_CTRL, dispControl);
4291 +
4292 +diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
4293 +index 915facbf552e..e1134a4d97f3 100644
4294 +--- a/drivers/uio/uio_dmem_genirq.c
4295 ++++ b/drivers/uio/uio_dmem_genirq.c
4296 +@@ -229,7 +229,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
4297 + ++uiomem;
4298 + }
4299 +
4300 +- priv->dmem_region_start = i;
4301 ++ priv->dmem_region_start = uiomem - &uioinfo->mem[0];
4302 + priv->num_dmem_regions = pdata->num_dynamic_regions;
4303 +
4304 + for (i = 0; i < pdata->num_dynamic_regions; ++i) {
4305 +diff --git a/fs/9p/acl.c b/fs/9p/acl.c
4306 +index 5b6a1743ea17..b3c2cc79c20d 100644
4307 +--- a/fs/9p/acl.c
4308 ++++ b/fs/9p/acl.c
4309 +@@ -276,32 +276,26 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
4310 + switch (handler->flags) {
4311 + case ACL_TYPE_ACCESS:
4312 + if (acl) {
4313 +- umode_t mode = inode->i_mode;
4314 +- retval = posix_acl_equiv_mode(acl, &mode);
4315 +- if (retval < 0)
4316 ++ struct iattr iattr;
4317 ++
4318 ++ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
4319 ++ if (retval)
4320 + goto err_out;
4321 +- else {
4322 +- struct iattr iattr;
4323 +- if (retval == 0) {
4324 +- /*
4325 +- * ACL can be represented
4326 +- * by the mode bits. So don't
4327 +- * update ACL.
4328 +- */
4329 +- acl = NULL;
4330 +- value = NULL;
4331 +- size = 0;
4332 +- }
4333 +- /* Updte the mode bits */
4334 +- iattr.ia_mode = ((mode & S_IALLUGO) |
4335 +- (inode->i_mode & ~S_IALLUGO));
4336 +- iattr.ia_valid = ATTR_MODE;
4337 +- /* FIXME should we update ctime ?
4338 +- * What is the following setxattr update the
4339 +- * mode ?
4340 ++ if (!acl) {
4341 ++ /*
4342 ++ * ACL can be represented
4343 ++ * by the mode bits. So don't
4344 ++ * update ACL.
4345 + */
4346 +- v9fs_vfs_setattr_dotl(dentry, &iattr);
4347 ++ value = NULL;
4348 ++ size = 0;
4349 + }
4350 ++ iattr.ia_valid = ATTR_MODE;
4351 ++ /* FIXME should we update ctime ?
4352 ++ * What is the following setxattr update the
4353 ++ * mode ?
4354 ++ */
4355 ++ v9fs_vfs_setattr_dotl(dentry, &iattr);
4356 + }
4357 + break;
4358 + case ACL_TYPE_DEFAULT:
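
This 9p hunk and the btrfs, ceph, ext2, ext4, f2fs, gfs2 and hfsplus hunks that follow are one mechanical conversion: the open-coded posix_acl_equiv_mode() dance, where a 0 return meant "the ACL is fully representable by the mode bits, store no xattr", is replaced by the posix_acl_update_mode() helper, which computes the new mode, NULLs out *acl in that case, and also clears the SGID bit when the caller is not in the owning group (the upstream fix for CVE-2016-7097 that the open-coded copies missed). A sketch of the contract the converted callers now rely on; store_acl_xattr() is a hypothetical stand-in for each filesystem's xattr write:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static int store_acl_xattr(struct inode *inode, struct posix_acl *acl);

static int set_access_acl(struct inode *inode, struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	error = posix_acl_update_mode(inode, &mode, acl);
	if (error)
		return error;        /* subsumes the old 'error < 0' branch */

	inode->i_mode = mode;
	mark_inode_dirty(inode);
	/* *acl may now be NULL: nothing beyond the mode bits to store. */
	return store_acl_xattr(inode, *acl);
}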
4359 +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
4360 +index 53bb7af4e5f0..247b8dfaf6e5 100644
4361 +--- a/fs/btrfs/acl.c
4362 ++++ b/fs/btrfs/acl.c
4363 +@@ -79,11 +79,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
4364 + case ACL_TYPE_ACCESS:
4365 + name = XATTR_NAME_POSIX_ACL_ACCESS;
4366 + if (acl) {
4367 +- ret = posix_acl_equiv_mode(acl, &inode->i_mode);
4368 +- if (ret < 0)
4369 ++ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4370 ++ if (ret)
4371 + return ret;
4372 +- if (ret == 0)
4373 +- acl = NULL;
4374 + }
4375 + ret = 0;
4376 + break;
4377 +diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
4378 +index 4f67227f69a5..d0b6b342dff9 100644
4379 +--- a/fs/ceph/acl.c
4380 ++++ b/fs/ceph/acl.c
4381 +@@ -95,11 +95,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4382 + case ACL_TYPE_ACCESS:
4383 + name = XATTR_NAME_POSIX_ACL_ACCESS;
4384 + if (acl) {
4385 +- ret = posix_acl_equiv_mode(acl, &new_mode);
4386 +- if (ret < 0)
4387 ++ ret = posix_acl_update_mode(inode, &new_mode, &acl);
4388 ++ if (ret)
4389 + goto out;
4390 +- if (ret == 0)
4391 +- acl = NULL;
4392 + }
4393 + break;
4394 + case ACL_TYPE_DEFAULT:
4395 +diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
4396 +index 42f1d1814083..e725aa0890e0 100644
4397 +--- a/fs/ext2/acl.c
4398 ++++ b/fs/ext2/acl.c
4399 +@@ -190,15 +190,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4400 + case ACL_TYPE_ACCESS:
4401 + name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
4402 + if (acl) {
4403 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4404 +- if (error < 0)
4405 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4406 ++ if (error)
4407 + return error;
4408 +- else {
4409 +- inode->i_ctime = CURRENT_TIME_SEC;
4410 +- mark_inode_dirty(inode);
4411 +- if (error == 0)
4412 +- acl = NULL;
4413 +- }
4414 ++ inode->i_ctime = CURRENT_TIME_SEC;
4415 ++ mark_inode_dirty(inode);
4416 + }
4417 + break;
4418 +
4419 +diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
4420 +index c6601a476c02..dfa519979038 100644
4421 +--- a/fs/ext4/acl.c
4422 ++++ b/fs/ext4/acl.c
4423 +@@ -193,15 +193,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
4424 + case ACL_TYPE_ACCESS:
4425 + name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
4426 + if (acl) {
4427 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4428 +- if (error < 0)
4429 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4430 ++ if (error)
4431 + return error;
4432 +- else {
4433 +- inode->i_ctime = ext4_current_time(inode);
4434 +- ext4_mark_inode_dirty(handle, inode);
4435 +- if (error == 0)
4436 +- acl = NULL;
4437 +- }
4438 ++ inode->i_ctime = ext4_current_time(inode);
4439 ++ ext4_mark_inode_dirty(handle, inode);
4440 + }
4441 + break;
4442 +
4443 +diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
4444 +index 4dcc9e28dc5c..31344247ce89 100644
4445 +--- a/fs/f2fs/acl.c
4446 ++++ b/fs/f2fs/acl.c
4447 +@@ -210,12 +210,10 @@ static int __f2fs_set_acl(struct inode *inode, int type,
4448 + case ACL_TYPE_ACCESS:
4449 + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
4450 + if (acl) {
4451 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4452 +- if (error < 0)
4453 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4454 ++ if (error)
4455 + return error;
4456 + set_acl_inode(inode, inode->i_mode);
4457 +- if (error == 0)
4458 +- acl = NULL;
4459 + }
4460 + break;
4461 +
4462 +diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
4463 +index 363ba9e9d8d0..2524807ee070 100644
4464 +--- a/fs/gfs2/acl.c
4465 ++++ b/fs/gfs2/acl.c
4466 +@@ -92,17 +92,11 @@ int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4467 + if (type == ACL_TYPE_ACCESS) {
4468 + umode_t mode = inode->i_mode;
4469 +
4470 +- error = posix_acl_equiv_mode(acl, &mode);
4471 +- if (error < 0)
4472 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4473 ++ if (error)
4474 + return error;
4475 +-
4476 +- if (error == 0)
4477 +- acl = NULL;
4478 +-
4479 +- if (mode != inode->i_mode) {
4480 +- inode->i_mode = mode;
4481 ++ if (mode != inode->i_mode)
4482 + mark_inode_dirty(inode);
4483 +- }
4484 + }
4485 +
4486 + if (acl) {
4487 +diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
4488 +index ab7ea2506b4d..9b92058a1240 100644
4489 +--- a/fs/hfsplus/posix_acl.c
4490 ++++ b/fs/hfsplus/posix_acl.c
4491 +@@ -65,8 +65,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
4492 + case ACL_TYPE_ACCESS:
4493 + xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
4494 + if (acl) {
4495 +- err = posix_acl_equiv_mode(acl, &inode->i_mode);
4496 +- if (err < 0)
4497 ++ err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4498 ++ if (err)
4499 + return err;
4500 + }
4501 + err = 0;
4502 +diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
4503 +index bc2693d56298..2a0f2a1044c1 100644
4504 +--- a/fs/jffs2/acl.c
4505 ++++ b/fs/jffs2/acl.c
4506 +@@ -233,9 +233,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4507 + case ACL_TYPE_ACCESS:
4508 + xprefix = JFFS2_XPREFIX_ACL_ACCESS;
4509 + if (acl) {
4510 +- umode_t mode = inode->i_mode;
4511 +- rc = posix_acl_equiv_mode(acl, &mode);
4512 +- if (rc < 0)
4513 ++ umode_t mode;
4514 ++
4515 ++ rc = posix_acl_update_mode(inode, &mode, &acl);
4516 ++ if (rc)
4517 + return rc;
4518 + if (inode->i_mode != mode) {
4519 + struct iattr attr;
4520 +@@ -247,8 +248,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4521 + if (rc < 0)
4522 + return rc;
4523 + }
4524 +- if (rc == 0)
4525 +- acl = NULL;
4526 + }
4527 + break;
4528 + case ACL_TYPE_DEFAULT:
4529 +diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
4530 +index 21fa92ba2c19..3a1e1554a4e3 100644
4531 +--- a/fs/jfs/acl.c
4532 ++++ b/fs/jfs/acl.c
4533 +@@ -78,13 +78,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
4534 + case ACL_TYPE_ACCESS:
4535 + ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
4536 + if (acl) {
4537 +- rc = posix_acl_equiv_mode(acl, &inode->i_mode);
4538 +- if (rc < 0)
4539 ++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4540 ++ if (rc)
4541 + return rc;
4542 + inode->i_ctime = CURRENT_TIME;
4543 + mark_inode_dirty(inode);
4544 +- if (rc == 0)
4545 +- acl = NULL;
4546 + }
4547 + break;
4548 + case ACL_TYPE_DEFAULT:
4549 +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
4550 +index 2162434728c0..164307b99405 100644
4551 +--- a/fs/ocfs2/acl.c
4552 ++++ b/fs/ocfs2/acl.c
4553 +@@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle,
4554 + case ACL_TYPE_ACCESS:
4555 + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
4556 + if (acl) {
4557 +- umode_t mode = inode->i_mode;
4558 +- ret = posix_acl_equiv_mode(acl, &mode);
4559 +- if (ret < 0)
4560 +- return ret;
4561 ++ umode_t mode;
4562 +
4563 +- if (ret == 0)
4564 +- acl = NULL;
4565 ++ ret = posix_acl_update_mode(inode, &mode, &acl);
4566 ++ if (ret)
4567 ++ return ret;
4568 +
4569 + ret = ocfs2_acl_set_mode(inode, di_bh,
4570 + handle, mode);
4571 +diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
4572 +index 28f2195cd798..7a3754488312 100644
4573 +--- a/fs/orangefs/acl.c
4574 ++++ b/fs/orangefs/acl.c
4575 +@@ -73,14 +73,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4576 + case ACL_TYPE_ACCESS:
4577 + name = XATTR_NAME_POSIX_ACL_ACCESS;
4578 + if (acl) {
4579 +- umode_t mode = inode->i_mode;
4580 +- /*
4581 +- * can we represent this with the traditional file
4582 +- * mode permission bits?
4583 +- */
4584 +- error = posix_acl_equiv_mode(acl, &mode);
4585 +- if (error < 0) {
4586 +- gossip_err("%s: posix_acl_equiv_mode err: %d\n",
4587 ++ umode_t mode;
4588 ++
4589 ++ error = posix_acl_update_mode(inode, &mode, &acl);
4590 ++ if (error) {
4591 ++ gossip_err("%s: posix_acl_update_mode err: %d\n",
4592 + __func__,
4593 + error);
4594 + return error;
4595 +@@ -90,8 +87,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4596 + SetModeFlag(orangefs_inode);
4597 + inode->i_mode = mode;
4598 + mark_inode_dirty_sync(inode);
4599 +- if (error == 0)
4600 +- acl = NULL;
4601 + }
4602 + break;
4603 + case ACL_TYPE_DEFAULT:
4604 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
4605 +index 59d47ab0791a..bfc3ec388322 100644
4606 +--- a/fs/posix_acl.c
4607 ++++ b/fs/posix_acl.c
4608 +@@ -626,6 +626,37 @@ no_mem:
4609 + }
4610 + EXPORT_SYMBOL_GPL(posix_acl_create);
4611 +
4612 ++/**
4613 ++ * posix_acl_update_mode - update mode in set_acl
4614 ++ *
4615 ++ * Update the file mode when setting an ACL: compute the new file permission
4616 ++ * bits based on the ACL. In addition, if the ACL is equivalent to the new
4617 ++ * file mode, set *acl to NULL to indicate that no ACL should be set.
4618 ++ *
4619 ++ * As with chmod, clear the setgid bit if the caller is not in the owning group
4620 ++ * or capable of CAP_FSETID (see inode_change_ok).
4621 ++ *
4622 ++ * Called from set_acl inode operations.
4623 ++ */
4624 ++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
4625 ++ struct posix_acl **acl)
4626 ++{
4627 ++ umode_t mode = inode->i_mode;
4628 ++ int error;
4629 ++
4630 ++ error = posix_acl_equiv_mode(*acl, &mode);
4631 ++ if (error < 0)
4632 ++ return error;
4633 ++ if (error == 0)
4634 ++ *acl = NULL;
4635 ++ if (!in_group_p(inode->i_gid) &&
4636 ++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
4637 ++ mode &= ~S_ISGID;
4638 ++ *mode_p = mode;
4639 ++ return 0;
4640 ++}
4641 ++EXPORT_SYMBOL(posix_acl_update_mode);
4642 ++
4643 + /*
4644 + * Fix up the uids and gids in posix acl extended attributes in place.
4645 + */
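
fs/posix_acl.c now hosts the shared helper, and the per-filesystem hunks
above all collapse onto the same caller shape. A condensed sketch of that
pattern, where the my_fs_* names are hypothetical:

    static int my_fs_set_acl(struct inode *inode, struct posix_acl *acl,
                             int type)
    {
        int error;

        if (type == ACL_TYPE_ACCESS && acl) {
            error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
            if (error)
                return error;
            /* acl may now be NULL: the mode bits alone were enough */
        }
        return my_fs_store_acl(inode, type, acl);  /* hypothetical */
    }
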
4646 +diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
4647 +index dbed42f755e0..27376681c640 100644
4648 +--- a/fs/reiserfs/xattr_acl.c
4649 ++++ b/fs/reiserfs/xattr_acl.c
4650 +@@ -242,13 +242,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
4651 + case ACL_TYPE_ACCESS:
4652 + name = XATTR_NAME_POSIX_ACL_ACCESS;
4653 + if (acl) {
4654 +- error = posix_acl_equiv_mode(acl, &inode->i_mode);
4655 +- if (error < 0)
4656 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
4657 ++ if (error)
4658 + return error;
4659 +- else {
4660 +- if (error == 0)
4661 +- acl = NULL;
4662 +- }
4663 + }
4664 + break;
4665 + case ACL_TYPE_DEFAULT:
4666 +diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
4667 +index b6e527b8eccb..8a0dec89ca56 100644
4668 +--- a/fs/xfs/xfs_acl.c
4669 ++++ b/fs/xfs/xfs_acl.c
4670 +@@ -257,16 +257,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
4671 + return error;
4672 +
4673 + if (type == ACL_TYPE_ACCESS) {
4674 +- umode_t mode = inode->i_mode;
4675 +- error = posix_acl_equiv_mode(acl, &mode);
4676 +-
4677 +- if (error <= 0) {
4678 +- acl = NULL;
4679 +-
4680 +- if (error < 0)
4681 +- return error;
4682 +- }
4683 ++ umode_t mode;
4684 +
4685 ++ error = posix_acl_update_mode(inode, &mode, &acl);
4686 ++ if (error)
4687 ++ return error;
4688 + error = xfs_set_mode(inode, mode);
4689 + if (error)
4690 + return error;
4691 +diff --git a/include/drm/drmP.h b/include/drm/drmP.h
4692 +index d3778652e462..988903a59007 100644
4693 +--- a/include/drm/drmP.h
4694 ++++ b/include/drm/drmP.h
4695 +@@ -938,7 +938,8 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
4696 + #endif
4697 +
4698 + extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
4699 +- struct drm_gem_object *obj, int flags);
4700 ++ struct drm_gem_object *obj,
4701 ++ int flags);
4702 + extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
4703 + struct drm_file *file_priv, uint32_t handle, uint32_t flags,
4704 + int *prime_fd);
4705 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
4706 +index c26d4638f665..fe99e6f956e2 100644
4707 +--- a/include/linux/hugetlb.h
4708 ++++ b/include/linux/hugetlb.h
4709 +@@ -450,8 +450,8 @@ static inline pgoff_t basepage_index(struct page *page)
4710 + return __basepage_index(page);
4711 + }
4712 +
4713 +-extern void dissolve_free_huge_pages(unsigned long start_pfn,
4714 +- unsigned long end_pfn);
4715 ++extern int dissolve_free_huge_pages(unsigned long start_pfn,
4716 ++ unsigned long end_pfn);
4717 + static inline bool hugepage_migration_supported(struct hstate *h)
4718 + {
4719 + #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
4720 +@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
4721 + {
4722 + return page->index;
4723 + }
4724 +-#define dissolve_free_huge_pages(s, e) do {} while (0)
4725 ++#define dissolve_free_huge_pages(s, e) 0
4726 + #define hugepage_migration_supported(h) false
4727 +
4728 + static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
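
Because dissolve_free_huge_pages() now returns int, the
!CONFIG_HUGETLB_PAGE stub above must expand to a value rather than an
empty statement, so a caller compiles the same way in both
configurations:

    ret = dissolve_free_huge_pages(start_pfn, end_pfn);
    if (ret)        /* constant 0 with the stub, so never taken */
        goto failed_removal;
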
4729 +diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
4730 +index b519e137b9b7..bbfce62a0bd7 100644
4731 +--- a/include/linux/libnvdimm.h
4732 ++++ b/include/linux/libnvdimm.h
4733 +@@ -129,6 +129,8 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
4734 + }
4735 +
4736 + int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
4737 ++void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
4738 ++ phys_addr_t start, unsigned int len);
4739 + struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
4740 + struct nvdimm_bus_descriptor *nfit_desc);
4741 + void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
4742 +diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
4743 +index d5d3d741f028..bf1046d0397b 100644
4744 +--- a/include/linux/posix_acl.h
4745 ++++ b/include/linux/posix_acl.h
4746 +@@ -93,6 +93,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *);
4747 + extern int posix_acl_chmod(struct inode *, umode_t);
4748 + extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
4749 + struct posix_acl **);
4750 ++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
4751 +
4752 + extern int simple_set_acl(struct inode *, struct posix_acl *, int);
4753 + extern int simple_acl_create(struct inode *, struct inode *);
4754 +diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
4755 +index abd286afbd27..a4775f3451b9 100644
4756 +--- a/kernel/irq/generic-chip.c
4757 ++++ b/kernel/irq/generic-chip.c
4758 +@@ -411,8 +411,29 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
4759 + }
4760 + EXPORT_SYMBOL_GPL(irq_map_generic_chip);
4761 +
4762 ++static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
4763 ++{
4764 ++ struct irq_data *data = irq_domain_get_irq_data(d, virq);
4765 ++ struct irq_domain_chip_generic *dgc = d->gc;
4766 ++ unsigned int hw_irq = data->hwirq;
4767 ++ struct irq_chip_generic *gc;
4768 ++ int irq_idx;
4769 ++
4770 ++ gc = irq_get_domain_generic_chip(d, hw_irq);
4771 ++ if (!gc)
4772 ++ return;
4773 ++
4774 ++ irq_idx = hw_irq % dgc->irqs_per_chip;
4775 ++
4776 ++ clear_bit(irq_idx, &gc->installed);
4777 ++ irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
4778 ++ NULL);
4779 ++
4780 ++}
4781 ++
4782 + struct irq_domain_ops irq_generic_chip_ops = {
4783 + .map = irq_map_generic_chip,
4784 ++ .unmap = irq_unmap_generic_chip,
4785 + .xlate = irq_domain_xlate_onetwocell,
4786 + };
4787 + EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
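
Without an .unmap callback, disposing of a mapping left the generic
chip's "installed" bitmap stale, so the hardware IRQ could never be
mapped again. Domains built on the exported ops pick the fix up
automatically; a hedged usage sketch, where node and nr_irqs are
hypothetical locals:

    struct irq_domain *d = irq_domain_add_linear(node, nr_irqs,
                                                 &irq_generic_chip_ops,
                                                 NULL);
    /* irq_dispose_mapping(virq) now also clears the installed bit
     * for the slot via the new .unmap callback. */
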
4788 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
4789 +index 603bdd01ec2c..770d83eb3f48 100644
4790 +--- a/mm/hugetlb.c
4791 ++++ b/mm/hugetlb.c
4792 +@@ -1437,22 +1437,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
4793 +
4794 + /*
4795 + * Dissolve a given free hugepage into free buddy pages. This function does
4796 +- * nothing for in-use (including surplus) hugepages.
4797 ++ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
4798 ++ * number of free hugepages would be reduced below the number of reserved
4799 ++ * hugepages.
4800 + */
4801 +-static void dissolve_free_huge_page(struct page *page)
4802 ++static int dissolve_free_huge_page(struct page *page)
4803 + {
4804 ++ int rc = 0;
4805 ++
4806 + spin_lock(&hugetlb_lock);
4807 + if (PageHuge(page) && !page_count(page)) {
4808 + struct page *head = compound_head(page);
4809 + struct hstate *h = page_hstate(head);
4810 + int nid = page_to_nid(head);
4811 ++ if (h->free_huge_pages - h->resv_huge_pages == 0) {
4812 ++ rc = -EBUSY;
4813 ++ goto out;
4814 ++ }
4815 + list_del(&head->lru);
4816 + h->free_huge_pages--;
4817 + h->free_huge_pages_node[nid]--;
4818 + h->max_huge_pages--;
4819 + update_and_free_page(h, head);
4820 + }
4821 ++out:
4822 + spin_unlock(&hugetlb_lock);
4823 ++ return rc;
4824 + }
4825 +
4826 + /*
4827 +@@ -1460,16 +1470,28 @@ static void dissolve_free_huge_page(struct page *page)
4828 + * make specified memory blocks removable from the system.
4829 + * Note that this will dissolve a free gigantic hugepage completely, if any
4830 + * part of it lies within the given range.
4831 ++ * Also note that if dissolve_free_huge_page() returns with an error, all
4832 ++ * free hugepages that were dissolved before that error are lost.
4833 + */
4834 +-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
4835 ++int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
4836 + {
4837 + unsigned long pfn;
4838 ++ struct page *page;
4839 ++ int rc = 0;
4840 +
4841 + if (!hugepages_supported())
4842 +- return;
4843 ++ return rc;
4844 ++
4845 ++ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
4846 ++ page = pfn_to_page(pfn);
4847 ++ if (PageHuge(page) && !page_count(page)) {
4848 ++ rc = dissolve_free_huge_page(page);
4849 ++ if (rc)
4850 ++ break;
4851 ++ }
4852 ++ }
4853 +
4854 +- for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
4855 +- dissolve_free_huge_page(pfn_to_page(pfn));
4856 ++ return rc;
4857 + }
4858 +
4859 + /*
4860 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4861 +index 9d29ba0f7192..962927309b6e 100644
4862 +--- a/mm/memory_hotplug.c
4863 ++++ b/mm/memory_hotplug.c
4864 +@@ -1945,7 +1945,9 @@ repeat:
4865 + * dissolve free hugepages in the memory block before doing offlining
4866 + * actually in order to make hugetlbfs's object counting consistent.
4867 + */
4868 +- dissolve_free_huge_pages(start_pfn, end_pfn);
4869 ++ ret = dissolve_free_huge_pages(start_pfn, end_pfn);
4870 ++ if (ret)
4871 ++ goto failed_removal;
4872 + /* check again */
4873 + offlined_pages = check_pages_isolated(start_pfn, end_pfn);
4874 + if (offlined_pages < 0) {
4875 +diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
4876 +index 3774b117d365..49b65d481949 100644
4877 +--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
4878 ++++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
4879 +@@ -255,7 +255,7 @@ static struct snd_soc_ops broxton_da7219_ops = {
4880 + /* broxton digital audio interface glue - connects codec <--> CPU */
4881 + static struct snd_soc_dai_link broxton_dais[] = {
4882 + /* Front End DAI links */
4883 +- [BXT_DPCM_AUDIO_PB]
4884 ++ [BXT_DPCM_AUDIO_PB] =
4885 + {
4886 + .name = "Bxt Audio Port",
4887 + .stream_name = "Audio",
4888 +@@ -271,7 +271,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
4889 + .dpcm_playback = 1,
4890 + .ops = &broxton_da7219_fe_ops,
4891 + },
4892 +- [BXT_DPCM_AUDIO_CP]
4893 ++ [BXT_DPCM_AUDIO_CP] =
4894 + {
4895 + .name = "Bxt Audio Capture Port",
4896 + .stream_name = "Audio Record",
4897 +@@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
4898 + .dpcm_capture = 1,
4899 + .ops = &broxton_da7219_fe_ops,
4900 + },
4901 +- [BXT_DPCM_AUDIO_REF_CP]
4902 ++ [BXT_DPCM_AUDIO_REF_CP] =
4903 + {
4904 + .name = "Bxt Audio Reference cap",
4905 + .stream_name = "Refcap",
4906 +@@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
4907 + .nonatomic = 1,
4908 + .dynamic = 1,
4909 + },
4910 +- [BXT_DPCM_AUDIO_HDMI1_PB]
4911 ++ [BXT_DPCM_AUDIO_HDMI1_PB] =
4912 + {
4913 + .name = "Bxt HDMI Port1",
4914 + .stream_name = "Hdmi1",
4915 +@@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
4916 + .nonatomic = 1,
4917 + .dynamic = 1,
4918 + },
4919 +- [BXT_DPCM_AUDIO_HDMI2_PB]
4920 ++ [BXT_DPCM_AUDIO_HDMI2_PB] =
4921 + {
4922 + .name = "Bxt HDMI Port2",
4923 + .stream_name = "Hdmi2",
4924 +@@ -326,7 +326,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
4925 + .nonatomic = 1,
4926 + .dynamic = 1,
4927 + },
4928 +- [BXT_DPCM_AUDIO_HDMI3_PB]
4929 ++ [BXT_DPCM_AUDIO_HDMI3_PB] =
4930 + {
4931 + .name = "Bxt HDMI Port3",
4932 + .stream_name = "Hdmi3",
4933 +diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
4934 +index 253d7bfbf511..d610bdca1608 100644
4935 +--- a/sound/soc/intel/boards/bxt_rt298.c
4936 ++++ b/sound/soc/intel/boards/bxt_rt298.c
4937 +@@ -271,7 +271,7 @@ static const struct snd_soc_ops broxton_rt286_fe_ops = {
4938 + /* broxton digital audio interface glue - connects codec <--> CPU */
4939 + static struct snd_soc_dai_link broxton_rt298_dais[] = {
4940 + /* Front End DAI links */
4941 +- [BXT_DPCM_AUDIO_PB]
4942 ++ [BXT_DPCM_AUDIO_PB] =
4943 + {
4944 + .name = "Bxt Audio Port",
4945 + .stream_name = "Audio",
4946 +@@ -286,7 +286,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4947 + .dpcm_playback = 1,
4948 + .ops = &broxton_rt286_fe_ops,
4949 + },
4950 +- [BXT_DPCM_AUDIO_CP]
4951 ++ [BXT_DPCM_AUDIO_CP] =
4952 + {
4953 + .name = "Bxt Audio Capture Port",
4954 + .stream_name = "Audio Record",
4955 +@@ -300,7 +300,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4956 + .dpcm_capture = 1,
4957 + .ops = &broxton_rt286_fe_ops,
4958 + },
4959 +- [BXT_DPCM_AUDIO_REF_CP]
4960 ++ [BXT_DPCM_AUDIO_REF_CP] =
4961 + {
4962 + .name = "Bxt Audio Reference cap",
4963 + .stream_name = "refcap",
4964 +@@ -313,7 +313,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4965 + .nonatomic = 1,
4966 + .dynamic = 1,
4967 + },
4968 +- [BXT_DPCM_AUDIO_DMIC_CP]
4969 ++ [BXT_DPCM_AUDIO_DMIC_CP] =
4970 + {
4971 + .name = "Bxt Audio DMIC cap",
4972 + .stream_name = "dmiccap",
4973 +@@ -327,7 +327,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4974 + .dynamic = 1,
4975 + .ops = &broxton_dmic_ops,
4976 + },
4977 +- [BXT_DPCM_AUDIO_HDMI1_PB]
4978 ++ [BXT_DPCM_AUDIO_HDMI1_PB] =
4979 + {
4980 + .name = "Bxt HDMI Port1",
4981 + .stream_name = "Hdmi1",
4982 +@@ -340,7 +340,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4983 + .nonatomic = 1,
4984 + .dynamic = 1,
4985 + },
4986 +- [BXT_DPCM_AUDIO_HDMI2_PB]
4987 ++ [BXT_DPCM_AUDIO_HDMI2_PB] =
4988 + {
4989 + .name = "Bxt HDMI Port2",
4990 + .stream_name = "Hdmi2",
4991 +@@ -353,7 +353,7 @@ static struct snd_soc_dai_link broxton_rt298_dais[] = {
4992 + .nonatomic = 1,
4993 + .dynamic = 1,
4994 + },
4995 +- [BXT_DPCM_AUDIO_HDMI3_PB]
4996 ++ [BXT_DPCM_AUDIO_HDMI3_PB] =
4997 + {
4998 + .name = "Bxt HDMI Port3",
4999 + .stream_name = "Hdmi3",
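
Both board files used the old GNU designator form "[index] { ... }" with
no "=", which GCC still accepts only as an obsolete extension; C99
requires the "=". A stand-alone illustration with simplified types:

    enum { PB, CP, NUM_LINKS };
    struct link { const char *name; };

    static struct link dais[NUM_LINKS] = {
        [PB] = { .name = "playback" },  /* C99 designated initializer  */
        [CP] = { .name = "capture" },   /* writing "[CP] { ... }" with
                                         * no "=" is the obsolete form */
    };
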
5000 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5001 +index d908ff8f9755..801082fdc3e0 100644
5002 +--- a/sound/soc/soc-dapm.c
5003 ++++ b/sound/soc/soc-dapm.c
5004 +@@ -823,6 +823,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
5005 + case snd_soc_dapm_switch:
5006 + case snd_soc_dapm_mixer:
5007 + case snd_soc_dapm_pga:
5008 ++ case snd_soc_dapm_out_drv:
5009 + wname_in_long_name = true;
5010 + kcname_in_long_name = true;
5011 + break;
5012 +@@ -3049,6 +3050,9 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
5013 + }
5014 + mutex_unlock(&card->dapm_mutex);
5015 +
5016 ++ if (ret)
5017 ++ return ret;
5018 ++
5019 + if (invert)
5020 + ucontrol->value.integer.value[0] = max - val;
5021 + else
5022 +@@ -3200,7 +3204,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
5023 + if (e->shift_l != e->shift_r) {
5024 + if (item[1] > e->items)
5025 + return -EINVAL;
5026 +- val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_l;
5027 ++ val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
5028 + mask |= e->mask << e->shift_r;
5029 + }
5030 +
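
For a stereo enum control the second item was shifted by shift_l, landing
on top of the first channel's bitfield even though the mask was built
with shift_r. The corrected pairing in isolation, assuming e points to
the struct soc_enum from the surrounding function:

    val  = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l; /* left  */
    val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; /* right */
    mask = (e->mask << e->shift_l) | (e->mask << e->shift_r);
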
5031 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
5032 +index ee7f15aa46fc..34069076bf8e 100644
5033 +--- a/sound/soc/soc-topology.c
5034 ++++ b/sound/soc/soc-topology.c
5035 +@@ -1475,6 +1475,7 @@ widget:
5036 + if (widget == NULL) {
5037 + dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
5038 + w->name);
5039 ++ ret = -ENOMEM;
5040 + goto hdr_err;
5041 + }
5042 +
5043 +diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
5044 +index 7ed72a475c57..e4b717e9eb6c 100644
5045 +--- a/tools/perf/perf-sys.h
5046 ++++ b/tools/perf/perf-sys.h
5047 +@@ -20,7 +20,6 @@
5048 + #endif
5049 +
5050 + #ifdef __powerpc__
5051 +-#include "../../arch/powerpc/include/uapi/asm/unistd.h"
5052 + #define CPUINFO_PROC {"cpu"}
5053 + #endif
5054 +
5055 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
5056 +index 13d414384739..7aee954b307f 100644
5057 +--- a/tools/perf/ui/browsers/hists.c
5058 ++++ b/tools/perf/ui/browsers/hists.c
5059 +@@ -1091,7 +1091,6 @@ static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
5060 + ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
5061 + ui_browser__printf(arg->b, "%s", hpp->buf);
5062 +
5063 +- advance_hpp(hpp, ret);
5064 + return ret;
5065 + }
5066 +
5067 +@@ -2046,6 +2045,7 @@ void hist_browser__init(struct hist_browser *browser,
5068 + struct hists *hists)
5069 + {
5070 + struct perf_hpp_fmt *fmt;
5071 ++ struct perf_hpp_list_node *node;
5072 +
5073 + browser->hists = hists;
5074 + browser->b.refresh = hist_browser__refresh;
5075 +@@ -2058,6 +2058,11 @@ void hist_browser__init(struct hist_browser *browser,
5076 + perf_hpp__reset_width(fmt, hists);
5077 + ++browser->b.columns;
5078 + }
5079 ++ /* hierarchy entries have their own hpp list */
5080 ++ list_for_each_entry(node, &hists->hpp_formats, list) {
5081 ++ perf_hpp_list__for_each_format(&node->hpp, fmt)
5082 ++ perf_hpp__reset_width(fmt, hists);
5083 ++ }
5084 + }
5085 +
5086 + struct hist_browser *hist_browser__new(struct hists *hists)
5087 +diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
5088 +index f04a63112079..d0cae75408ff 100644
5089 +--- a/tools/perf/ui/stdio/hist.c
5090 ++++ b/tools/perf/ui/stdio/hist.c
5091 +@@ -628,14 +628,6 @@ hists__fprintf_hierarchy_headers(struct hists *hists,
5092 + struct perf_hpp *hpp,
5093 + FILE *fp)
5094 + {
5095 +- struct perf_hpp_list_node *fmt_node;
5096 +- struct perf_hpp_fmt *fmt;
5097 +-
5098 +- list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
5099 +- perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
5100 +- perf_hpp__reset_width(fmt, hists);
5101 +- }
5102 +-
5103 + return print_hierarchy_header(hists, hpp, symbol_conf.field_sep, fp);
5104 + }
5105 +
5106 +@@ -714,6 +706,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
5107 + bool use_callchain)
5108 + {
5109 + struct perf_hpp_fmt *fmt;
5110 ++ struct perf_hpp_list_node *node;
5111 + struct rb_node *nd;
5112 + size_t ret = 0;
5113 + const char *sep = symbol_conf.field_sep;
5114 +@@ -726,6 +719,11 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
5115 +
5116 + hists__for_each_format(hists, fmt)
5117 + perf_hpp__reset_width(fmt, hists);
5118 ++ /* hierarchy entries have their own hpp list */
5119 ++ list_for_each_entry(node, &hists->hpp_formats, list) {
5120 ++ perf_hpp_list__for_each_format(&node->hpp, fmt)
5121 ++ perf_hpp__reset_width(fmt, hists);
5122 ++ }
5123 +
5124 + if (symbol_conf.col_width_list_str)
5125 + perf_hpp__set_user_width(symbol_conf.col_width_list_str);
5126 +diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
5127 +index 4f979bb27b6c..7123f4de32cc 100644
5128 +--- a/tools/perf/util/data-convert-bt.c
5129 ++++ b/tools/perf/util/data-convert-bt.c
5130 +@@ -437,7 +437,7 @@ add_bpf_output_values(struct bt_ctf_event_class *event_class,
5131 + int ret;
5132 +
5133 + if (nr_elements * sizeof(u32) != raw_size)
5134 +- pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
5135 ++ pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
5136 + raw_size, nr_elements * sizeof(u32) - raw_size);
5137 +
5138 + len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
5139 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
5140 +index a811c13a74d6..f77b3167585c 100644
5141 +--- a/tools/perf/util/symbol-elf.c
5142 ++++ b/tools/perf/util/symbol-elf.c
5143 +@@ -1113,9 +1113,8 @@ new_symbol:
5144 + * For misannotated, zeroed, ASM function sizes.
5145 + */
5146 + if (nr > 0) {
5147 +- if (!symbol_conf.allow_aliases)
5148 +- symbols__fixup_duplicate(&dso->symbols[map->type]);
5149 + symbols__fixup_end(&dso->symbols[map->type]);
5150 ++ symbols__fixup_duplicate(&dso->symbols[map->type]);
5151 + if (kmap) {
5152 + /*
5153 + * We need to fixup this here too because we create new
5154 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
5155 +index 37e8d20ae03e..f29f336ed17b 100644
5156 +--- a/tools/perf/util/symbol.c
5157 ++++ b/tools/perf/util/symbol.c
5158 +@@ -152,6 +152,9 @@ void symbols__fixup_duplicate(struct rb_root *symbols)
5159 + struct rb_node *nd;
5160 + struct symbol *curr, *next;
5161 +
5162 ++ if (symbol_conf.allow_aliases)
5163 ++ return;
5164 ++
5165 + nd = rb_first(symbols);
5166 +
5167 + while (nd) {
5168 +@@ -1234,8 +1237,8 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
5169 + if (kallsyms__delta(map, filename, &delta))
5170 + return -1;
5171 +
5172 +- symbols__fixup_duplicate(&dso->symbols[map->type]);
5173 + symbols__fixup_end(&dso->symbols[map->type]);
5174 ++ symbols__fixup_duplicate(&dso->symbols[map->type]);
5175 +
5176 + if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
5177 + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
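
The last two hunks make both symbol loaders fix up end addresses before
deduplicating, and move the allow_aliases policy into
symbols__fixup_duplicate() itself so every caller honours it. The
resulting sequence, as now shared by the ELF and kallsyms paths:

    symbols__fixup_end(&dso->symbols[map->type]);       /* sizes first */
    symbols__fixup_duplicate(&dso->symbols[map->type]); /* no-op when
                                                         * aliases are
                                                         * allowed     */
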