Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.12 commit in: /
Date: Sun, 06 Aug 2017 19:34:50
Message-Id: 1502048067.0f23b605fb69e470f285ce960cb4fd7e0492050e.mpagano@gentoo
commit:     0f23b605fb69e470f285ce960cb4fd7e0492050e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 6 19:34:27 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 6 19:34:27 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f23b605

Linux patch 4.12.5

 0000_README             |   4 +
 1004_linux-4.12.5.patch | 997 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1001 insertions(+)
diff --git a/0000_README b/0000_README
index 09d6e6c..29e1ca2 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-4.12.4.patch
 From: http://www.kernel.org
 Desc: Linux 4.12.4

+Patch: 1004_linux-4.12.5.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.5
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.12.5.patch b/1004_linux-4.12.5.patch
new file mode 100644
index 0000000..0b6a672
--- /dev/null
+++ b/1004_linux-4.12.5.patch
@@ -0,0 +1,997 @@
+diff --git a/Makefile b/Makefile
+index bfdc92c2e47a..382e967b0792 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index c32a09095216..85a92db70afc 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ before it can be accessed through the kernel mapping. */
+ preempt_disable();
+ flush_dcache_page_asm(__pa(vfrom), vaddr);
+- preempt_enable();
+ copy_page_asm(vto, vfrom);
++ preempt_enable();
+ }
+ EXPORT_SYMBOL(copy_user_page);
+
+@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
++ /* Flush the TLB to avoid speculation if coherency is required. */
++ if (parisc_requires_coherency())
++ flush_tlb_all();
++
+ /* Flushing the whole cache on each cpu takes forever on
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+@@ -577,33 +581,22 @@ void flush_cache_mm(struct mm_struct *mm)
+ void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+- unsigned long addr;
+- pgd_t *pgd;
+-
+ BUG_ON(!vma->vm_mm->context);
+
++ /* Flush the TLB to avoid speculation if coherency is required. */
++ if (parisc_requires_coherency())
++ flush_tlb_range(vma, start, end);
++
+ if ((end - start) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+- if (vma->vm_mm->context == mfsp(3)) {
+- flush_user_dcache_range_asm(start, end);
+- if (vma->vm_flags & VM_EXEC)
+- flush_user_icache_range_asm(start, end);
+- return;
+- }
++ BUG_ON(vma->vm_mm->context != mfsp(3));
+
+- pgd = vma->vm_mm->pgd;
+- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+- unsigned long pfn;
+- pte_t *ptep = get_ptep(pgd, addr);
+- if (!ptep)
+- continue;
+- pfn = pte_pfn(*ptep);
+- if (pfn_valid(pfn))
+- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+- }
++ flush_user_dcache_range_asm(start, end);
++ if (vma->vm_flags & VM_EXEC)
++ flush_user_icache_range_asm(start, end);
+ }
+
+ void
+@@ -612,7 +605,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
+ BUG_ON(!vma->vm_mm->context);
+
+ if (pfn_valid(pfn)) {
+- flush_tlb_page(vma, vmaddr);
++ if (parisc_requires_coherency())
++ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
+ }
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index b64d7d21646e..a45a67d526f8 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -53,6 +53,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/rcupdate.h>
+ #include <linux/random.h>
++#include <linux/nmi.h>
+
+ #include <asm/io.h>
+ #include <asm/asm-offsets.h>
+@@ -145,6 +146,7 @@ void machine_power_off(void)
+
+ /* prevent soft lockup/stalled CPU messages for endless loop. */
+ rcu_sysrq_start();
++ lockup_detector_suspend();
+ for (;;);
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+index 710e491206ed..1c10e26cebbb 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
+ goto out;
+ }
+
+- if (kvm->arch.hpt.virt)
++ if (kvm->arch.hpt.virt) {
+ kvmppc_free_hpt(&kvm->arch.hpt);
++ kvmppc_rmap_reset(kvm);
++ }
+
+ err = kvmppc_allocate_hpt(&info, order);
+ if (err < 0)
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 8d1a365b8edc..1d3602f7ec22 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2938,6 +2938,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
++ /* Enable TM so we can read the TM SPRs */
++ mtmsr(mfmsr() | MSR_TM);
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
+index e5bf1e84047f..011ef2180fe6 100644
+--- a/arch/powerpc/platforms/pseries/reconfig.c
++++ b/arch/powerpc/platforms/pseries/reconfig.c
+@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
+
+ of_detach_node(np);
+ of_node_put(parent);
+- of_node_put(np); /* Must decrement the refcount */
+ return 0;
+ }
+
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 6f8f6b86bfe2..0cf5fefdb859 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+ u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+ u32 tmp[2];
+
++ if (!authsize)
++ goto decrypt;
++
+ /* Move high-order bits of sequence number back. */
+ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+ if (crypto_memneq(ihash, ohash, authsize))
+ return -EBADMSG;
+
++decrypt:
++
+ sg_init_table(areq_ctx->dst, 2);
+ dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index d165af8abe36..4161d9961a24 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -1163,10 +1163,11 @@ static int wdog_reboot_handler(struct notifier_block *this,
+ ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+- /* Set a long timer to let the reboot happens, but
+- reboot if it hangs, but only if the watchdog
++ /* Set a long timer to let the reboot happen or
++ reset if it hangs, but only if the watchdog
+ timer was already running. */
+- timeout = 120;
++ if (timeout < 120)
++ timeout = 120;
+ pretimeout = 0;
+ ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index fb1e60f5002e..778fc1bcccee 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -629,7 +629,7 @@ source "drivers/crypto/virtio/Kconfig"
+ config CRYPTO_DEV_BCM_SPU
+ tristate "Broadcom symmetric crypto/hash acceleration support"
+ depends on ARCH_BCM_IPROC
+- depends on BCM_PDC_MBOX
++ depends on MAILBOX
+ default m
+ select CRYPTO_DES
+ select CRYPTO_MD5
+diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
+index ef04c9748317..bf7ac621c591 100644
+--- a/drivers/crypto/bcm/spu2.c
++++ b/drivers/crypto/bcm/spu2.c
+@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
+ break;
+ case HASH_ALG_SHA3_512:
+ *spu2_type = SPU2_HASH_TYPE_SHA3_512;
++ break;
+ case HASH_ALG_LAST:
+ default:
+ err = -EINVAL;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9106ea32b048..881df8843e66 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9085,6 +9085,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ u64 power_domain_mask;
+ bool active;
+
++ if (INTEL_GEN(dev_priv) >= 9) {
++ intel_crtc_init_scalers(crtc, pipe_config);
++
++ pipe_config->scaler_state.scaler_id = -1;
++ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
++ }
++
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+@@ -9113,13 +9120,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
+ pipe_config->gamma_mode =
+ I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+
+- if (INTEL_GEN(dev_priv) >= 9) {
+- intel_crtc_init_scalers(crtc, pipe_config);
+-
+- pipe_config->scaler_state.scaler_id = -1;
+- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+- }
+-
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT_ULL(power_domain);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+index 1e1de6bfe85a..5893be9788d3 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+@@ -27,7 +27,7 @@ struct nv50_disp {
+ u8 type[3];
+ } pior;
+
+- struct nv50_disp_chan *chan[17];
++ struct nv50_disp_chan *chan[21];
+ };
+
+ int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+index c794b2c2d21e..6d8f21290aa2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
+
+ if (bar->bar[0].mem) {
+ addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
+- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
++ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index c7b53d987f06..fefb9d995d2c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+ {
+- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
++ return -EINVAL;
+ }
+
+ static int vmw_cmd_ok(struct vmw_private *dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index 50be1f034f9e..5284e8d2f7ba 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
+ * something arbitrarily large and we will reject any layout
+ * that doesn't fit prim_bb_mem later
+ */
+- dev->mode_config.max_width = 16384;
+- dev->mode_config.max_height = 16384;
++ dev->mode_config.max_width = 8192;
++ dev->mode_config.max_height = 8192;
+ }
+
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
+index 9b856e1890d1..e4c43a17b333 100644
+--- a/drivers/isdn/i4l/isdn_common.c
++++ b/drivers/isdn/i4l/isdn_common.c
+@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
+ if (arg) {
+ if (copy_from_user(bname, argp, sizeof(bname) - 1))
+ return -EFAULT;
++ bname[sizeof(bname)-1] = 0;
+ } else
+ return -EINVAL;
+ ret = mutex_lock_interruptible(&dev->mtx);
+diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
+index c151c6daa67e..f63a110b7bcb 100644
+--- a/drivers/isdn/i4l/isdn_net.c
++++ b/drivers/isdn/i4l/isdn_net.c
+@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
+ char newname[10];
+
+ if (p) {
+- /* Slave-Name MUST not be empty */
+- if (!strlen(p + 1))
++ /* Slave-Name MUST not be empty or overflow 'newname' */
++ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
+ return NULL;
+- strcpy(newname, p + 1);
+ *p = 0;
+ /* Master must already exist */
+ if (!(n = isdn_net_findif(parm)))
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 93b181088168..b68e21c25a17 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1587,16 +1587,18 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ if (likely(ic->mode == 'J')) {
+ if (dio->write) {
+ unsigned next_entry, i, pos;
+- unsigned ws, we;
++ unsigned ws, we, range_sectors;
+
+- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
++ dio->range.n_sectors = min(dio->range.n_sectors,
++ ic->free_sectors << ic->sb->log2_sectors_per_block);
+ if (unlikely(!dio->range.n_sectors))
+ goto sleep;
+- ic->free_sectors -= dio->range.n_sectors;
++ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
++ ic->free_sectors -= range_sectors;
+ journal_section = ic->free_section;
+ journal_entry = ic->free_section_entry;
+
+- next_entry = ic->free_section_entry + dio->range.n_sectors;
++ next_entry = ic->free_section_entry + range_sectors;
+ ic->free_section_entry = next_entry % ic->journal_section_entries;
+ ic->free_section += next_entry / ic->journal_section_entries;
+ ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
+@@ -3019,6 +3021,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ ti->error = "Block size doesn't match the information in superblock";
+ goto bad;
+ }
++ if (!le32_to_cpu(ic->sb->journal_sections)) {
++ r = -EINVAL;
++ ti->error = "Corrupted superblock, journal_sections is 0";
++ goto bad;
++ }
+ /* make sure that ti->max_io_len doesn't overflow */
+ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
+ ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 63d342d560b8..33611a91b1d9 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -733,7 +733,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
+
+ /* for managing resync I/O pages */
+ struct resync_pages {
+- unsigned idx; /* for get/put page from the pool */
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+ };
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7866563338fa..5de4b3d04eb5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+ resync_get_all_pages(rp);
+ }
+
+- rp->idx = 0;
+ rp->raid_bio = r1_bio;
+ bio->bi_private = rp;
+ }
+@@ -492,10 +491,6 @@ static void raid1_end_write_request(struct bio *bio)
+ }
+
+ if (behind) {
+- /* we release behind master bio when all write are done */
+- if (r1_bio->behind_master_bio == bio)
+- to_put = NULL;
+-
+ if (test_bit(WriteMostly, &rdev->flags))
+ atomic_dec(&r1_bio->behind_remaining);
+
+@@ -1088,7 +1083,7 @@ static void unfreeze_array(struct r1conf *conf)
+ wake_up(&conf->wait_barrier);
+ }
+
+-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
++static void alloc_behind_master_bio(struct r1bio *r1_bio,
+ struct bio *bio)
+ {
+ int size = bio->bi_iter.bi_size;
+@@ -1098,11 +1093,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+
+ behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
+ if (!behind_bio)
+- goto fail;
++ return;
+
+ /* discard op, we don't support writezero/writesame yet */
+- if (!bio_has_data(bio))
++ if (!bio_has_data(bio)) {
++ behind_bio->bi_iter.bi_size = size;
+ goto skip_copy;
++ }
+
+ while (i < vcnt && size) {
+ struct page *page;
+@@ -1123,14 +1120,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+ r1_bio->behind_master_bio = behind_bio;;
+ set_bit(R1BIO_BehindIO, &r1_bio->state);
+
+- return behind_bio;
++ return;
+
+ free_pages:
+ pr_debug("%dB behind alloc failed, doing sync I/O\n",
+ bio->bi_iter.bi_size);
+ bio_free_pages(behind_bio);
+-fail:
+- return behind_bio;
++ bio_put(behind_bio);
+ }
+
+ struct raid1_plug_cb {
+@@ -1483,7 +1479,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait)) {
+- mbio = alloc_behind_master_bio(r1_bio, bio);
++ alloc_behind_master_bio(r1_bio, bio);
+ }
+
+ bitmap_startwrite(bitmap, r1_bio->sector,
+@@ -1493,14 +1489,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ first_clone = 0;
+ }
+
+- if (!mbio) {
+- if (r1_bio->behind_master_bio)
+- mbio = bio_clone_fast(r1_bio->behind_master_bio,
+- GFP_NOIO,
+- mddev->bio_set);
+- else
+- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+- }
++ if (r1_bio->behind_master_bio)
++ mbio = bio_clone_fast(r1_bio->behind_master_bio,
++ GFP_NOIO, mddev->bio_set);
++ else
++ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
+
+ if (r1_bio->behind_master_bio) {
+ if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+@@ -2368,8 +2361,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
+ wbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO,
+ mddev->bio_set);
+- /* We really need a _all clone */
+- wbio->bi_iter = (struct bvec_iter){ 0 };
+ } else {
+ wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
+ mddev->bio_set);
+@@ -2621,6 +2612,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ int good_sectors = RESYNC_SECTORS;
+ int min_bad = 0; /* number of sectors that are bad in all devices */
+ int idx = sector_to_idx(sector_nr);
++ int page_idx = 0;
+
+ if (!conf->r1buf_pool)
+ if (init_resync(conf))
+@@ -2848,7 +2840,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ bio = r1_bio->bios[i];
+ rp = get_resync_pages(bio);
+ if (bio->bi_end_io) {
+- page = resync_fetch_page(rp, rp->idx++);
++ page = resync_fetch_page(rp, page_idx);
+
+ /*
+ * won't fail because the vec table is big
+@@ -2860,7 +2852,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ nr_sectors += len>>9;
+ sector_nr += len>>9;
+ sync_blocks -= (len>>9);
+- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
++ } while (++page_idx < RESYNC_PAGES);
+
+ r1_bio->sectors = nr_sectors;
+
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 52acffa7a06a..bfc6db236348 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
+ resync_get_all_pages(rp);
+ }
+
+- rp->idx = 0;
+ rp->raid_bio = r10_bio;
+ bio->bi_private = rp;
+ if (rbio) {
+@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ sector_t sectors_skipped = 0;
+ int chunks_skipped = 0;
+ sector_t chunk_mask = conf->geo.chunk_mask;
++ int page_idx = 0;
+
+ if (!conf->r10buf_pool)
+ if (init_resync(conf))
+@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ break;
+ for (bio= biolist ; bio ; bio=bio->bi_next) {
+ struct resync_pages *rp = get_resync_pages(bio);
+- page = resync_fetch_page(rp, rp->idx++);
++ page = resync_fetch_page(rp, page_idx);
+ /*
+ * won't fail because the vec table is big enough
+ * to hold all these pages
+@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ }
+ nr_sectors += len>>9;
+ sector_nr += len>>9;
+- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
++ } while (++page_idx < RESYNC_PAGES);
+ r10_bio->sectors = nr_sectors;
+
+ while (biolist) {
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d524daddc630..e92dd2dc4b5a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6237,6 +6237,8 @@ static void raid5_do_work(struct work_struct *work)
+ pr_debug("%d stripes handled\n", handled);
+
+ spin_unlock_irq(&conf->device_lock);
++
++ async_tx_issue_pending_all();
+ blk_finish_plug(&plug);
+
+ pr_debug("--- raid5worker inactive\n");
+diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
+index d6fa2214aaae..0fb4e4c119e1 100644
+--- a/drivers/mmc/host/sunxi-mmc.c
++++ b/drivers/mmc/host/sunxi-mmc.c
+@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
+ }
+ mmc_writel(host, REG_CLKCR, rval);
+
+- if (host->cfg->needs_new_timings)
+- mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
++ if (host->cfg->needs_new_timings) {
++ /* Don't touch the delay bits */
++ rval = mmc_readl(host, REG_SD_NTSR);
++ rval |= SDXC_2X_TIMING_MODE;
++ mmc_writel(host, REG_SD_NTSR, rval);
++ }
+
+ ret = sunxi_mmc_clk_set_phase(host, ios, rate);
+ if (ret)
+diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
+index a2d92f10501b..a3d20e39e5b5 100644
+--- a/drivers/mmc/host/tmio_mmc_pio.c
++++ b/drivers/mmc/host/tmio_mmc_pio.c
+@@ -404,30 +404,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
+ * Transfer the data
+ */
+ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
+- u8 data[4] = { };
++ u32 data = 0;
++ u32 *buf32 = (u32 *)buf;
+
+ if (is_read)
+- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
+ count >> 2);
+ else
+- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf,
++ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
+ count >> 2);
+
+ /* if count was multiple of 4 */
+ if (!(count & 0x3))
+ return;
+
+- buf8 = (u8 *)(buf + (count >> 2));
++ buf32 += count >> 2;
+ count %= 4;
+
+ if (is_read) {
+- sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT,
+- (u32 *)data, 1);
+- memcpy(buf8, data, count);
++ sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
++ memcpy(buf32, &data, count);
+ } else {
+- memcpy(data, buf8, count);
+- sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT,
+- (u32 *)data, 1);
++ memcpy(&data, buf32, count);
++ sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ }
+
+ return;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 1161390f4935..736754c5ab63 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry)
+ return dentry->d_name.name != dentry->d_iname;
+ }
+
++void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
++{
++ spin_lock(&dentry->d_lock);
++ if (unlikely(dname_external(dentry))) {
++ struct external_name *p = external_name(dentry);
++ atomic_inc(&p->u.count);
++ spin_unlock(&dentry->d_lock);
++ name->name = p->name;
++ } else {
++ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
++ spin_unlock(&dentry->d_lock);
++ name->name = name->inline_name;
++ }
++}
++EXPORT_SYMBOL(take_dentry_name_snapshot);
++
++void release_dentry_name_snapshot(struct name_snapshot *name)
++{
++ if (unlikely(name->name != name->inline_name)) {
++ struct external_name *p;
++ p = container_of(name->name, struct external_name, name[0]);
++ if (unlikely(atomic_dec_and_test(&p->u.count)))
++ kfree_rcu(p, u.head);
++ }
++}
++EXPORT_SYMBOL(release_dentry_name_snapshot);
++
+ static inline void __d_set_inode_and_type(struct dentry *dentry,
+ struct inode *inode,
+ unsigned type_flags)
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index e892ae7d89f8..acd3be2cc691 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ {
+ int error;
+ struct dentry *dentry = NULL, *trap;
+- const char *old_name;
++ struct name_snapshot old_name;
+
+ trap = lock_rename(new_dir, old_dir);
+ /* Source or destination directories don't exist? */
+@@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
+ goto exit;
+
+- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++ take_dentry_name_snapshot(&old_name, old_dentry);
+
+ error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
+ dentry, 0);
+ if (error) {
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+ goto exit;
+ }
+ d_move(old_dentry, dentry);
+- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
++ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
+ d_is_dir(old_dentry),
+ NULL, old_dentry);
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+ unlock_rename(new_dir, old_dir);
+ dput(dentry);
+ return old_dentry;
+diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
+index 7bc186f4ed4d..1be45c8d460d 100644
+--- a/fs/jfs/acl.c
++++ b/fs/jfs/acl.c
+@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
+- if (acl) {
+- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+- if (rc)
+- return rc;
+- inode->i_ctime = current_time(inode);
+- mark_inode_dirty(inode);
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -118,9 +111,17 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+
+ tid = txBegin(inode->i_sb, 0);
+ mutex_lock(&JFS_IP(inode)->commit_mutex);
++ if (type == ACL_TYPE_ACCESS && acl) {
++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++ if (rc)
++ goto end_tx;
++ inode->i_ctime = current_time(inode);
++ mark_inode_dirty(inode);
++ }
+ rc = __jfs_set_acl(tid, inode, type, acl);
+ if (!rc)
+ rc = txCommit(tid, 1, &inode, 0);
++end_tx:
+ txEnd(tid);
+ mutex_unlock(&JFS_IP(inode)->commit_mutex);
+ return rc;
+diff --git a/fs/namei.c b/fs/namei.c
+index 6571a5f5112e..281c1f7fa983 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -4362,11 +4362,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ {
+ int error;
+ bool is_dir = d_is_dir(old_dentry);
+- const unsigned char *old_name;
+ struct inode *source = old_dentry->d_inode;
+ struct inode *target = new_dentry->d_inode;
+ bool new_is_dir = false;
+ unsigned max_links = new_dir->i_sb->s_max_links;
++ struct name_snapshot old_name;
+
+ if (source == target)
+ return 0;
+@@ -4413,7 +4413,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (error)
+ return error;
+
+- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
++ take_dentry_name_snapshot(&old_name, old_dentry);
+ dget(new_dentry);
+ if (!is_dir || (flags & RENAME_EXCHANGE))
+ lock_two_nondirectories(source, target);
+@@ -4468,14 +4468,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ inode_unlock(target);
+ dput(new_dentry);
+ if (!error) {
+- fsnotify_move(old_dir, new_dir, old_name, is_dir,
++ fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
+ !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
+ if (flags & RENAME_EXCHANGE) {
+ fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
+ new_is_dir, NULL, new_dentry);
+ }
+ }
+- fsnotify_oldname_free(old_name);
++ release_dentry_name_snapshot(&old_name);
+
+ return error;
+ }
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 5713eb32a45e..d264363559db 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -750,7 +750,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+ */
+ nfs_sync_mapping(filp->f_mapping);
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+- nfs_zap_mapping(inode, filp->f_mapping);
++ nfs_zap_caches(inode);
+ out:
+ return status;
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index dbfa18900e25..f5a7faac39a7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6441,7 +6441,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+- freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
++ freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
+ }
+
+ finish_wait(q, &wait);
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 01a9f0f007d4..0c4583b61717 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
+ if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+ __fsnotify_update_child_dentry_flags(p_inode);
+ else if (p_inode->i_fsnotify_mask & mask) {
++ struct name_snapshot name;
++
+ /* we are notifying a parent so come up with the new mask which
+ * specifies these are events which came from a child. */
+ mask |= FS_EVENT_ON_CHILD;
+
++ take_dentry_name_snapshot(&name, dentry);
+ if (path)
+ ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+- dentry->d_name.name, 0);
++ name.name, 0);
+ else
+ ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+- dentry->d_name.name, 0);
++ name.name, 0);
++ release_dentry_name_snapshot(&name);
+ }
+
+ dput(parent);
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index d2e38dc6172c..025727bf6797 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ }
+
++struct name_snapshot {
++ const char *name;
++ char inline_name[DNAME_INLINE_LEN];
++};
++void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
++void release_dentry_name_snapshot(struct name_snapshot *);
+
+ #endif /* __LINUX_DCACHE_H */
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index b43d3f5bd9ea..b78aa7ac77ce 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+ }
+ }
+
+-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
+-
+-/*
+- * fsnotify_oldname_init - save off the old filename before we change it
+- */
+-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return kstrdup(name, GFP_KERNEL);
+-}
+-
+-/*
+- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
+- */
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+- kfree(old_name);
+-}
+-
+-#else /* CONFIG_FSNOTIFY */
+-
+-static inline const char *fsnotify_oldname_init(const unsigned char *name)
+-{
+- return NULL;
+-}
+-
+-static inline void fsnotify_oldname_free(const unsigned char *old_name)
+-{
+-}
+-
+-#endif /* CONFIG_FSNOTIFY */
+-
+ #endif /* _LINUX_FS_NOTIFY_H */
+diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
+index ec47f95991a3..586cccea46ce 100755
+--- a/scripts/dtc/dtx_diff
++++ b/scripts/dtc/dtx_diff
+@@ -321,7 +321,7 @@ fi
+ cpp_flags="\
+ -nostdinc \
+ -I${srctree}/arch/${ARCH}/boot/dts \
+- -I${srctree}/arch/${ARCH}/boot/dts/include \
++ -I${srctree}/scripts/dtc/include-prefixes \
+ -I${srctree}/drivers/of/testcase-data \
+ -undef -D__DTS__"
+
+diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
+index c47287d79306..a178e0d03088 100644
+--- a/sound/pci/fm801.c
++++ b/sound/pci/fm801.c
+@@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card,
+ }
+ }
+
+- snd_fm801_chip_init(chip);
+-
+ if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
+ if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
+ IRQF_SHARED, KBUILD_MODNAME, chip)) {
+@@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card,
+ pci_set_master(pci);
+ }
+
++ snd_fm801_chip_init(chip);
++
+ if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
+ snd_fm801_free(chip);
+ return err;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 63bc894ddf5e..8c1289963c80 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -933,6 +933,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
++ SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 90e4ff87445e..c87ff8e5d1d5 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3757,11 +3757,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI", patch_atihdmi),
+ HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi),
++HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
+ HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
++HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
+@@ -3788,17 +3792,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
++HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
+ HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
++HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
+ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi),
+ HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),