Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Wed, 27 Jul 2016 19:16:00
Message-Id: 1469646944.402317fd26761fffa2da5219860ed7be0e3984f9.mpagano@gentoo
commit:     402317fd26761fffa2da5219860ed7be0e3984f9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 27 19:15:44 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 27 19:15:44 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=402317fd

Linux patch 3.14.74

 0000_README              |    4 +
 1073_linux-3.14.74.patch | 1456 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1460 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 7e32ae1..0e993b3 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -334,6 +334,10 @@ Patch: 1072_linux-3.14.73.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.14.73
21
22 +Patch: 1073_linux-3.14.74.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.14.74
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1073_linux-3.14.74.patch b/1073_linux-3.14.74.patch
31 new file mode 100644
32 index 0000000..1cc5a75
33 --- /dev/null
34 +++ b/1073_linux-3.14.74.patch
35 @@ -0,0 +1,1456 @@
36 +diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
37 +index a0c85110a07e..689ab9b9953a 100644
38 +--- a/Documentation/scsi/scsi_eh.txt
39 ++++ b/Documentation/scsi/scsi_eh.txt
40 +@@ -263,19 +263,23 @@ scmd->allowed.
41 +
42 + 3. scmd recovered
43 + ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
44 +- - shost->host_failed--
45 + - clear scmd->eh_eflags
46 + - scsi_setup_cmd_retry()
47 + - move from local eh_work_q to local eh_done_q
48 + LOCKING: none
49 ++ CONCURRENCY: at most one thread per separate eh_work_q to
50 ++ keep queue manipulation lockless
51 +
52 + 4. EH completes
53 + ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
54 +- layer of failure.
55 ++ layer of failure. May be called concurrently but must have
56 ++ a no more than one thread per separate eh_work_q to
57 ++ manipulate the queue locklessly
58 + - scmd is removed from eh_done_q and scmd->eh_entry is cleared
59 + - if retry is necessary, scmd is requeued using
60 + scsi_queue_insert()
61 + - otherwise, scsi_finish_command() is invoked for scmd
62 ++ - zero shost->host_failed
63 + LOCKING: queue or finish function performs appropriate locking
64 +
65 +
66 +diff --git a/Makefile b/Makefile
67 +index 939dfae7bb5f..d2fb4dae6ecb 100644
68 +--- a/Makefile
69 ++++ b/Makefile
70 +@@ -1,6 +1,6 @@
71 + VERSION = 3
72 + PATCHLEVEL = 14
73 +-SUBLEVEL = 73
74 ++SUBLEVEL = 74
75 + EXTRAVERSION =
76 + NAME = Remembering Coco
77 +
78 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
79 +index 219ac88a9542..bed6c8fa54b5 100644
80 +--- a/arch/arm/include/asm/pgtable-2level.h
81 ++++ b/arch/arm/include/asm/pgtable-2level.h
82 +@@ -163,6 +163,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
83 +
84 + #define pmd_large(pmd) (pmd_val(pmd) & 2)
85 + #define pmd_bad(pmd) (pmd_val(pmd) & 2)
86 ++#define pmd_present(pmd) (pmd_val(pmd))
87 +
88 + #define copy_pmd(pmdpd,pmdps) \
89 + do { \
90 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
91 +index 06e0bc0f8b00..ab7ee9205ca4 100644
92 +--- a/arch/arm/include/asm/pgtable-3level.h
93 ++++ b/arch/arm/include/asm/pgtable-3level.h
94 +@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
95 + : !!(pmd_val(pmd) & (val)))
96 + #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
97 +
98 ++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
99 + #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
100 +
101 + #define __HAVE_ARCH_PMD_WRITE
102 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
103 +index 89dba131703b..9a9701815b57 100644
104 +--- a/arch/arm/include/asm/pgtable.h
105 ++++ b/arch/arm/include/asm/pgtable.h
106 +@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
107 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
108 +
109 + #define pmd_none(pmd) (!pmd_val(pmd))
110 +-#define pmd_present(pmd) (pmd_val(pmd))
111 +
112 + static inline pte_t *pmd_page_vaddr(pmd_t pmd)
113 + {
114 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
115 +index a995fce87791..3ff5b4921b76 100644
116 +--- a/arch/mips/include/asm/kvm_host.h
117 ++++ b/arch/mips/include/asm/kvm_host.h
118 +@@ -342,6 +342,7 @@ struct kvm_mips_tlb {
119 + #define KVM_MIPS_GUEST_TLB_SIZE 64
120 + struct kvm_vcpu_arch {
121 + void *host_ebase, *guest_ebase;
122 ++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
123 + unsigned long host_stack;
124 + unsigned long host_gp;
125 +
126 +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
127 +index ba5ce99c021d..d1fa2a57218b 100644
128 +--- a/arch/mips/kvm/kvm_locore.S
129 ++++ b/arch/mips/kvm/kvm_locore.S
130 +@@ -229,6 +229,7 @@ FEXPORT(__kvm_mips_load_k0k1)
131 +
132 + /* Jump to guest */
133 + eret
134 ++EXPORT(__kvm_mips_vcpu_run_end)
135 +
136 + VECTOR(MIPSX(exception), unknown)
137 + /*
138 +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
139 +index 12d850b68763..2b2dd4ec03fb 100644
140 +--- a/arch/mips/kvm/kvm_mips.c
141 ++++ b/arch/mips/kvm/kvm_mips.c
142 +@@ -348,6 +348,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
143 + memcpy(gebase + offset, mips32_GuestException,
144 + mips32_GuestExceptionEnd - mips32_GuestException);
145 +
146 ++#ifdef MODULE
147 ++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
148 ++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
149 ++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
150 ++ vcpu->arch.vcpu_run = gebase + offset;
151 ++#else
152 ++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
153 ++#endif
154 ++
155 + /* Invalidate the icache for these ranges */
156 + mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
157 +
158 +@@ -431,7 +440,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
159 +
160 + kvm_guest_enter();
161 +
162 +- r = __kvm_mips_vcpu_run(run, vcpu);
163 ++ r = vcpu->arch.vcpu_run(run, vcpu);
164 +
165 + kvm_guest_exit();
166 + local_irq_enable();
167 +diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
168 +index 20da7d29eede..bf41ea36210e 100644
169 +--- a/arch/mips/kvm/kvm_mips_int.h
170 ++++ b/arch/mips/kvm/kvm_mips_int.h
171 +@@ -27,6 +27,8 @@
172 + #define MIPS_EXC_MAX 12
173 + /* XXXSL More to follow */
174 +
175 ++extern char __kvm_mips_vcpu_run_end[];
176 ++
177 + #define C_TI (_ULCAST_(1) << 30)
178 +
179 + #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
180 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
181 +index 6e15abf30eb8..916b3a5db859 100644
182 +--- a/arch/powerpc/kernel/process.c
183 ++++ b/arch/powerpc/kernel/process.c
184 +@@ -1237,6 +1237,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
185 + current->thread.regs = regs - 1;
186 + }
187 +
188 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
189 ++ /*
190 ++ * Clear any transactional state, we're exec()ing. The cause is
191 ++ * not important as there will never be a recheckpoint so it's not
192 ++ * user visible.
193 ++ */
194 ++ if (MSR_TM_SUSPENDED(mfmsr()))
195 ++ tm_reclaim_current(0);
196 ++#endif
197 ++
198 + memset(regs->gpr, 0, sizeof(regs->gpr));
199 + regs->ctr = 0;
200 + regs->link = 0;
201 +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
202 +index de1ec54a2a57..a2082be599c8 100644
203 +--- a/arch/powerpc/platforms/pseries/iommu.c
204 ++++ b/arch/powerpc/platforms/pseries/iommu.c
205 +@@ -826,7 +826,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
206 + static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
207 + struct ddw_query_response *query)
208 + {
209 +- struct eeh_dev *edev;
210 ++ struct device_node *dn;
211 ++ struct pci_dn *pdn;
212 + u32 cfg_addr;
213 + u64 buid;
214 + int ret;
215 +@@ -837,11 +838,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
216 + * Retrieve them from the pci device, not the node with the
217 + * dma-window property
218 + */
219 +- edev = pci_dev_to_eeh_dev(dev);
220 +- cfg_addr = edev->config_addr;
221 +- if (edev->pe_config_addr)
222 +- cfg_addr = edev->pe_config_addr;
223 +- buid = edev->phb->buid;
224 ++ dn = pci_device_to_OF_node(dev);
225 ++ pdn = PCI_DN(dn);
226 ++ buid = pdn->phb->buid;
227 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
228 +
229 + ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
230 + cfg_addr, BUID_HI(buid), BUID_LO(buid));
231 +@@ -855,7 +855,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
232 + struct ddw_create_response *create, int page_shift,
233 + int window_shift)
234 + {
235 +- struct eeh_dev *edev;
236 ++ struct device_node *dn;
237 ++ struct pci_dn *pdn;
238 + u32 cfg_addr;
239 + u64 buid;
240 + int ret;
241 +@@ -866,11 +867,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
242 + * Retrieve them from the pci device, not the node with the
243 + * dma-window property
244 + */
245 +- edev = pci_dev_to_eeh_dev(dev);
246 +- cfg_addr = edev->config_addr;
247 +- if (edev->pe_config_addr)
248 +- cfg_addr = edev->pe_config_addr;
249 +- buid = edev->phb->buid;
250 ++ dn = pci_device_to_OF_node(dev);
251 ++ pdn = PCI_DN(dn);
252 ++ buid = pdn->phb->buid;
253 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
254 +
255 + do {
256 + /* extra outputs are LIOBN and dma-addr (hi, lo) */
257 +diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
258 +index cd29d2f4e4f3..749313b452ae 100644
259 +--- a/arch/s390/include/asm/syscall.h
260 ++++ b/arch/s390/include/asm/syscall.h
261 +@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
262 + struct pt_regs *regs,
263 + int error, long val)
264 + {
265 +- regs->gprs[2] = error ? -error : val;
266 ++ regs->gprs[2] = error ? error : val;
267 + }
268 +
269 + static inline void syscall_get_arguments(struct task_struct *task,
270 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
271 +index 878df7e88cd4..3ef7e11b06fb 100644
272 +--- a/arch/x86/boot/Makefile
273 ++++ b/arch/x86/boot/Makefile
274 +@@ -156,6 +156,9 @@ isoimage: $(obj)/bzImage
275 + for i in lib lib64 share end ; do \
276 + if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
277 + cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
278 ++ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
279 ++ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
280 ++ fi ; \
281 + break ; \
282 + fi ; \
283 + if [ $$i = end ] ; then exit 1 ; fi ; \
284 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
285 +index dec8de4e1663..0c02d794e802 100644
286 +--- a/arch/x86/kernel/amd_nb.c
287 ++++ b/arch/x86/kernel/amd_nb.c
288 +@@ -67,8 +67,8 @@ int amd_cache_northbridges(void)
289 + while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
290 + i++;
291 +
292 +- if (i == 0)
293 +- return 0;
294 ++ if (!i)
295 ++ return -ENODEV;
296 +
297 + nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
298 + if (!nb)
299 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
300 +index 61cd5200608d..a5186a47b12f 100644
301 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
302 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
303 +@@ -2606,13 +2606,13 @@ __init int intel_pmu_init(void)
304 + * counter, so do not extend mask to generic counters
305 + */
306 + for_each_event_constraint(c, x86_pmu.event_constraints) {
307 +- if (c->cmask != FIXED_EVENT_FLAGS
308 +- || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
309 +- continue;
310 ++ if (c->cmask == FIXED_EVENT_FLAGS
311 ++ && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
312 ++ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
313 + }
314 +-
315 +- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
316 +- c->weight += x86_pmu.num_counters;
317 ++ c->idxmsk64 &=
318 ++ ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
319 ++ c->weight = hweight64(c->idxmsk64);
320 + }
321 + }
322 +
323 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
324 +index 490fee15fea5..6cd32acb376f 100644
325 +--- a/arch/x86/kernel/kprobes/core.c
326 ++++ b/arch/x86/kernel/kprobes/core.c
327 +@@ -911,7 +911,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
328 + * normal page fault.
329 + */
330 + regs->ip = (unsigned long)cur->addr;
331 ++ /*
332 ++ * Trap flag (TF) has been set here because this fault
333 ++ * happened where the single stepping will be done.
334 ++ * So clear it by resetting the current kprobe:
335 ++ */
336 ++ regs->flags &= ~X86_EFLAGS_TF;
337 ++
338 ++ /*
339 ++ * If the TF flag was set before the kprobe hit,
340 ++ * don't touch it:
341 ++ */
342 + regs->flags |= kcb->kprobe_old_flags;
343 ++
344 + if (kcb->kprobe_status == KPROBE_REENTER)
345 + restore_previous_kprobe(kcb);
346 + else
347 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
348 +index c6c77b767a8d..05383d644e3a 100644
349 +--- a/drivers/ata/libata-eh.c
350 ++++ b/drivers/ata/libata-eh.c
351 +@@ -604,7 +604,7 @@ void ata_scsi_error(struct Scsi_Host *host)
352 + ata_scsi_port_error_handler(host, ap);
353 +
354 + /* finish or retry handled scmd's and clean up */
355 +- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
356 ++ WARN_ON(!list_empty(&eh_work_q));
357 +
358 + DPRINTK("EXIT\n");
359 + }
360 +diff --git a/drivers/base/module.c b/drivers/base/module.c
361 +index db930d3ee312..2a215780eda2 100644
362 +--- a/drivers/base/module.c
363 ++++ b/drivers/base/module.c
364 +@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
365 +
366 + static void module_create_drivers_dir(struct module_kobject *mk)
367 + {
368 +- if (!mk || mk->drivers_dir)
369 +- return;
370 ++ static DEFINE_MUTEX(drivers_dir_mutex);
371 +
372 +- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
373 ++ mutex_lock(&drivers_dir_mutex);
374 ++ if (mk && !mk->drivers_dir)
375 ++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
376 ++ mutex_unlock(&drivers_dir_mutex);
377 + }
378 +
379 + void module_add_driver(struct module *mod, struct device_driver *drv)
380 +diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
381 +index 8e5e0187506f..3ff21c3e9ab2 100644
382 +--- a/drivers/crypto/ux500/hash/hash_core.c
383 ++++ b/drivers/crypto/ux500/hash/hash_core.c
384 +@@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
385 + &device_data->state);
386 + memmove(req_ctx->state.buffer,
387 + device_data->state.buffer,
388 +- HASH_BLOCK_SIZE / sizeof(u32));
389 ++ HASH_BLOCK_SIZE);
390 + if (ret) {
391 + dev_err(device_data->dev,
392 + "%s: hash_resume_state() failed!\n",
393 +@@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
394 +
395 + memmove(device_data->state.buffer,
396 + req_ctx->state.buffer,
397 +- HASH_BLOCK_SIZE / sizeof(u32));
398 ++ HASH_BLOCK_SIZE);
399 + if (ret) {
400 + dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
401 + __func__);
402 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
403 +index 0a9d1fd32994..d30aba867a3a 100644
404 +--- a/drivers/gpu/drm/i915/intel_display.c
405 ++++ b/drivers/gpu/drm/i915/intel_display.c
406 +@@ -5590,12 +5590,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
407 + struct drm_i915_private *dev_priv = dev->dev_private;
408 + struct drm_mode_config *mode_config = &dev->mode_config;
409 + struct intel_encoder *encoder;
410 ++ int i;
411 + u32 val, final;
412 + bool has_lvds = false;
413 + bool has_cpu_edp = false;
414 + bool has_panel = false;
415 + bool has_ck505 = false;
416 + bool can_ssc = false;
417 ++ bool using_ssc_source = false;
418 +
419 + /* We need to take the global config into account */
420 + list_for_each_entry(encoder, &mode_config->encoder_list,
421 +@@ -5621,8 +5623,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
422 + can_ssc = true;
423 + }
424 +
425 +- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
426 +- has_panel, has_lvds, has_ck505);
427 ++ /* Check if any DPLLs are using the SSC source */
428 ++ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
429 ++ u32 temp = I915_READ(PCH_DPLL(i));
430 ++
431 ++ if (!(temp & DPLL_VCO_ENABLE))
432 ++ continue;
433 ++
434 ++ if ((temp & PLL_REF_INPUT_MASK) ==
435 ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
436 ++ using_ssc_source = true;
437 ++ break;
438 ++ }
439 ++ }
440 ++
441 ++ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
442 ++ has_panel, has_lvds, has_ck505, using_ssc_source);
443 +
444 + /* Ironlake: try to setup display ref clock before DPLL
445 + * enabling. This is only under driver's control after
446 +@@ -5659,9 +5675,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
447 + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
448 + } else
449 + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
450 +- } else {
451 +- final |= DREF_SSC_SOURCE_DISABLE;
452 +- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
453 ++ } else if (using_ssc_source) {
454 ++ final |= DREF_SSC_SOURCE_ENABLE;
455 ++ final |= DREF_SSC1_ENABLE;
456 + }
457 +
458 + if (final == val)
459 +@@ -5708,7 +5724,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
460 + POSTING_READ(PCH_DREF_CONTROL);
461 + udelay(200);
462 + } else {
463 +- DRM_DEBUG_KMS("Disabling SSC entirely\n");
464 ++ DRM_DEBUG_KMS("Disabling CPU source output\n");
465 +
466 + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
467 +
468 +@@ -5719,16 +5735,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
469 + POSTING_READ(PCH_DREF_CONTROL);
470 + udelay(200);
471 +
472 +- /* Turn off the SSC source */
473 +- val &= ~DREF_SSC_SOURCE_MASK;
474 +- val |= DREF_SSC_SOURCE_DISABLE;
475 ++ if (!using_ssc_source) {
476 ++ DRM_DEBUG_KMS("Disabling SSC source\n");
477 +
478 +- /* Turn off SSC1 */
479 +- val &= ~DREF_SSC1_ENABLE;
480 ++ /* Turn off the SSC source */
481 ++ val &= ~DREF_SSC_SOURCE_MASK;
482 ++ val |= DREF_SSC_SOURCE_DISABLE;
483 +
484 +- I915_WRITE(PCH_DREF_CONTROL, val);
485 +- POSTING_READ(PCH_DREF_CONTROL);
486 +- udelay(200);
487 ++ /* Turn off SSC1 */
488 ++ val &= ~DREF_SSC1_ENABLE;
489 ++
490 ++ I915_WRITE(PCH_DREF_CONTROL, val);
491 ++ POSTING_READ(PCH_DREF_CONTROL);
492 ++ udelay(200);
493 ++ }
494 + }
495 +
496 + BUG_ON(val != final);
497 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
498 +index 129915eca07b..578eb21ca584 100644
499 +--- a/drivers/gpu/drm/radeon/radeon_device.c
500 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
501 +@@ -548,6 +548,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
502 + /*
503 + * GPU helpers function.
504 + */
505 ++
506 ++/**
507 ++ * radeon_device_is_virtual - check if we are running is a virtual environment
508 ++ *
509 ++ * Check if the asic has been passed through to a VM (all asics).
510 ++ * Used at driver startup.
511 ++ * Returns true if virtual or false if not.
512 ++ */
513 ++static bool radeon_device_is_virtual(void)
514 ++{
515 ++#ifdef CONFIG_X86
516 ++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
517 ++#else
518 ++ return false;
519 ++#endif
520 ++}
521 ++
522 + /**
523 + * radeon_card_posted - check if the hw has already been initialized
524 + *
525 +@@ -561,6 +578,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
526 + {
527 + uint32_t reg;
528 +
529 ++ /* for pass through, always force asic_init */
530 ++ if (radeon_device_is_virtual())
531 ++ return false;
532 ++
533 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
534 + if (efi_enabled(EFI_BOOT) &&
535 + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
536 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
537 +index 4e49462870ab..d0c8a1c1e1fe 100644
538 +--- a/drivers/hid/hid-elo.c
539 ++++ b/drivers/hid/hid-elo.c
540 +@@ -259,7 +259,7 @@ static void elo_remove(struct hid_device *hdev)
541 + struct elo_priv *priv = hid_get_drvdata(hdev);
542 +
543 + hid_hw_stop(hdev);
544 +- flush_workqueue(wq);
545 ++ cancel_delayed_work_sync(&priv->work);
546 + kfree(priv);
547 + }
548 +
549 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
550 +index 2f1ddca6f2e0..700145b15088 100644
551 +--- a/drivers/hid/usbhid/hiddev.c
552 ++++ b/drivers/hid/usbhid/hiddev.c
553 +@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
554 + goto inval;
555 + } else if (uref->usage_index >= field->report_count)
556 + goto inval;
557 +-
558 +- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
559 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
560 +- uref->usage_index + uref_multi->num_values > field->report_count))
561 +- goto inval;
562 + }
563 +
564 ++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
565 ++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
566 ++ uref->usage_index + uref_multi->num_values > field->report_count))
567 ++ goto inval;
568 ++
569 + switch (cmd) {
570 + case HIDIOCGUSAGE:
571 + uref->value = field->value[uref->usage_index];
572 +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
573 +index 98ba761cbb9c..d8738d4f8df3 100644
574 +--- a/drivers/iio/accel/kxsd9.c
575 ++++ b/drivers/iio/accel/kxsd9.c
576 +@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
577 +
578 + mutex_lock(&st->buf_lock);
579 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
580 +- if (ret)
581 ++ if (ret < 0)
582 + goto error_ret;
583 + st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
584 + st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
585 +@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
586 + break;
587 + case IIO_CHAN_INFO_SCALE:
588 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
589 +- if (ret)
590 ++ if (ret < 0)
591 + goto error_ret;
592 + *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
593 + ret = IIO_VAL_INT_PLUS_MICRO;
594 +diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
595 +index 70f78c3062a7..8e2b9e70511d 100644
596 +--- a/drivers/iio/adc/ad7266.c
597 ++++ b/drivers/iio/adc/ad7266.c
598 +@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
599 +
600 + st = iio_priv(indio_dev);
601 +
602 +- st->reg = devm_regulator_get(&spi->dev, "vref");
603 +- if (!IS_ERR_OR_NULL(st->reg)) {
604 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
605 ++ if (!IS_ERR(st->reg)) {
606 + ret = regulator_enable(st->reg);
607 + if (ret)
608 + return ret;
609 +@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
610 +
611 + st->vref_mv = ret / 1000;
612 + } else {
613 ++ /* Any other error indicates that the regulator does exist */
614 ++ if (PTR_ERR(st->reg) != -ENODEV)
615 ++ return PTR_ERR(st->reg);
616 + /* Use internal reference */
617 + st->vref_mv = 2500;
618 + }
619 +diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
620 +index 766fab24b720..bc7f51f9a00a 100644
621 +--- a/drivers/iio/industrialio-trigger.c
622 ++++ b/drivers/iio/industrialio-trigger.c
623 +@@ -205,22 +205,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
624 +
625 + /* Prevent the module from being removed whilst attached to a trigger */
626 + __module_get(pf->indio_dev->info->driver_module);
627 ++
628 ++ /* Get irq number */
629 + pf->irq = iio_trigger_get_irq(trig);
630 ++ if (pf->irq < 0)
631 ++ goto out_put_module;
632 ++
633 ++ /* Request irq */
634 + ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
635 + pf->type, pf->name,
636 + pf);
637 +- if (ret < 0) {
638 +- module_put(pf->indio_dev->info->driver_module);
639 +- return ret;
640 +- }
641 ++ if (ret < 0)
642 ++ goto out_put_irq;
643 +
644 ++ /* Enable trigger in driver */
645 + if (trig->ops && trig->ops->set_trigger_state && notinuse) {
646 + ret = trig->ops->set_trigger_state(trig, true);
647 + if (ret < 0)
648 +- module_put(pf->indio_dev->info->driver_module);
649 ++ goto out_free_irq;
650 + }
651 +
652 + return ret;
653 ++
654 ++out_free_irq:
655 ++ free_irq(pf->irq, pf);
656 ++out_put_irq:
657 ++ iio_trigger_put_irq(trig, pf->irq);
658 ++out_put_module:
659 ++ module_put(pf->indio_dev->info->driver_module);
660 ++ return ret;
661 + }
662 +
663 + static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
664 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
665 +index 1ddcebd84622..e05c73aeec66 100644
666 +--- a/drivers/infiniband/hw/mlx4/ah.c
667 ++++ b/drivers/infiniband/hw/mlx4/ah.c
668 +@@ -46,6 +46,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
669 +
670 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
671 + ah->av.ib.g_slid = ah_attr->src_path_bits;
672 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
673 + if (ah_attr->ah_flags & IB_AH_GRH) {
674 + ah->av.ib.g_slid |= 0x80;
675 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
676 +@@ -63,7 +64,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
677 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
678 + --ah->av.ib.stat_rate;
679 + }
680 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
681 +
682 + return &ah->ibah;
683 + }
684 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
685 +index 3e1d7d29b4ec..758596021cf0 100644
686 +--- a/drivers/net/ethernet/atheros/alx/main.c
687 ++++ b/drivers/net/ethernet/atheros/alx/main.c
688 +@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
689 + while (!cur_buf->skb && next != rxq->read_idx) {
690 + struct alx_rfd *rfd = &rxq->rfd[cur];
691 +
692 +- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
693 ++ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
694 + if (!skb)
695 + break;
696 ++
697 ++ /* Workround for the HW RX DMA overflow issue */
698 ++ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
699 ++ skb_reserve(skb, 64);
700 ++
701 + dma = dma_map_single(&alx->hw.pdev->dev,
702 + skb->data, alx->rxbuf_size,
703 + DMA_FROM_DEVICE);
704 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
705 +index c6637229bdb8..584504e6e95c 100644
706 +--- a/drivers/net/usb/cdc_ncm.c
707 ++++ b/drivers/net/usb/cdc_ncm.c
708 +@@ -438,6 +438,13 @@ advance:
709 + if (cdc_ncm_setup(dev))
710 + goto error2;
711 +
712 ++ /* Some firmwares need a pause here or they will silently fail
713 ++ * to set up the interface properly. This value was decided
714 ++ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
715 ++ * firmware.
716 ++ */
717 ++ usleep_range(10000, 20000);
718 ++
719 + /* configure data interface */
720 + temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
721 + if (temp) {
722 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
723 +index 505ff601d9f4..cb58ad0311da 100644
724 +--- a/drivers/net/wireless/mac80211_hwsim.c
725 ++++ b/drivers/net/wireless/mac80211_hwsim.c
726 +@@ -2251,6 +2251,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
727 + if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
728 + !info->attrs[HWSIM_ATTR_FLAGS] ||
729 + !info->attrs[HWSIM_ATTR_COOKIE] ||
730 ++ !info->attrs[HWSIM_ATTR_SIGNAL] ||
731 + !info->attrs[HWSIM_ATTR_TX_INFO])
732 + goto out;
733 +
734 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
735 +index 787c8a883c3c..e11ca1f7bef6 100644
736 +--- a/drivers/scsi/scsi_error.c
737 ++++ b/drivers/scsi/scsi_error.c
738 +@@ -1111,7 +1111,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
739 + */
740 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
741 + {
742 +- scmd->device->host->host_failed--;
743 + scmd->eh_eflags = 0;
744 + list_move_tail(&scmd->eh_entry, done_q);
745 + }
746 +@@ -2193,6 +2192,9 @@ int scsi_error_handler(void *data)
747 + else
748 + scsi_unjam_host(shost);
749 +
750 ++ /* All scmds have been handled */
751 ++ shost->host_failed = 0;
752 ++
753 + /*
754 + * Note - if the above fails completely, the action is to take
755 + * individual devices offline and flush the queue of any
756 +diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
757 +index 7f6ccdfaf168..99e148899404 100644
758 +--- a/drivers/staging/iio/accel/sca3000_core.c
759 ++++ b/drivers/staging/iio/accel/sca3000_core.c
760 +@@ -592,7 +592,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
761 + goto error_ret_mut;
762 + ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
763 + mutex_unlock(&st->lock);
764 +- if (ret)
765 ++ if (ret < 0)
766 + goto error_ret;
767 + val = ret;
768 + if (base_freq > 0)
769 +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
770 +index d0e3a4497707..adf4d3124cc6 100644
771 +--- a/drivers/tty/vt/keyboard.c
772 ++++ b/drivers/tty/vt/keyboard.c
773 +@@ -365,34 +365,22 @@ static void to_utf8(struct vc_data *vc, uint c)
774 +
775 + static void do_compute_shiftstate(void)
776 + {
777 +- unsigned int i, j, k, sym, val;
778 ++ unsigned int k, sym, val;
779 +
780 + shift_state = 0;
781 + memset(shift_down, 0, sizeof(shift_down));
782 +
783 +- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
784 +-
785 +- if (!key_down[i])
786 ++ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
787 ++ sym = U(key_maps[0][k]);
788 ++ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
789 + continue;
790 +
791 +- k = i * BITS_PER_LONG;
792 +-
793 +- for (j = 0; j < BITS_PER_LONG; j++, k++) {
794 +-
795 +- if (!test_bit(k, key_down))
796 +- continue;
797 ++ val = KVAL(sym);
798 ++ if (val == KVAL(K_CAPSSHIFT))
799 ++ val = KVAL(K_SHIFT);
800 +
801 +- sym = U(key_maps[0][k]);
802 +- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
803 +- continue;
804 +-
805 +- val = KVAL(sym);
806 +- if (val == KVAL(K_CAPSSHIFT))
807 +- val = KVAL(K_SHIFT);
808 +-
809 +- shift_down[val]++;
810 +- shift_state |= (1 << val);
811 +- }
812 ++ shift_down[val]++;
813 ++ shift_state |= BIT(val);
814 + }
815 + }
816 +
817 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
818 +index 00addda9ad53..b6c85fbd0a14 100644
819 +--- a/drivers/usb/core/quirks.c
820 ++++ b/drivers/usb/core/quirks.c
821 +@@ -205,6 +205,9 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
822 + /* Logitech Optical Mouse M90/M100 */
823 + { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
824 +
825 ++ /* Acer C120 LED Projector */
826 ++ { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
827 ++
828 + /* Blackmagic Design Intensity Shuttle */
829 + { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
830 +
831 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
832 +index 6b0fb6af6815..a2df3e7e66e2 100644
833 +--- a/drivers/usb/musb/musb_host.c
834 ++++ b/drivers/usb/musb/musb_host.c
835 +@@ -583,14 +583,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
836 + musb_writew(ep->regs, MUSB_TXCSR, 0);
837 +
838 + /* scrub all previous state, clearing toggle */
839 +- } else {
840 +- csr = musb_readw(ep->regs, MUSB_RXCSR);
841 +- if (csr & MUSB_RXCSR_RXPKTRDY)
842 +- WARNING("rx%d, packet/%d ready?\n", ep->epnum,
843 +- musb_readw(ep->regs, MUSB_RXCOUNT));
844 +-
845 +- musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
846 + }
847 ++ csr = musb_readw(ep->regs, MUSB_RXCSR);
848 ++ if (csr & MUSB_RXCSR_RXPKTRDY)
849 ++ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
850 ++ musb_readw(ep->regs, MUSB_RXCOUNT));
851 ++
852 ++ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
853 +
854 + /* target addr and (for multipoint) hub addr/port */
855 + if (musb->is_multipoint) {
856 +@@ -950,9 +949,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
857 + if (is_in) {
858 + dma = is_dma_capable() ? ep->rx_channel : NULL;
859 +
860 +- /* clear nak timeout bit */
861 ++ /*
862 ++ * Need to stop the transaction by clearing REQPKT first
863 ++ * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
864 ++ * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
865 ++ */
866 + rx_csr = musb_readw(epio, MUSB_RXCSR);
867 + rx_csr |= MUSB_RXCSR_H_WZC_BITS;
868 ++ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
869 ++ musb_writew(epio, MUSB_RXCSR, rx_csr);
870 + rx_csr &= ~MUSB_RXCSR_DATAERROR;
871 + musb_writew(epio, MUSB_RXCSR, rx_csr);
872 +
873 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
874 +index 7231859119f1..9deb30b0e077 100644
875 +--- a/drivers/xen/xen-acpi-processor.c
876 ++++ b/drivers/xen/xen-acpi-processor.c
877 +@@ -423,36 +423,7 @@ upload:
878 +
879 + return 0;
880 + }
881 +-static int __init check_prereq(void)
882 +-{
883 +- struct cpuinfo_x86 *c = &cpu_data(0);
884 +-
885 +- if (!xen_initial_domain())
886 +- return -ENODEV;
887 +-
888 +- if (!acpi_gbl_FADT.smi_command)
889 +- return -ENODEV;
890 +-
891 +- if (c->x86_vendor == X86_VENDOR_INTEL) {
892 +- if (!cpu_has(c, X86_FEATURE_EST))
893 +- return -ENODEV;
894 +
895 +- return 0;
896 +- }
897 +- if (c->x86_vendor == X86_VENDOR_AMD) {
898 +- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
899 +- * as we get compile warnings for the static functions.
900 +- */
901 +-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
902 +-#define USE_HW_PSTATE 0x00000080
903 +- u32 eax, ebx, ecx, edx;
904 +- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
905 +- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
906 +- return -ENODEV;
907 +- return 0;
908 +- }
909 +- return -ENODEV;
910 +-}
911 + /* acpi_perf_data is a pointer to percpu data. */
912 + static struct acpi_processor_performance __percpu *acpi_perf_data;
913 +
914 +@@ -508,10 +479,10 @@ static struct syscore_ops xap_syscore_ops = {
915 + static int __init xen_acpi_processor_init(void)
916 + {
917 + unsigned int i;
918 +- int rc = check_prereq();
919 ++ int rc;
920 +
921 +- if (rc)
922 +- return rc;
923 ++ if (!xen_initial_domain())
924 ++ return -ENODEV;
925 +
926 + nr_acpi_bits = get_max_acpi_id() + 1;
927 + acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
928 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
929 +index 8813ff776ba3..7522829b29ef 100644
930 +--- a/fs/cifs/connect.c
931 ++++ b/fs/cifs/connect.c
932 +@@ -410,7 +410,9 @@ cifs_echo_request(struct work_struct *work)
933 + * server->ops->need_neg() == true. Also, no need to ping if
934 + * we got a response recently.
935 + */
936 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
937 ++
938 ++ if (server->tcpStatus == CifsNeedReconnect ||
939 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
940 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
941 + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
942 + goto requeue_echo;
943 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
944 +index fc656bc5d6cb..e83f7d2b585b 100644
945 +--- a/fs/cifs/smb2pdu.c
946 ++++ b/fs/cifs/smb2pdu.c
947 +@@ -1590,6 +1590,33 @@ SMB2_echo(struct TCP_Server_Info *server)
948 +
949 + cifs_dbg(FYI, "In echo request\n");
950 +
951 ++ if (server->tcpStatus == CifsNeedNegotiate) {
952 ++ struct list_head *tmp, *tmp2;
953 ++ struct cifs_ses *ses;
954 ++ struct cifs_tcon *tcon;
955 ++
956 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
957 ++ spin_lock(&cifs_tcp_ses_lock);
958 ++ list_for_each(tmp, &server->smb_ses_list) {
959 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
960 ++ list_for_each(tmp2, &ses->tcon_list) {
961 ++ tcon = list_entry(tmp2, struct cifs_tcon,
962 ++ tcon_list);
963 ++ /* add check for persistent handle reconnect */
964 ++ if (tcon && tcon->need_reconnect) {
965 ++ spin_unlock(&cifs_tcp_ses_lock);
966 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
967 ++ spin_lock(&cifs_tcp_ses_lock);
968 ++ }
969 ++ }
970 ++ }
971 ++ spin_unlock(&cifs_tcp_ses_lock);
972 ++ }
973 ++
974 ++ /* if no session, renegotiate failed above */
975 ++ if (server->tcpStatus == CifsNeedNegotiate)
976 ++ return -EIO;
977 ++
978 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
979 + if (rc)
980 + return rc;
981 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
982 +index 4a48fe4b84b6..589418d44310 100644
983 +--- a/fs/nfs/dir.c
984 ++++ b/fs/nfs/dir.c
985 +@@ -1459,9 +1459,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
986 + err = PTR_ERR(inode);
987 + trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
988 + put_nfs_open_context(ctx);
989 ++ d_drop(dentry);
990 + switch (err) {
991 + case -ENOENT:
992 +- d_drop(dentry);
993 + d_add(dentry, NULL);
994 + break;
995 + case -EISDIR:
996 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
997 +index 3b5e86fd2800..98eae9cc78be 100644
998 +--- a/fs/nfs/nfs4proc.c
999 ++++ b/fs/nfs/nfs4proc.c
1000 +@@ -2583,12 +2583,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
1001 + call_close |= is_wronly;
1002 + else if (is_wronly)
1003 + calldata->arg.fmode |= FMODE_WRITE;
1004 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
1005 ++ call_close |= is_rdwr;
1006 + } else if (is_rdwr)
1007 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
1008 +
1009 +- if (calldata->arg.fmode == 0)
1010 +- call_close |= is_rdwr;
1011 +-
1012 + if (!nfs4_valid_open_stateid(state))
1013 + call_close = 0;
1014 + spin_unlock(&state->owner->so_lock);
1015 +diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
1016 +index 11c1fba29312..f8e5593884a5 100644
1017 +--- a/fs/nfsd/nfs2acl.c
1018 ++++ b/fs/nfsd/nfs2acl.c
1019 +@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
1020 + goto out;
1021 +
1022 + inode = fh->fh_dentry->d_inode;
1023 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
1024 +- error = -EOPNOTSUPP;
1025 +- goto out_errno;
1026 +- }
1027 +
1028 + error = fh_want_write(fh);
1029 + if (error)
1030 + goto out_errno;
1031 +
1032 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
1033 ++ fh_lock(fh);
1034 ++
1035 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
1036 + if (error)
1037 +- goto out_drop_write;
1038 +- error = inode->i_op->set_acl(inode, argp->acl_default,
1039 +- ACL_TYPE_DEFAULT);
1040 ++ goto out_drop_lock;
1041 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
1042 + if (error)
1043 +- goto out_drop_write;
1044 ++ goto out_drop_lock;
1045 ++
1046 ++ fh_unlock(fh);
1047 +
1048 + fh_drop_write(fh);
1049 +
1050 +@@ -131,7 +130,8 @@ out:
1051 + posix_acl_release(argp->acl_access);
1052 + posix_acl_release(argp->acl_default);
1053 + return nfserr;
1054 +-out_drop_write:
1055 ++out_drop_lock:
1056 ++ fh_unlock(fh);
1057 + fh_drop_write(fh);
1058 + out_errno:
1059 + nfserr = nfserrno(error);
1060 +diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
1061 +index adc5f1b1dc26..bcfef1c2b2a5 100644
1062 +--- a/fs/nfsd/nfs3acl.c
1063 ++++ b/fs/nfsd/nfs3acl.c
1064 +@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
1065 + goto out;
1066 +
1067 + inode = fh->fh_dentry->d_inode;
1068 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
1069 +- error = -EOPNOTSUPP;
1070 +- goto out_errno;
1071 +- }
1072 +
1073 + error = fh_want_write(fh);
1074 + if (error)
1075 + goto out_errno;
1076 +
1077 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
1078 ++ fh_lock(fh);
1079 ++
1080 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
1081 + if (error)
1082 +- goto out_drop_write;
1083 +- error = inode->i_op->set_acl(inode, argp->acl_default,
1084 +- ACL_TYPE_DEFAULT);
1085 ++ goto out_drop_lock;
1086 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
1087 +
1088 +-out_drop_write:
1089 ++out_drop_lock:
1090 ++ fh_unlock(fh);
1091 + fh_drop_write(fh);
1092 + out_errno:
1093 + nfserr = nfserrno(error);
1094 +diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
1095 +index dea8c60954ba..1e8857b6dbba 100644
1096 +--- a/fs/nfsd/nfs4acl.c
1097 ++++ b/fs/nfsd/nfs4acl.c
1098 +@@ -818,9 +818,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
1099 + dentry = fhp->fh_dentry;
1100 + inode = dentry->d_inode;
1101 +
1102 +- if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
1103 +- return nfserr_attrnotsupp;
1104 +-
1105 + if (S_ISDIR(inode->i_mode))
1106 + flags = NFS4_ACL_DIR;
1107 +
1108 +@@ -830,16 +827,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
1109 + if (host_error < 0)
1110 + goto out_nfserr;
1111 +
1112 +- host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
1113 ++ fh_lock(fhp);
1114 ++
1115 ++ host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
1116 + if (host_error < 0)
1117 +- goto out_release;
1118 ++ goto out_drop_lock;
1119 +
1120 + if (S_ISDIR(inode->i_mode)) {
1121 +- host_error = inode->i_op->set_acl(inode, dpacl,
1122 +- ACL_TYPE_DEFAULT);
1123 ++ host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
1124 + }
1125 +
1126 +-out_release:
1127 ++out_drop_lock:
1128 ++ fh_unlock(fhp);
1129 ++
1130 + posix_acl_release(pacl);
1131 + posix_acl_release(dpacl);
1132 + out_nfserr:
1133 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
1134 +index 0855f772cd41..3de7c223c963 100644
1135 +--- a/fs/posix_acl.c
1136 ++++ b/fs/posix_acl.c
1137 +@@ -787,38 +787,42 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
1138 + return error;
1139 + }
1140 +
1141 +-static int
1142 +-posix_acl_xattr_set(struct dentry *dentry, const char *name,
1143 +- const void *value, size_t size, int flags, int type)
1144 ++int
1145 ++set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
1146 + {
1147 +- struct inode *inode = dentry->d_inode;
1148 +- struct posix_acl *acl = NULL;
1149 +- int ret;
1150 +-
1151 + if (!IS_POSIXACL(inode))
1152 + return -EOPNOTSUPP;
1153 + if (!inode->i_op->set_acl)
1154 + return -EOPNOTSUPP;
1155 +
1156 + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
1157 +- return value ? -EACCES : 0;
1158 ++ return acl ? -EACCES : 0;
1159 + if (!inode_owner_or_capable(inode))
1160 + return -EPERM;
1161 +
1162 ++ if (acl) {
1163 ++ int ret = posix_acl_valid(acl);
1164 ++ if (ret)
1165 ++ return ret;
1166 ++ }
1167 ++ return inode->i_op->set_acl(inode, acl, type);
1168 ++}
1169 ++EXPORT_SYMBOL(set_posix_acl);
1170 ++
1171 ++static int
1172 ++posix_acl_xattr_set(struct dentry *dentry, const char *name,
1173 ++ const void *value, size_t size, int flags, int type)
1174 ++{
1175 ++ struct inode *inode = dentry->d_inode;
1176 ++ struct posix_acl *acl = NULL;
1177 ++ int ret;
1178 ++
1179 + if (value) {
1180 + acl = posix_acl_from_xattr(&init_user_ns, value, size);
1181 + if (IS_ERR(acl))
1182 + return PTR_ERR(acl);
1183 +-
1184 +- if (acl) {
1185 +- ret = posix_acl_valid(acl);
1186 +- if (ret)
1187 +- goto out;
1188 +- }
1189 + }
1190 +-
1191 +- ret = inode->i_op->set_acl(inode, acl, type);
1192 +-out:
1193 ++ ret = set_posix_acl(inode, type, acl);
1194 + posix_acl_release(acl);
1195 + return ret;
1196 + }
1197 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
1198 +index b56eb6275744..c0d8fde3e6d9 100644
1199 +--- a/fs/ubifs/file.c
1200 ++++ b/fs/ubifs/file.c
1201 +@@ -54,6 +54,7 @@
1202 + #include <linux/mount.h>
1203 + #include <linux/namei.h>
1204 + #include <linux/slab.h>
1205 ++#include <linux/migrate.h>
1206 +
1207 + static int read_block(struct inode *inode, void *addr, unsigned int block,
1208 + struct ubifs_data_node *dn)
1209 +@@ -1423,6 +1424,26 @@ static int ubifs_set_page_dirty(struct page *page)
1210 + return ret;
1211 + }
1212 +
1213 ++#ifdef CONFIG_MIGRATION
1214 ++static int ubifs_migrate_page(struct address_space *mapping,
1215 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
1216 ++{
1217 ++ int rc;
1218 ++
1219 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
1220 ++ if (rc != MIGRATEPAGE_SUCCESS)
1221 ++ return rc;
1222 ++
1223 ++ if (PagePrivate(page)) {
1224 ++ ClearPagePrivate(page);
1225 ++ SetPagePrivate(newpage);
1226 ++ }
1227 ++
1228 ++ migrate_page_copy(newpage, page);
1229 ++ return MIGRATEPAGE_SUCCESS;
1230 ++}
1231 ++#endif
1232 ++
1233 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
1234 + {
1235 + /*
1236 +@@ -1559,6 +1580,9 @@ const struct address_space_operations ubifs_file_address_operations = {
1237 + .write_end = ubifs_write_end,
1238 + .invalidatepage = ubifs_invalidatepage,
1239 + .set_page_dirty = ubifs_set_page_dirty,
1240 ++#ifdef CONFIG_MIGRATION
1241 ++ .migratepage = ubifs_migrate_page,
1242 ++#endif
1243 + .releasepage = ubifs_releasepage,
1244 + };
1245 +
1246 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
1247 +index daec99af5d54..1c88b177cb9c 100644
1248 +--- a/include/linux/usb/ehci_def.h
1249 ++++ b/include/linux/usb/ehci_def.h
1250 +@@ -178,11 +178,11 @@ struct ehci_regs {
1251 + * PORTSCx
1252 + */
1253 + /* HOSTPC: offset 0x84 */
1254 +- u32 hostpc[1]; /* HOSTPC extension */
1255 ++ u32 hostpc[0]; /* HOSTPC extension */
1256 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
1257 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
1258 +
1259 +- u32 reserved5[16];
1260 ++ u32 reserved5[17];
1261 +
1262 + /* USBMODE_EX: offset 0xc8 */
1263 + u32 usbmode_ex; /* USB Device mode extension */
1264 +diff --git a/kernel/signal.c b/kernel/signal.c
1265 +index d8db156e5f5c..78d0e8f3f4ad 100644
1266 +--- a/kernel/signal.c
1267 ++++ b/kernel/signal.c
1268 +@@ -3004,11 +3004,9 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
1269 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1270 + */
1271 + if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
1272 +- (task_pid_vnr(current) != pid)) {
1273 +- /* We used to allow any < 0 si_code */
1274 +- WARN_ON_ONCE(info->si_code < 0);
1275 ++ (task_pid_vnr(current) != pid))
1276 + return -EPERM;
1277 +- }
1278 ++
1279 + info->si_signo = sig;
1280 +
1281 + /* POSIX.1b doesn't mention process groups. */
1282 +@@ -3053,12 +3051,10 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
1283 + /* Not even root can pretend to send signals from the kernel.
1284 + * Nor can they impersonate a kill()/tgkill(), which adds source info.
1285 + */
1286 +- if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
1287 +- (task_pid_vnr(current) != pid)) {
1288 +- /* We used to allow any < 0 si_code */
1289 +- WARN_ON_ONCE(info->si_code < 0);
1290 ++ if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
1291 ++ (task_pid_vnr(current) != pid))
1292 + return -EPERM;
1293 +- }
1294 ++
1295 + info->si_signo = sig;
1296 +
1297 + return do_send_specific(tgid, pid, sig, info);
1298 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
1299 +index 7b900474209d..6973eeca7d99 100644
1300 +--- a/kernel/trace/trace_printk.c
1301 ++++ b/kernel/trace/trace_printk.c
1302 +@@ -38,6 +38,10 @@ struct trace_bprintk_fmt {
1303 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
1304 + {
1305 + struct trace_bprintk_fmt *pos;
1306 ++
1307 ++ if (!fmt)
1308 ++ return ERR_PTR(-EINVAL);
1309 ++
1310 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
1311 + if (!strcmp(pos->fmt, fmt))
1312 + return pos;
1313 +@@ -59,7 +63,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
1314 + for (iter = start; iter < end; iter++) {
1315 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
1316 + if (tb_fmt) {
1317 +- *iter = tb_fmt->fmt;
1318 ++ if (!IS_ERR(tb_fmt))
1319 ++ *iter = tb_fmt->fmt;
1320 + continue;
1321 + }
1322 +
1323 +diff --git a/mm/migrate.c b/mm/migrate.c
1324 +index 3acac4a62c4b..23ca861c93e9 100644
1325 +--- a/mm/migrate.c
1326 ++++ b/mm/migrate.c
1327 +@@ -423,6 +423,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
1328 +
1329 + return MIGRATEPAGE_SUCCESS;
1330 + }
1331 ++EXPORT_SYMBOL(migrate_page_move_mapping);
1332 +
1333 + /*
1334 + * The expected number of remaining references is the same as that
1335 +@@ -582,6 +583,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
1336 + if (PageWriteback(newpage))
1337 + end_page_writeback(newpage);
1338 + }
1339 ++EXPORT_SYMBOL(migrate_page_copy);
1340 +
1341 + /************************************************************
1342 + * Migration functions
1343 +diff --git a/mm/shmem.c b/mm/shmem.c
1344 +index 85d8a1a3626c..8791289974c3 100644
1345 +--- a/mm/shmem.c
1346 ++++ b/mm/shmem.c
1347 +@@ -1893,9 +1893,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1348 + NULL);
1349 + if (error) {
1350 + /* Remove the !PageUptodate pages we added */
1351 +- shmem_undo_range(inode,
1352 +- (loff_t)start << PAGE_CACHE_SHIFT,
1353 +- (loff_t)index << PAGE_CACHE_SHIFT, true);
1354 ++ if (index > start) {
1355 ++ shmem_undo_range(inode,
1356 ++ (loff_t)start << PAGE_CACHE_SHIFT,
1357 ++ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
1358 ++ }
1359 + goto undone;
1360 + }
1361 +
1362 +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
1363 +index 42809cf91488..afdbfd65e71f 100644
1364 +--- a/net/ipv4/ipmr.c
1365 ++++ b/net/ipv4/ipmr.c
1366 +@@ -883,8 +883,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
1367 + {
1368 + struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1369 +
1370 +- if (c)
1371 ++ if (c) {
1372 ++ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1373 + c->mfc_un.res.minvif = MAXVIFS;
1374 ++ }
1375 + return c;
1376 + }
1377 +
1378 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1379 +index 8b61288e5746..86d30e60242a 100644
1380 +--- a/net/ipv6/ip6mr.c
1381 ++++ b/net/ipv6/ip6mr.c
1382 +@@ -1076,6 +1076,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
1383 + struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1384 + if (c == NULL)
1385 + return NULL;
1386 ++ c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1387 + c->mfc_un.res.minvif = MAXMIFS;
1388 + return c;
1389 + }
1390 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1391 +index 317b6dbf3190..fe2c3320e8fe 100644
1392 +--- a/net/ipv6/sit.c
1393 ++++ b/net/ipv6/sit.c
1394 +@@ -559,13 +559,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
1395 +
1396 + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1397 + ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1398 +- t->parms.link, 0, IPPROTO_IPV6, 0);
1399 ++ t->parms.link, 0, iph->protocol, 0);
1400 + err = 0;
1401 + goto out;
1402 + }
1403 + if (type == ICMP_REDIRECT) {
1404 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1405 +- IPPROTO_IPV6, 0);
1406 ++ iph->protocol, 0);
1407 + err = 0;
1408 + goto out;
1409 + }
1410 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
1411 +index 3d52d1d68431..f3057767b53d 100644
1412 +--- a/net/mac80211/mesh.c
1413 ++++ b/net/mac80211/mesh.c
1414 +@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
1415 + del_timer_sync(&sta->plink_timer);
1416 + }
1417 +
1418 ++ /* make sure no readers can access nexthop sta from here on */
1419 ++ mesh_path_flush_by_nexthop(sta);
1420 ++ synchronize_net();
1421 ++
1422 + if (changed)
1423 + ieee80211_mbss_info_change_notify(sdata, changed);
1424 + }
1425 +diff --git a/security/keys/key.c b/security/keys/key.c
1426 +index 6e21c11e48bc..9478d668f874 100644
1427 +--- a/security/keys/key.c
1428 ++++ b/security/keys/key.c
1429 +@@ -575,7 +575,7 @@ int key_reject_and_link(struct key *key,
1430 +
1431 + mutex_unlock(&key_construction_mutex);
1432 +
1433 +- if (keyring)
1434 ++ if (keyring && link_ret == 0)
1435 + __key_link_end(keyring, &key->index_key, edit);
1436 +
1437 + /* wake up anyone waiting for a key to be constructed */
1438 +diff --git a/sound/core/control.c b/sound/core/control.c
1439 +index 3fcead61f0ef..251bc575f5c3 100644
1440 +--- a/sound/core/control.c
1441 ++++ b/sound/core/control.c
1442 +@@ -150,6 +150,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
1443 +
1444 + if (snd_BUG_ON(!card || !id))
1445 + return;
1446 ++ if (card->shutdown)
1447 ++ return;
1448 + read_lock(&card->ctl_files_rwlock);
1449 + #if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
1450 + card->mixer_oss_change_count++;
1451 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
1452 +index 8946cef245fc..fe5750a05368 100644
1453 +--- a/sound/drivers/dummy.c
1454 ++++ b/sound/drivers/dummy.c
1455 +@@ -422,6 +422,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
1456 +
1457 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
1458 + {
1459 ++ hrtimer_cancel(&dpcm->timer);
1460 + tasklet_kill(&dpcm->tasklet);
1461 + }
1462 +
1463 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
1464 +index ae59dbaa53d9..42d4b13f1fa7 100644
1465 +--- a/sound/pci/au88x0/au88x0_core.c
1466 ++++ b/sound/pci/au88x0/au88x0_core.c
1467 +@@ -1442,9 +1442,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
1468 + int page, p, pp, delta, i;
1469 +
1470 + page =
1471 +- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
1472 +- WT_SUBBUF_MASK)
1473 +- >> WT_SUBBUF_SHIFT;
1474 ++ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
1475 ++ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
1476 + if (dma->nr_periods >= 4)
1477 + delta = (page - dma->period_real) & 3;
1478 + else {
1479 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1480 +index 9e5cd217ffaf..2b7ad3a908de 100644
1481 +--- a/virt/kvm/kvm_main.c
1482 ++++ b/virt/kvm/kvm_main.c
1483 +@@ -2455,7 +2455,7 @@ static long kvm_vm_ioctl(struct file *filp,
1484 + if (copy_from_user(&routing, argp, sizeof(routing)))
1485 + goto out;
1486 + r = -EINVAL;
1487 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
1488 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
1489 + goto out;
1490 + if (routing.flags)
1491 + goto out;