
From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2174 - genpatches-2.6/trunk/3.2
Date: Sat, 07 Jul 2012 16:52:54
Message-Id: 20120707165239.E70D02004B@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2012-07-07 16:52:38 +0000 (Sat, 07 Jul 2012)
3 New Revision: 2174
4
5 Added:
6 genpatches-2.6/trunk/3.2/1021_linux-3.2.22.patch
7 Modified:
8 genpatches-2.6/trunk/3.2/0000_README
9 Log:
10 Linux patch 3.2.22
11
12 Modified: genpatches-2.6/trunk/3.2/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.2/0000_README 2012-07-03 23:10:22 UTC (rev 2173)
15 +++ genpatches-2.6/trunk/3.2/0000_README 2012-07-07 16:52:38 UTC (rev 2174)
16 @@ -124,6 +124,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.2.21
19
20 +Patch: 1021_linux-3.2.22.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.2.22
23 +
24 Patch: 2300_per-pci-device-msi-irq-listing.patch
25 From: http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=da8d1c8ba4dcb16d60be54b233deca9a7cac98dc
26 Desc: Add a per-pci-device subdirectory in sysfs
27
28 Added: genpatches-2.6/trunk/3.2/1021_linux-3.2.22.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.2/1021_linux-3.2.22.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.2/1021_linux-3.2.22.patch 2012-07-07 16:52:38 UTC (rev 2174)
32 @@ -0,0 +1,1245 @@
33 +diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
34 +index 21fd05c..e1f856b 100644
35 +--- a/Documentation/stable_kernel_rules.txt
36 ++++ b/Documentation/stable_kernel_rules.txt
37 +@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
38 + marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
39 + security issue, or some "oh, that's not good" issue. In short, something
40 + critical.
41 ++ - Serious issues as reported by a user of a distribution kernel may also
42 ++ be considered if they fix a notable performance or interactivity issue.
43 ++ As these fixes are not as obvious and have a higher risk of a subtle
44 ++ regression they should only be submitted by a distribution kernel
45 ++ maintainer and include an addendum linking to a bugzilla entry if it
46 ++ exists and additional information on the user-visible impact.
47 + - New device IDs and quirks are also accepted.
48 + - No "theoretical race condition" issues, unless an explanation of how the
49 + race can be exploited is also provided.
50 +diff --git a/Makefile b/Makefile
51 +index 7eb465e..9a7d921 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 3
56 + PATCHLEVEL = 2
57 +-SUBLEVEL = 21
58 ++SUBLEVEL = 22
59 + EXTRAVERSION =
60 + NAME = Saber-toothed Squirrel
61 +
62 +diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
63 +index 7d04875..c0c70a8 100644
64 +--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
65 ++++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
66 +@@ -22,7 +22,7 @@
67 + #define S3C24XX_VA_WATCHDOG S3C_VA_WATCHDOG
68 +
69 + #define S3C2412_VA_SSMC S3C_ADDR_CPU(0x00000000)
70 +-#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00010000)
71 ++#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00100000)
72 +
73 + #define S3C2410_PA_UART (0x50000000)
74 + #define S3C24XX_PA_UART S3C2410_PA_UART
75 +diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
76 +index 40dbb2b..11b19ea 100644
77 +--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
78 ++++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
79 +@@ -24,7 +24,7 @@ static inline void arch_wdt_reset(void)
80 +
81 + __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
82 +
83 +- if (s3c2410_wdtclk)
84 ++ if (!IS_ERR(s3c2410_wdtclk))
85 + clk_enable(s3c2410_wdtclk);
86 +
87 + /* put initial values into count and data */
88 +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
89 +index f3444f7..0c3b775 100644
90 +--- a/arch/x86/include/asm/cpufeature.h
91 ++++ b/arch/x86/include/asm/cpufeature.h
92 +@@ -175,7 +175,7 @@
93 + #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
94 + #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
95 + #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
96 +-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
97 ++#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
98 +
99 + /* Virtualization flags: Linux defined, word 8 */
100 + #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
101 +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
102 +index effff47..cb00ccc 100644
103 +--- a/arch/x86/include/asm/pgtable-3level.h
104 ++++ b/arch/x86/include/asm/pgtable-3level.h
105 +@@ -31,6 +31,60 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
106 + ptep->pte_low = pte.pte_low;
107 + }
108 +
109 ++#define pmd_read_atomic pmd_read_atomic
110 ++/*
111 ++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
112 ++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
113 ++ * where pte_offset_map_lock is called, concurrent page faults are
114 ++ * allowed, if the mmap_sem is hold for reading. An example is mincore
115 ++ * vs page faults vs MADV_DONTNEED. On the page fault side
116 ++ * pmd_populate rightfully does a set_64bit, but if we're reading the
117 ++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
118 ++ * because gcc will not read the 64bit of the pmd atomically. To fix
119 ++ * this all places running pmd_offset_map_lock() while holding the
120 ++ * mmap_sem in read mode, shall read the pmdp pointer using this
121 ++ * function to know if the pmd is null nor not, and in turn to know if
122 ++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
123 ++ * operations.
124 ++ *
125 ++ * Without THP if the mmap_sem is hold for reading, the pmd can only
126 ++ * transition from null to not null while pmd_read_atomic runs. So
127 ++ * we can always return atomic pmd values with this function.
128 ++ *
129 ++ * With THP if the mmap_sem is hold for reading, the pmd can become
130 ++ * trans_huge or none or point to a pte (and in turn become "stable")
131 ++ * at any time under pmd_read_atomic. We could read it really
132 ++ * atomically here with a atomic64_read for the THP enabled case (and
133 ++ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
134 ++ * only return an atomic pmdval if the low part of the pmdval is later
135 ++ * found stable (i.e. pointing to a pte). And we're returning a none
136 ++ * pmdval if the low part of the pmd is none. In some cases the high
137 ++ * and low part of the pmdval returned may not be consistent if THP is
138 ++ * enabled (the low part may point to previously mapped hugepage,
139 ++ * while the high part may point to a more recently mapped hugepage),
140 ++ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
141 ++ * of the pmd to be read atomically to decide if the pmd is unstable
142 ++ * or not, with the only exception of when the low part of the pmd is
143 ++ * zero in which case we return a none pmd.
144 ++ */
145 ++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
146 ++{
147 ++ pmdval_t ret;
148 ++ u32 *tmp = (u32 *)pmdp;
149 ++
150 ++ ret = (pmdval_t) (*tmp);
151 ++ if (ret) {
152 ++ /*
153 ++ * If the low part is null, we must not read the high part
154 ++ * or we can end up with a partial pmd.
155 ++ */
156 ++ smp_rmb();
157 ++ ret |= ((pmdval_t)*(tmp + 1)) << 32;
158 ++ }
159 ++
160 ++ return (pmd_t) { ret };
161 ++}
162 ++
163 + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
164 + {
165 + set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
166 +diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
167 +index c7f64e6..ea6106c 100644
168 +--- a/arch/x86/kernel/cpu/scattered.c
169 ++++ b/arch/x86/kernel/cpu/scattered.c
170 +@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
171 + const struct cpuid_bit *cb;
172 +
173 + static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
174 +- { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
175 ++ { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
176 + { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
177 + { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
178 + { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
179 +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
180 +index a43fa1a..1502c502 100644
181 +--- a/drivers/acpi/acpi_pad.c
182 ++++ b/drivers/acpi/acpi_pad.c
183 +@@ -36,6 +36,7 @@
184 + #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
185 + #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
186 + static DEFINE_MUTEX(isolated_cpus_lock);
187 ++static DEFINE_MUTEX(round_robin_lock);
188 +
189 + static unsigned long power_saving_mwait_eax;
190 +
191 +@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
192 + if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
193 + return;
194 +
195 +- mutex_lock(&isolated_cpus_lock);
196 ++ mutex_lock(&round_robin_lock);
197 + cpumask_clear(tmp);
198 + for_each_cpu(cpu, pad_busy_cpus)
199 + cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
200 +@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
201 + if (cpumask_empty(tmp))
202 + cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
203 + if (cpumask_empty(tmp)) {
204 +- mutex_unlock(&isolated_cpus_lock);
205 ++ mutex_unlock(&round_robin_lock);
206 + return;
207 + }
208 + for_each_cpu(cpu, tmp) {
209 +@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
210 + tsk_in_cpu[tsk_index] = preferred_cpu;
211 + cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
212 + cpu_weight[preferred_cpu]++;
213 +- mutex_unlock(&isolated_cpus_lock);
214 ++ mutex_unlock(&round_robin_lock);
215 +
216 + set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
217 + }
218 +diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
219 +index c3d2dfc..b96544a 100644
220 +--- a/drivers/base/power/main.c
221 ++++ b/drivers/base/power/main.c
222 +@@ -869,7 +869,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
223 + dpm_wait_for_children(dev, async);
224 +
225 + if (async_error)
226 +- return 0;
227 ++ goto Complete;
228 +
229 + pm_runtime_get_noresume(dev);
230 + if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
231 +@@ -878,7 +878,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
232 + if (pm_wakeup_pending()) {
233 + pm_runtime_put_sync(dev);
234 + async_error = -EBUSY;
235 +- return 0;
236 ++ goto Complete;
237 + }
238 +
239 + device_lock(dev);
240 +@@ -926,6 +926,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
241 + }
242 +
243 + device_unlock(dev);
244 ++
245 ++ Complete:
246 + complete_all(&dev->power.completion);
247 +
248 + if (error) {
249 +diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
250 +index 0477982..1b5675b 100644
251 +--- a/drivers/char/hw_random/atmel-rng.c
252 ++++ b/drivers/char/hw_random/atmel-rng.c
253 +@@ -34,7 +34,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
254 + u32 *data = buf;
255 +
256 + /* data ready? */
257 +- if (readl(trng->base + TRNG_ODATA) & 1) {
258 ++ if (readl(trng->base + TRNG_ISR) & 1) {
259 + *data = readl(trng->base + TRNG_ODATA);
260 + /*
261 + ensure data ready is only set again AFTER the next data
262 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
263 +index 70ad892..b3ccefa 100644
264 +--- a/drivers/edac/i7core_edac.c
265 ++++ b/drivers/edac/i7core_edac.c
266 +@@ -1932,12 +1932,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
267 + if (mce->bank != 8)
268 + return NOTIFY_DONE;
269 +
270 +-#ifdef CONFIG_SMP
271 +- /* Only handle if it is the right mc controller */
272 +- if (mce->socketid != pvt->i7core_dev->socket)
273 +- return NOTIFY_DONE;
274 +-#endif
275 +-
276 + smp_rmb();
277 + if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
278 + smp_wmb();
279 +@@ -2234,8 +2228,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
280 + if (pvt->enable_scrub)
281 + disable_sdram_scrub_setting(mci);
282 +
283 +- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
284 +-
285 + /* Disable EDAC polling */
286 + i7core_pci_ctl_release(pvt);
287 +
288 +@@ -2336,8 +2328,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
289 + /* DCLK for scrub rate setting */
290 + pvt->dclk_freq = get_dclk_freq();
291 +
292 +- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
293 +-
294 + return 0;
295 +
296 + fail0:
297 +@@ -2481,8 +2471,10 @@ static int __init i7core_init(void)
298 +
299 + pci_rc = pci_register_driver(&i7core_driver);
300 +
301 +- if (pci_rc >= 0)
302 ++ if (pci_rc >= 0) {
303 ++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
304 + return 0;
305 ++ }
306 +
307 + i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
308 + pci_rc);
309 +@@ -2498,6 +2490,7 @@ static void __exit i7core_exit(void)
310 + {
311 + debugf2("MC: " __FILE__ ": %s()\n", __func__);
312 + pci_unregister_driver(&i7core_driver);
313 ++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
314 + }
315 +
316 + module_init(i7core_init);
317 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
318 +index 7a402bf..18a1293 100644
319 +--- a/drivers/edac/sb_edac.c
320 ++++ b/drivers/edac/sb_edac.c
321 +@@ -1661,9 +1661,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
322 + debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
323 + __func__, mci, &sbridge_dev->pdev[0]->dev);
324 +
325 +- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
326 +- &sbridge_mce_dec);
327 +-
328 + /* Remove MC sysfs nodes */
329 + edac_mc_del_mc(mci->dev);
330 +
331 +@@ -1731,8 +1728,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
332 + goto fail0;
333 + }
334 +
335 +- atomic_notifier_chain_register(&x86_mce_decoder_chain,
336 +- &sbridge_mce_dec);
337 + return 0;
338 +
339 + fail0:
340 +@@ -1861,8 +1856,10 @@ static int __init sbridge_init(void)
341 +
342 + pci_rc = pci_register_driver(&sbridge_driver);
343 +
344 +- if (pci_rc >= 0)
345 ++ if (pci_rc >= 0) {
346 ++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &sbridge_mce_dec);
347 + return 0;
348 ++ }
349 +
350 + sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
351 + pci_rc);
352 +@@ -1878,6 +1875,7 @@ static void __exit sbridge_exit(void)
353 + {
354 + debugf2("MC: " __FILE__ ": %s()\n", __func__);
355 + pci_unregister_driver(&sbridge_driver);
356 ++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &sbridge_mce_dec);
357 + }
358 +
359 + module_init(sbridge_init);
360 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
361 +index 3e927ce..a1ee634 100644
362 +--- a/drivers/gpu/drm/drm_edid.c
363 ++++ b/drivers/gpu/drm/drm_edid.c
364 +@@ -585,7 +585,7 @@ static bool
365 + drm_monitor_supports_rb(struct edid *edid)
366 + {
367 + if (edid->revision >= 4) {
368 +- bool ret;
369 ++ bool ret = false;
370 + drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
371 + return ret;
372 + }
373 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
374 +index 3e7c478..3e2edc6 100644
375 +--- a/drivers/gpu/drm/i915/i915_gem.c
376 ++++ b/drivers/gpu/drm/i915/i915_gem.c
377 +@@ -3312,6 +3312,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
378 +
379 + if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
380 + ret = -EIO;
381 ++ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
382 ++ seqno) ||
383 ++ atomic_read(&dev_priv->mm.wedged), 3000)) {
384 ++ ret = -EBUSY;
385 + }
386 + }
387 +
388 +diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
389 +index d3820c2..578ddfc 100644
390 +--- a/drivers/gpu/drm/i915/i915_irq.c
391 ++++ b/drivers/gpu/drm/i915/i915_irq.c
392 +@@ -424,6 +424,30 @@ static void gen6_pm_rps_work(struct work_struct *work)
393 + mutex_unlock(&dev_priv->dev->struct_mutex);
394 + }
395 +
396 ++static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
397 ++ u32 pm_iir)
398 ++{
399 ++ unsigned long flags;
400 ++
401 ++ /*
402 ++ * IIR bits should never already be set because IMR should
403 ++ * prevent an interrupt from being shown in IIR. The warning
404 ++ * displays a case where we've unsafely cleared
405 ++ * dev_priv->pm_iir. Although missing an interrupt of the same
406 ++ * type is not a problem, it displays a problem in the logic.
407 ++ *
408 ++ * The mask bit in IMR is cleared by rps_work.
409 ++ */
410 ++
411 ++ spin_lock_irqsave(&dev_priv->rps_lock, flags);
412 ++ dev_priv->pm_iir |= pm_iir;
413 ++ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
414 ++ POSTING_READ(GEN6_PMIMR);
415 ++ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
416 ++
417 ++ queue_work(dev_priv->wq, &dev_priv->rps_work);
418 ++}
419 ++
420 + static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
421 + {
422 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
423 +@@ -529,16 +553,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
424 + pch_irq_handler(dev, pch_iir);
425 + }
426 +
427 +- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
428 +- unsigned long flags;
429 +- spin_lock_irqsave(&dev_priv->rps_lock, flags);
430 +- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
431 +- dev_priv->pm_iir |= pm_iir;
432 +- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
433 +- POSTING_READ(GEN6_PMIMR);
434 +- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
435 +- queue_work(dev_priv->wq, &dev_priv->rps_work);
436 +- }
437 ++ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
438 ++ gen6_queue_rps_work(dev_priv, pm_iir);
439 +
440 + /* should clear PCH hotplug event before clear CPU irq */
441 + I915_WRITE(SDEIIR, pch_iir);
442 +@@ -634,25 +650,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
443 + i915_handle_rps_change(dev);
444 + }
445 +
446 +- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
447 +- /*
448 +- * IIR bits should never already be set because IMR should
449 +- * prevent an interrupt from being shown in IIR. The warning
450 +- * displays a case where we've unsafely cleared
451 +- * dev_priv->pm_iir. Although missing an interrupt of the same
452 +- * type is not a problem, it displays a problem in the logic.
453 +- *
454 +- * The mask bit in IMR is cleared by rps_work.
455 +- */
456 +- unsigned long flags;
457 +- spin_lock_irqsave(&dev_priv->rps_lock, flags);
458 +- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
459 +- dev_priv->pm_iir |= pm_iir;
460 +- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
461 +- POSTING_READ(GEN6_PMIMR);
462 +- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
463 +- queue_work(dev_priv->wq, &dev_priv->rps_work);
464 +- }
465 ++ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
466 ++ gen6_queue_rps_work(dev_priv, pm_iir);
467 +
468 + /* should clear PCH hotplug event before clear CPU irq */
469 + I915_WRITE(SDEIIR, pch_iir);
470 +diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
471 +index a1eb83d..f38d196 100644
472 +--- a/drivers/gpu/drm/i915/i915_suspend.c
473 ++++ b/drivers/gpu/drm/i915/i915_suspend.c
474 +@@ -739,8 +739,11 @@ static void i915_restore_display(struct drm_device *dev)
475 + if (HAS_PCH_SPLIT(dev)) {
476 + I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
477 + I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
478 +- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
479 ++ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
480 ++ * otherwise we get blank eDP screen after S3 on some machines
481 ++ */
482 + I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
483 ++ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
484 + I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
485 + I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
486 + I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
487 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
488 +index 5c1cdb8..6aa7716 100644
489 +--- a/drivers/gpu/drm/i915/intel_display.c
490 ++++ b/drivers/gpu/drm/i915/intel_display.c
491 +@@ -2187,6 +2187,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
492 + }
493 +
494 + static int
495 ++intel_finish_fb(struct drm_framebuffer *old_fb)
496 ++{
497 ++ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
498 ++ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
499 ++ bool was_interruptible = dev_priv->mm.interruptible;
500 ++ int ret;
501 ++
502 ++ wait_event(dev_priv->pending_flip_queue,
503 ++ atomic_read(&dev_priv->mm.wedged) ||
504 ++ atomic_read(&obj->pending_flip) == 0);
505 ++
506 ++ /* Big Hammer, we also need to ensure that any pending
507 ++ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
508 ++ * current scanout is retired before unpinning the old
509 ++ * framebuffer.
510 ++ *
511 ++ * This should only fail upon a hung GPU, in which case we
512 ++ * can safely continue.
513 ++ */
514 ++ dev_priv->mm.interruptible = false;
515 ++ ret = i915_gem_object_finish_gpu(obj);
516 ++ dev_priv->mm.interruptible = was_interruptible;
517 ++
518 ++ return ret;
519 ++}
520 ++
521 ++static int
522 + intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
523 + struct drm_framebuffer *old_fb)
524 + {
525 +@@ -2224,25 +2251,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
526 + return ret;
527 + }
528 +
529 +- if (old_fb) {
530 +- struct drm_i915_private *dev_priv = dev->dev_private;
531 +- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
532 +-
533 +- wait_event(dev_priv->pending_flip_queue,
534 +- atomic_read(&dev_priv->mm.wedged) ||
535 +- atomic_read(&obj->pending_flip) == 0);
536 +-
537 +- /* Big Hammer, we also need to ensure that any pending
538 +- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
539 +- * current scanout is retired before unpinning the old
540 +- * framebuffer.
541 +- *
542 +- * This should only fail upon a hung GPU, in which case we
543 +- * can safely continue.
544 +- */
545 +- ret = i915_gem_object_finish_gpu(obj);
546 +- (void) ret;
547 +- }
548 ++ if (old_fb)
549 ++ intel_finish_fb(old_fb);
550 +
551 + ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
552 + LEAVE_ATOMIC_MODE_SET);
553 +@@ -3312,6 +3322,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
554 + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
555 + struct drm_device *dev = crtc->dev;
556 +
557 ++ /* Flush any pending WAITs before we disable the pipe. Note that
558 ++ * we need to drop the struct_mutex in order to acquire it again
559 ++ * during the lowlevel dpms routines around a couple of the
560 ++ * operations. It does not look trivial nor desirable to move
561 ++ * that locking higher. So instead we leave a window for the
562 ++ * submission of further commands on the fb before we can actually
563 ++ * disable it. This race with userspace exists anyway, and we can
564 ++ * only rely on the pipe being disabled by userspace after it
565 ++ * receives the hotplug notification and has flushed any pending
566 ++ * batches.
567 ++ */
568 ++ if (crtc->fb) {
569 ++ mutex_lock(&dev->struct_mutex);
570 ++ intel_finish_fb(crtc->fb);
571 ++ mutex_unlock(&dev->struct_mutex);
572 ++ }
573 ++
574 + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
575 +
576 + if (crtc->fb) {
577 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
578 +index 933e66b..f6613dc 100644
579 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
580 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
581 +@@ -306,7 +306,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
582 +
583 + I915_WRITE_CTL(ring,
584 + ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
585 +- | RING_REPORT_64K | RING_VALID);
586 ++ | RING_VALID);
587 +
588 + /* If the head is still not zero, the ring is dead */
589 + if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
590 +@@ -1157,18 +1157,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
591 + struct drm_device *dev = ring->dev;
592 + struct drm_i915_private *dev_priv = dev->dev_private;
593 + unsigned long end;
594 +- u32 head;
595 +-
596 +- /* If the reported head position has wrapped or hasn't advanced,
597 +- * fallback to the slow and accurate path.
598 +- */
599 +- head = intel_read_status_page(ring, 4);
600 +- if (head > ring->head) {
601 +- ring->head = head;
602 +- ring->space = ring_space(ring);
603 +- if (ring->space >= n)
604 +- return 0;
605 +- }
606 +
607 + trace_i915_ring_wait_begin(ring);
608 + end = jiffies + 3 * HZ;
609 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
610 +index 3a4cc32..cc0801d 100644
611 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
612 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
613 +@@ -499,7 +499,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
614 + nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
615 +
616 + ret = drm_fb_helper_init(dev, &nfbdev->helper,
617 +- nv_two_heads(dev) ? 2 : 1, 4);
618 ++ dev->mode_config.num_crtc, 4);
619 + if (ret) {
620 + kfree(nfbdev);
621 + return ret;
622 +diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
623 +index 4c07436..d99aa84 100644
624 +--- a/drivers/hwmon/applesmc.c
625 ++++ b/drivers/hwmon/applesmc.c
626 +@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
627 + int i;
628 +
629 + if (send_command(cmd) || send_argument(key)) {
630 +- pr_warn("%s: read arg fail\n", key);
631 ++ pr_warn("%.4s: read arg fail\n", key);
632 + return -EIO;
633 + }
634 +
635 +@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
636 +
637 + for (i = 0; i < len; i++) {
638 + if (__wait_status(0x05)) {
639 +- pr_warn("%s: read data fail\n", key);
640 ++ pr_warn("%.4s: read data fail\n", key);
641 + return -EIO;
642 + }
643 + buffer[i] = inb(APPLESMC_DATA_PORT);
644 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
645 +index 427468f..0790c98 100644
646 +--- a/drivers/hwmon/coretemp.c
647 ++++ b/drivers/hwmon/coretemp.c
648 +@@ -660,7 +660,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
649 + * sensors. We check this bit only, all the early CPUs
650 + * without thermal sensors will be filtered out.
651 + */
652 +- if (!cpu_has(c, X86_FEATURE_DTS))
653 ++ if (!cpu_has(c, X86_FEATURE_DTHERM))
654 + return;
655 +
656 + if (!pdev) {
657 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
658 +index da2f021..532a902 100644
659 +--- a/drivers/md/dm-thin.c
660 ++++ b/drivers/md/dm-thin.c
661 +@@ -288,8 +288,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
662 +
663 + hlist_del(&cell->list);
664 +
665 +- bio_list_add(inmates, cell->holder);
666 +- bio_list_merge(inmates, &cell->bios);
667 ++ if (inmates) {
668 ++ bio_list_add(inmates, cell->holder);
669 ++ bio_list_merge(inmates, &cell->bios);
670 ++ }
671 +
672 + mempool_free(cell, prison->cell_pool);
673 + }
674 +@@ -312,9 +314,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
675 + */
676 + static void __cell_release_singleton(struct cell *cell, struct bio *bio)
677 + {
678 +- hlist_del(&cell->list);
679 + BUG_ON(cell->holder != bio);
680 + BUG_ON(!bio_list_empty(&cell->bios));
681 ++
682 ++ __cell_release(cell, NULL);
683 + }
684 +
685 + static void cell_release_singleton(struct cell *cell, struct bio *bio)
686 +diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
687 +index b7d1e3e..fb68805 100644
688 +--- a/drivers/media/dvb/siano/smsusb.c
689 ++++ b/drivers/media/dvb/siano/smsusb.c
690 +@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
691 + .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
692 + { USB_DEVICE(0x2040, 0xc0a0),
693 + .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
694 ++ { USB_DEVICE(0x2040, 0xf5a0),
695 ++ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
696 + { } /* Terminating entry */
697 + };
698 +
699 +diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
700 +index 2ca10df..981501f 100644
701 +--- a/drivers/media/video/gspca/gspca.c
702 ++++ b/drivers/media/video/gspca/gspca.c
703 +@@ -1697,7 +1697,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
704 + enum v4l2_buf_type buf_type)
705 + {
706 + struct gspca_dev *gspca_dev = priv;
707 +- int ret;
708 ++ int i, ret;
709 +
710 + if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
711 + return -EINVAL;
712 +@@ -1728,6 +1728,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
713 + wake_up_interruptible(&gspca_dev->wq);
714 +
715 + /* empty the transfer queues */
716 ++ for (i = 0; i < gspca_dev->nframes; i++)
717 ++ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
718 + atomic_set(&gspca_dev->fr_q, 0);
719 + atomic_set(&gspca_dev->fr_i, 0);
720 + gspca_dev->fr_o = 0;
721 +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
722 +index 8dc84d6..86cd532 100644
723 +--- a/drivers/net/can/c_can/c_can.c
724 ++++ b/drivers/net/can/c_can/c_can.c
725 +@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
726 + priv->write_reg(priv, &priv->regs->control,
727 + CONTROL_ENABLE_AR);
728 +
729 +- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
730 +- CAN_CTRLMODE_LOOPBACK)) {
731 ++ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
732 ++ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
733 + /* loopback + silent mode : useful for hot self-test */
734 + priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
735 + CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
736 +diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
737 +index e023379..e59d006 100644
738 +--- a/drivers/net/can/flexcan.c
739 ++++ b/drivers/net/can/flexcan.c
740 +@@ -933,12 +933,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
741 + u32 clock_freq = 0;
742 +
743 + if (pdev->dev.of_node) {
744 +- const u32 *clock_freq_p;
745 ++ const __be32 *clock_freq_p;
746 +
747 + clock_freq_p = of_get_property(pdev->dev.of_node,
748 + "clock-frequency", NULL);
749 + if (clock_freq_p)
750 +- clock_freq = *clock_freq_p;
751 ++ clock_freq = be32_to_cpup(clock_freq_p);
752 + }
753 +
754 + if (!clock_freq) {
755 +diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
756 +index a3e65fd..e556fc3 100644
757 +--- a/drivers/net/ethernet/intel/e1000e/82571.c
758 ++++ b/drivers/net/ethernet/intel/e1000e/82571.c
759 +@@ -2080,8 +2080,9 @@ const struct e1000_info e1000_82574_info = {
760 + | FLAG_HAS_SMART_POWER_DOWN
761 + | FLAG_HAS_AMT
762 + | FLAG_HAS_CTRLEXT_ON_LOAD,
763 +- .flags2 = FLAG2_CHECK_PHY_HANG
764 ++ .flags2 = FLAG2_CHECK_PHY_HANG
765 + | FLAG2_DISABLE_ASPM_L0S
766 ++ | FLAG2_DISABLE_ASPM_L1
767 + | FLAG2_NO_DISABLE_RX,
768 + .pba = 32,
769 + .max_hw_frame_size = DEFAULT_JUMBO,
770 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
771 +index 4e933d1..64d3f98 100644
772 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
773 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
774 +@@ -5132,14 +5132,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
775 + return -EINVAL;
776 + }
777 +
778 +- /* 82573 Errata 17 */
779 +- if (((adapter->hw.mac.type == e1000_82573) ||
780 +- (adapter->hw.mac.type == e1000_82574)) &&
781 +- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
782 +- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
783 +- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
784 +- }
785 +-
786 + while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
787 + usleep_range(1000, 2000);
788 + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
789 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
790 +index 8b0c2ca..6973620 100644
791 +--- a/drivers/net/wireless/ath/ath9k/hw.c
792 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
793 +@@ -718,13 +718,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
794 +
795 + u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
796 + {
797 ++ struct ath_common *common = ath9k_hw_common(ah);
798 ++ int i = 0;
799 ++
800 + REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
801 + udelay(100);
802 + REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
803 +
804 +- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
805 ++ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
806 ++
807 + udelay(100);
808 +
809 ++ if (WARN_ON_ONCE(i >= 100)) {
810 ++ ath_err(common, "PLL4 meaurement not done\n");
811 ++ break;
812 ++ }
813 ++
814 ++ i++;
815 ++ }
816 ++
817 + return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
818 + }
819 + EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
820 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
821 +index f76a814..95437fc 100644
822 +--- a/drivers/net/wireless/ath/ath9k/main.c
823 ++++ b/drivers/net/wireless/ath/ath9k/main.c
824 +@@ -1042,6 +1042,15 @@ void ath_hw_pll_work(struct work_struct *work)
825 + hw_pll_work.work);
826 + u32 pll_sqsum;
827 +
828 ++ /*
829 ++ * ensure that the PLL WAR is executed only
830 ++ * after the STA is associated (or) if the
831 ++ * beaconing had started in interfaces that
832 ++ * uses beacons.
833 ++ */
834 ++ if (!(sc->sc_flags & SC_OP_BEACONS))
835 ++ return;
836 ++
837 + if (AR_SREV_9485(sc->sc_ah)) {
838 +
839 + ath9k_ps_wakeup(sc);
840 +@@ -1486,15 +1495,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
841 + }
842 + }
843 +
844 +- if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
845 +- ((vif->type == NL80211_IFTYPE_ADHOC) &&
846 +- sc->nvifs > 0)) {
847 +- ath_err(common, "Cannot create ADHOC interface when other"
848 +- " interfaces already exist.\n");
849 +- ret = -EINVAL;
850 +- goto out;
851 +- }
852 +-
853 + ath_dbg(common, ATH_DBG_CONFIG,
854 + "Attach a VIF of type: %d\n", vif->type);
855 +
856 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
857 +index 76fd277..c59c592 100644
858 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
859 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
860 +@@ -936,13 +936,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
861 + }
862 +
863 + /* legacy rates */
864 ++ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
865 + if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
866 + !(rate->flags & IEEE80211_RATE_ERP_G))
867 + phy = WLAN_RC_PHY_CCK;
868 + else
869 + phy = WLAN_RC_PHY_OFDM;
870 +
871 +- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
872 + info->rates[i].Rate = rate->hw_value;
873 + if (rate->hw_value_short) {
874 + if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
875 +diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
876 +index 5815cf5..4661a64 100644
877 +--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
878 ++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
879 +@@ -1777,6 +1777,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
880 + return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
881 + }
882 +
883 ++#ifdef CONFIG_IWLWIFI_DEBUG
884 + static ssize_t iwl_dbgfs_log_event_read(struct file *file,
885 + char __user *user_buf,
886 + size_t count, loff_t *ppos)
887 +@@ -1814,6 +1815,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
888 +
889 + return count;
890 + }
891 ++#endif
892 +
893 + static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
894 + char __user *user_buf,
895 +@@ -1941,7 +1943,9 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
896 + return ret;
897 + }
898 +
899 ++#ifdef CONFIG_IWLWIFI_DEBUG
900 + DEBUGFS_READ_WRITE_FILE_OPS(log_event);
901 ++#endif
902 + DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
903 + DEBUGFS_READ_FILE_OPS(fh_reg);
904 + DEBUGFS_READ_FILE_OPS(rx_queue);
905 +@@ -1957,7 +1961,9 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
906 + {
907 + DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
908 + DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
909 ++#ifdef CONFIG_IWLWIFI_DEBUG
910 + DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
911 ++#endif
912 + DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
913 + DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
914 + DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
915 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
916 +index 226faab..fc35308 100644
917 +--- a/drivers/net/xen-netfront.c
918 ++++ b/drivers/net/xen-netfront.c
919 +@@ -1922,14 +1922,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
920 +
921 + dev_dbg(&dev->dev, "%s\n", dev->nodename);
922 +
923 +- unregister_netdev(info->netdev);
924 +-
925 + xennet_disconnect_backend(info);
926 +
927 +- del_timer_sync(&info->rx_refill_timer);
928 +-
929 + xennet_sysfs_delif(info->netdev);
930 +
931 ++ unregister_netdev(info->netdev);
932 ++
933 ++ del_timer_sync(&info->rx_refill_timer);
934 ++
935 + free_percpu(info->stats);
936 +
937 + free_netdev(info->netdev);
938 +diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
939 +index da14432..efc4b7f 100644
940 +--- a/drivers/oprofile/oprofile_perf.c
941 ++++ b/drivers/oprofile/oprofile_perf.c
942 +@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
943 + static DEFINE_MUTEX(oprofile_perf_mutex);
944 +
945 + static struct op_counter_config *counter_config;
946 +-static struct perf_event **perf_events[nr_cpumask_bits];
947 ++static struct perf_event **perf_events[NR_CPUS];
948 + static int num_counters;
949 +
950 + /*
951 +diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
952 +index 54423ab..2ee187f 100644
953 +--- a/drivers/staging/iio/adc/ad7606_core.c
954 ++++ b/drivers/staging/iio/adc/ad7606_core.c
955 +@@ -241,6 +241,7 @@ static const struct attribute_group ad7606_attribute_group = {
956 + .indexed = 1, \
957 + .channel = num, \
958 + .address = num, \
959 ++ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
960 + .scan_index = num, \
961 + .scan_type = IIO_ST('s', 16, 16, 0), \
962 + }
963 +diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
964 +index ec41d38..f4b738f 100644
965 +--- a/drivers/staging/rtl8712/usb_intf.c
966 ++++ b/drivers/staging/rtl8712/usb_intf.c
967 +@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
968 + /* - */
969 + {USB_DEVICE(0x20F4, 0x646B)},
970 + {USB_DEVICE(0x083A, 0xC512)},
971 ++ {USB_DEVICE(0x25D4, 0x4CA1)},
972 ++ {USB_DEVICE(0x25D4, 0x4CAB)},
973 +
974 + /* RTL8191SU */
975 + /* Realtek */
976 +diff --git a/drivers/staging/rts_pstor/rtsx_transport.c b/drivers/staging/rts_pstor/rtsx_transport.c
977 +index 4e3d2c1..9b2e5c9 100644
978 +--- a/drivers/staging/rts_pstor/rtsx_transport.c
979 ++++ b/drivers/staging/rts_pstor/rtsx_transport.c
980 +@@ -335,6 +335,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
981 + int sg_cnt, i, resid;
982 + int err = 0;
983 + long timeleft;
984 ++ struct scatterlist *sg_ptr;
985 + u32 val = TRIG_DMA;
986 +
987 + if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
988 +@@ -371,7 +372,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
989 + sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
990 +
991 + resid = size;
992 +-
993 ++ sg_ptr = sg;
994 + chip->sgi = 0;
995 + /* Usually the next entry will be @sg@ + 1, but if this sg element
996 + * is part of a chained scatterlist, it could jump to the start of
997 +@@ -379,14 +380,14 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
998 + * the proper sg
999 + */
1000 + for (i = 0; i < *index; i++)
1001 +- sg = sg_next(sg);
1002 ++ sg_ptr = sg_next(sg_ptr);
1003 + for (i = *index; i < sg_cnt; i++) {
1004 + dma_addr_t addr;
1005 + unsigned int len;
1006 + u8 option;
1007 +
1008 +- addr = sg_dma_address(sg);
1009 +- len = sg_dma_len(sg);
1010 ++ addr = sg_dma_address(sg_ptr);
1011 ++ len = sg_dma_len(sg_ptr);
1012 +
1013 + RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
1014 + (unsigned int)addr, len);
1015 +@@ -415,7 +416,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
1016 + if (!resid)
1017 + break;
1018 +
1019 +- sg = sg_next(sg);
1020 ++ sg_ptr = sg_next(sg_ptr);
1021 + }
1022 +
1023 + RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
1024 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1025 +index aa0c43f..35e6b5f 100644
1026 +--- a/drivers/usb/serial/cp210x.c
1027 ++++ b/drivers/usb/serial/cp210x.c
1028 +@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
1029 + { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
1030 + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1031 + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
1032 ++ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
1033 + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
1034 + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
1035 + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
1036 +@@ -134,7 +135,13 @@ static const struct usb_device_id id_table[] = {
1037 + { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
1038 + { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
1039 + { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
1040 ++ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
1041 ++ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
1042 + { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
1043 ++ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
1044 ++ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
1045 ++ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
1046 ++ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
1047 + { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
1048 + { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
1049 + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
1050 +@@ -146,7 +153,11 @@ static const struct usb_device_id id_table[] = {
1051 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1052 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1053 + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
1054 ++ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
1055 ++ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
1056 + { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
1057 ++ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
1058 ++ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
1059 + { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
1060 + { } /* Terminating Entry */
1061 + };
1062 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1063 +index 61d6c31..21a4734 100644
1064 +--- a/drivers/usb/serial/option.c
1065 ++++ b/drivers/usb/serial/option.c
1066 +@@ -235,6 +235,7 @@ static void option_instat_callback(struct urb *urb);
1067 + #define NOVATELWIRELESS_PRODUCT_G1 0xA001
1068 + #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
1069 + #define NOVATELWIRELESS_PRODUCT_G2 0xA010
1070 ++#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
1071 +
1072 + /* AMOI PRODUCTS */
1073 + #define AMOI_VENDOR_ID 0x1614
1074 +@@ -496,6 +497,10 @@ static void option_instat_callback(struct urb *urb);
1075 + /* MediaTek products */
1076 + #define MEDIATEK_VENDOR_ID 0x0e8d
1077 +
1078 ++/* Cellient products */
1079 ++#define CELLIENT_VENDOR_ID 0x2692
1080 ++#define CELLIENT_PRODUCT_MEN200 0x9005
1081 ++
1082 + /* some devices interfaces need special handling due to a number of reasons */
1083 + enum option_blacklist_reason {
1084 + OPTION_BLACKLIST_NONE = 0,
1085 +@@ -730,6 +735,8 @@ static const struct usb_device_id option_ids[] = {
1086 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
1087 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
1088 + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
1089 ++ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
1090 ++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
1091 +
1092 + { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
1093 + { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
1094 +@@ -1227,6 +1234,7 @@ static const struct usb_device_id option_ids[] = {
1095 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
1096 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
1097 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
1098 ++ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
1099 + { } /* Terminating entry */
1100 + };
1101 + MODULE_DEVICE_TABLE(usb, option_ids);
1102 +diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
1103 +index 08a07a2..57ceaf3 100644
1104 +--- a/fs/nilfs2/gcinode.c
1105 ++++ b/fs/nilfs2/gcinode.c
1106 +@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
1107 + while (!list_empty(head)) {
1108 + ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
1109 + list_del_init(&ii->i_dirty);
1110 ++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
1111 ++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
1112 + iput(&ii->vfs_inode);
1113 + }
1114 + }
1115 +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
1116 +index bb24ab6..6f24e67 100644
1117 +--- a/fs/nilfs2/segment.c
1118 ++++ b/fs/nilfs2/segment.c
1119 +@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
1120 + if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
1121 + continue;
1122 + list_del_init(&ii->i_dirty);
1123 ++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
1124 ++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
1125 + iput(&ii->vfs_inode);
1126 + }
1127 + }
1128 +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
1129 +index a03c098..bc00876 100644
1130 +--- a/include/asm-generic/pgtable.h
1131 ++++ b/include/asm-generic/pgtable.h
1132 +@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
1133 + #endif /* __HAVE_ARCH_PMD_WRITE */
1134 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1135 +
1136 ++#ifndef pmd_read_atomic
1137 ++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
1138 ++{
1139 ++ /*
1140 ++ * Depend on compiler for an atomic pmd read. NOTE: this is
1141 ++ * only going to work, if the pmdval_t isn't larger than
1142 ++ * an unsigned long.
1143 ++ */
1144 ++ return *pmdp;
1145 ++}
1146 ++#endif
1147 ++
1148 + /*
1149 + * This function is meant to be used by sites walking pagetables with
1150 + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
1151 +@@ -458,14 +470,30 @@ static inline int pmd_write(pmd_t pmd)
1152 + * undefined so behaving like if the pmd was none is safe (because it
1153 + * can return none anyway). The compiler level barrier() is critically
1154 + * important to compute the two checks atomically on the same pmdval.
1155 ++ *
1156 ++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
1157 ++ * care of reading the pmd atomically to avoid SMP race conditions
1158 ++ * against pmd_populate() when the mmap_sem is hold for reading by the
1159 ++ * caller (a special atomic read not done by "gcc" as in the generic
1160 ++ * version above, is also needed when THP is disabled because the page
1161 ++ * fault can populate the pmd from under us).
1162 + */
1163 + static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
1164 + {
1165 +- /* depend on compiler for an atomic pmd read */
1166 +- pmd_t pmdval = *pmd;
1167 ++ pmd_t pmdval = pmd_read_atomic(pmd);
1168 + /*
1169 + * The barrier will stabilize the pmdval in a register or on
1170 + * the stack so that it will stop changing under the code.
1171 ++ *
1172 ++ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
1173 ++ * pmd_read_atomic is allowed to return a not atomic pmdval
1174 ++ * (for example pointing to an hugepage that has never been
1175 ++ * mapped in the pmd). The below checks will only care about
1176 ++ * the low part of the pmd with 32bit PAE x86 anyway, with the
1177 ++ * exception of pmd_none(). So the important thing is that if
1178 ++ * the low part of the pmd is found null, the high part will
1179 ++ * be also null or the pmd_none() check below would be
1180 ++ * confused.
1181 + */
1182 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1183 + barrier();
1184 +diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
1185 +index f961cc5..da587ad 100644
1186 +--- a/net/batman-adv/routing.c
1187 ++++ b/net/batman-adv/routing.c
1188 +@@ -619,6 +619,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
1189 + /* packet needs to be linearized to access the TT changes */
1190 + if (skb_linearize(skb) < 0)
1191 + goto out;
1192 ++ /* skb_linearize() possibly changed skb->data */
1193 ++ tt_query = (struct tt_query_packet *)skb->data;
1194 +
1195 + if (is_my_mac(tt_query->dst))
1196 + handle_tt_response(bat_priv, tt_query);
1197 +diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
1198 +index 5f09a57..088af45 100644
1199 +--- a/net/batman-adv/translation-table.c
1200 ++++ b/net/batman-adv/translation-table.c
1201 +@@ -1816,10 +1816,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
1202 + {
1203 + struct tt_local_entry *tt_local_entry = NULL;
1204 + struct tt_global_entry *tt_global_entry = NULL;
1205 +- bool ret = true;
1206 ++ bool ret = false;
1207 +
1208 + if (!atomic_read(&bat_priv->ap_isolation))
1209 +- return false;
1210 ++ goto out;
1211 +
1212 + tt_local_entry = tt_local_hash_find(bat_priv, dst);
1213 + if (!tt_local_entry)
1214 +@@ -1829,10 +1829,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
1215 + if (!tt_global_entry)
1216 + goto out;
1217 +
1218 +- if (_is_ap_isolated(tt_local_entry, tt_global_entry))
1219 ++ if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
1220 + goto out;
1221 +
1222 +- ret = false;
1223 ++ ret = true;
1224 +
1225 + out:
1226 + if (tt_global_entry)
1227 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
1228 +index c1c99dd..d57d05b 100644
1229 +--- a/net/wireless/reg.c
1230 ++++ b/net/wireless/reg.c
1231 +@@ -1369,7 +1369,7 @@ static void reg_set_request_processed(void)
1232 + spin_unlock(&reg_requests_lock);
1233 +
1234 + if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
1235 +- cancel_delayed_work_sync(&reg_timeout);
1236 ++ cancel_delayed_work(&reg_timeout);
1237 +
1238 + if (need_more_processing)
1239 + schedule_work(&reg_work);
1240 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1241 +index 0005bde..5f096a5 100644
1242 +--- a/sound/pci/hda/patch_realtek.c
1243 ++++ b/sound/pci/hda/patch_realtek.c
1244 +@@ -5988,6 +5988,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
1245 + { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
1246 + { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
1247 + { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
1248 ++ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
1249 + { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
1250 + .patch = patch_alc861 },
1251 + { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
1252 +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
1253 +index 11224ed..323d4d9 100644
1254 +--- a/tools/hv/hv_kvp_daemon.c
1255 ++++ b/tools/hv/hv_kvp_daemon.c
1256 +@@ -384,14 +384,18 @@ int main(void)
1257 + pfd.fd = fd;
1258 +
1259 + while (1) {
1260 ++ struct sockaddr *addr_p = (struct sockaddr *) &addr;
1261 ++ socklen_t addr_l = sizeof(addr);
1262 + pfd.events = POLLIN;
1263 + pfd.revents = 0;
1264 + poll(&pfd, 1, -1);
1265 +
1266 +- len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
1267 ++ len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
1268 ++ addr_p, &addr_l);
1269 +
1270 +- if (len < 0) {
1271 +- syslog(LOG_ERR, "recv failed; error:%d", len);
1272 ++ if (len < 0 || addr.nl_pid) {
1273 ++ syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
1274 ++ addr.nl_pid, errno, strerror(errno));
1275 + close(fd);
1276 + return -1;
1277 + }