From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2195 - genpatches-2.6/trunk/3.0
Date: Thu, 23 Aug 2012 17:23:26
Message-Id: 20120823172313.6AC6B2049A@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2012-08-23 17:22:09 +0000 (Thu, 23 Aug 2012)
3 New Revision: 2195
4
5 Added:
6 genpatches-2.6/trunk/3.0/1039_linux-3.0.40.patch
7 genpatches-2.6/trunk/3.0/1040_linux-3.0.41.patch
8 Modified:
9 genpatches-2.6/trunk/3.0/0000_README
10 Log:
11 Linux patches 3.0.40 and 3.0.41.
12
13 Modified: genpatches-2.6/trunk/3.0/0000_README
14 ===================================================================
15 --- genpatches-2.6/trunk/3.0/0000_README 2012-08-23 15:35:01 UTC (rev 2194)
16 +++ genpatches-2.6/trunk/3.0/0000_README 2012-08-23 17:22:09 UTC (rev 2195)
17 @@ -191,6 +191,14 @@
18 From: http://www.kernel.org
19 Desc: Linux 3.0.39
20
21 +Patch: 1039_linux-3.0.40.patch
22 +From: http://www.kernel.org
23 +Desc: Linux 3.0.40
24 +
25 +Patch: 1040_linux-3.0.41.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 3.0.41
28 +
29 Patch: 1800_fix-zcache-build.patch
30 From: http://bugs.gentoo.org/show_bug.cgi?id=376325
31 Desc: Fix zcache build error
32
33 Added: genpatches-2.6/trunk/3.0/1039_linux-3.0.40.patch
34 ===================================================================
35 --- genpatches-2.6/trunk/3.0/1039_linux-3.0.40.patch (rev 0)
36 +++ genpatches-2.6/trunk/3.0/1039_linux-3.0.40.patch 2012-08-23 17:22:09 UTC (rev 2195)
37 @@ -0,0 +1,1504 @@
38 +diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
39 +index e1f856b..22bf11b 100644
40 +--- a/Documentation/stable_kernel_rules.txt
41 ++++ b/Documentation/stable_kernel_rules.txt
42 +@@ -1,4 +1,4 @@
43 +-Everything you ever wanted to know about Linux 2.6 -stable releases.
44 ++Everything you ever wanted to know about Linux -stable releases.
45 +
46 + Rules on what kind of patches are accepted, and which ones are not, into the
47 + "-stable" tree:
48 +@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
49 + cherry-picked than this can be specified in the following format in
50 + the sign-off area:
51 +
52 +- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
53 +- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
54 +- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
55 +- Cc: <stable@vger.kernel.org> # .32.x
56 ++ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
57 ++ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
58 ++ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
59 ++ Cc: <stable@vger.kernel.org> # 3.3.x
60 + Signed-off-by: Ingo Molnar <mingo@elte.hu>
61 +
62 + The tag sequence has the meaning of:
63 +@@ -78,6 +78,15 @@ Review cycle:
64 + security kernel team, and not go through the normal review cycle.
65 + Contact the kernel security team for more details on this procedure.
66 +
67 ++Trees:
68 ++
69 ++ - The queues of patches, for both completed versions and in progress
70 ++ versions can be found at:
71 ++ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
72 ++ - The finalized and tagged releases of all stable kernels can be found
73 ++ in separate branches per version at:
74 ++ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
75 ++
76 +
77 + Review committee:
78 +
79 +diff --git a/Makefile b/Makefile
80 +index 3ec1722..ec4fee5 100644
81 +--- a/Makefile
82 ++++ b/Makefile
83 +@@ -1,6 +1,6 @@
84 + VERSION = 3
85 + PATCHLEVEL = 0
86 +-SUBLEVEL = 39
87 ++SUBLEVEL = 40
88 + EXTRAVERSION =
89 + NAME = Sneaky Weasel
90 +
91 +diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
92 +index ab8b35b..0627494 100644
93 +--- a/arch/arm/mach-omap2/opp.c
94 ++++ b/arch/arm/mach-omap2/opp.c
95 +@@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
96 + omap_table_init = 1;
97 +
98 + /* Lets now register with OPP library */
99 +- for (i = 0; i < opp_def_size; i++) {
100 ++ for (i = 0; i < opp_def_size; i++, opp_def++) {
101 + struct omap_hwmod *oh;
102 + struct device *dev;
103 +
104 +@@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
105 + __func__, opp_def->freq,
106 + opp_def->hwmod_name, i, r);
107 + }
108 +- opp_def++;
109 + }
110 +
111 + return 0;
112 +diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
113 +index 73b8c8f..bdace4b 100644
114 +--- a/arch/m68k/include/asm/entry_mm.h
115 ++++ b/arch/m68k/include/asm/entry_mm.h
116 +@@ -35,8 +35,8 @@
117 +
118 + /* the following macro is used when enabling interrupts */
119 + #if defined(MACH_ATARI_ONLY)
120 +- /* block out HSYNC on the atari */
121 +-#define ALLOWINT (~0x400)
122 ++ /* block out HSYNC = ipl 2 on the atari */
123 ++#define ALLOWINT (~0x500)
124 + #define MAX_NOINT_IPL 3
125 + #else
126 + /* portable version */
127 +diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
128 +index 8623f8d..9a5932e 100644
129 +--- a/arch/m68k/kernel/sys_m68k.c
130 ++++ b/arch/m68k/kernel/sys_m68k.c
131 +@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
132 + goto bad_access;
133 + }
134 +
135 +- mem_value = *mem;
136 ++ /*
137 ++ * No need to check for EFAULT; we know that the page is
138 ++ * present and writable.
139 ++ */
140 ++ __get_user(mem_value, mem);
141 + if (mem_value == oldval)
142 +- *mem = newval;
143 ++ __put_user(newval, mem);
144 +
145 + pte_unmap_unlock(pte, ptl);
146 + up_read(&mm->mmap_sem);
147 +diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
148 +index 1cf20bd..33a3580 100644
149 +--- a/arch/powerpc/include/asm/cputime.h
150 ++++ b/arch/powerpc/include/asm/cputime.h
151 +@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
152 + /*
153 + * Convert cputime <-> microseconds
154 + */
155 +-extern u64 __cputime_msec_factor;
156 ++extern u64 __cputime_usec_factor;
157 +
158 + static inline unsigned long cputime_to_usecs(const cputime_t ct)
159 + {
160 +- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
161 ++ return mulhdu(ct, __cputime_usec_factor);
162 + }
163 +
164 + static inline cputime_t usecs_to_cputime(const unsigned long us)
165 +@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
166 + sec = us / 1000000;
167 + if (ct) {
168 + ct *= tb_ticks_per_sec;
169 +- do_div(ct, 1000);
170 ++ do_div(ct, 1000000);
171 + }
172 + if (sec)
173 + ct += (cputime_t) sec * tb_ticks_per_sec;
174 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
175 +index c5cae0d..764e99c 100644
176 +--- a/arch/powerpc/include/asm/reg.h
177 ++++ b/arch/powerpc/include/asm/reg.h
178 +@@ -1000,7 +1000,8 @@
179 + /* Macros for setting and retrieving special purpose registers */
180 + #ifndef __ASSEMBLY__
181 + #define mfmsr() ({unsigned long rval; \
182 +- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
183 ++ asm volatile("mfmsr %0" : "=r" (rval) : \
184 ++ : "memory"); rval;})
185 + #ifdef CONFIG_PPC_BOOK3S_64
186 + #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
187 + : : "r" (v) : "memory")
188 +diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
189 +index bf99cfa..6324008 100644
190 +--- a/arch/powerpc/kernel/ftrace.c
191 ++++ b/arch/powerpc/kernel/ftrace.c
192 +@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
193 +
194 + /*
195 + * On PPC32 the trampoline looks like:
196 +- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
197 +- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
198 +- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
199 ++ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
200 ++ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
201 ++ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
202 + * 0x4e, 0x80, 0x04, 0x20 bctr
203 + */
204 +
205 +@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
206 + pr_devel(" %08x %08x ", jmp[0], jmp[1]);
207 +
208 + /* verify that this is what we expect it to be */
209 +- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
210 +- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
211 +- (jmp[2] != 0x7d6903a6) ||
212 ++ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
213 ++ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
214 ++ (jmp[2] != 0x7d8903a6) ||
215 + (jmp[3] != 0x4e800420)) {
216 + printk(KERN_ERR "Not a trampoline\n");
217 + return -EINVAL;
218 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
219 +index 2de304a..1becd7b 100644
220 +--- a/arch/powerpc/kernel/time.c
221 ++++ b/arch/powerpc/kernel/time.c
222 +@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
223 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING
224 + /*
225 + * Factors for converting from cputime_t (timebase ticks) to
226 +- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
227 ++ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
228 + * These are all stored as 0.64 fixed-point binary fractions.
229 + */
230 + u64 __cputime_jiffies_factor;
231 + EXPORT_SYMBOL(__cputime_jiffies_factor);
232 +-u64 __cputime_msec_factor;
233 +-EXPORT_SYMBOL(__cputime_msec_factor);
234 ++u64 __cputime_usec_factor;
235 ++EXPORT_SYMBOL(__cputime_usec_factor);
236 + u64 __cputime_sec_factor;
237 + EXPORT_SYMBOL(__cputime_sec_factor);
238 + u64 __cputime_clockt_factor;
239 +@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
240 +
241 + div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
242 + __cputime_jiffies_factor = res.result_low;
243 +- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
244 +- __cputime_msec_factor = res.result_low;
245 ++ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
246 ++ __cputime_usec_factor = res.result_low;
247 + div128_by_32(1, 0, tb_ticks_per_sec, &res);
248 + __cputime_sec_factor = res.result_low;
249 + div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
250 +diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
251 +index 58c3f74..9582050 100644
252 +--- a/drivers/acpi/ac.c
253 ++++ b/drivers/acpi/ac.c
254 +@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
255 + ac->charger.properties = ac_props;
256 + ac->charger.num_properties = ARRAY_SIZE(ac_props);
257 + ac->charger.get_property = get_ac_property;
258 +- power_supply_register(&ac->device->dev, &ac->charger);
259 ++ result = power_supply_register(&ac->device->dev, &ac->charger);
260 ++ if (result)
261 ++ goto end;
262 +
263 + printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
264 + acpi_device_name(device), acpi_device_bid(device),
265 +diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
266 +index 3b77ad6..efc2b21 100644
267 +--- a/drivers/gpu/drm/radeon/atombios_dp.c
268 ++++ b/drivers/gpu/drm/radeon/atombios_dp.c
269 +@@ -22,6 +22,7 @@
270 + *
271 + * Authors: Dave Airlie
272 + * Alex Deucher
273 ++ * Jerome Glisse
274 + */
275 + #include "drmP.h"
276 + #include "radeon_drm.h"
277 +@@ -624,7 +625,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
278 + ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
279 + link_status, DP_LINK_STATUS_SIZE, 100);
280 + if (ret <= 0) {
281 +- DRM_ERROR("displayport link status failed\n");
282 + return false;
283 + }
284 +
285 +@@ -797,8 +797,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
286 + else
287 + mdelay(dp_info->rd_interval * 4);
288 +
289 +- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
290 ++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
291 ++ DRM_ERROR("displayport link status failed\n");
292 + break;
293 ++ }
294 +
295 + if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
296 + clock_recovery = true;
297 +@@ -860,8 +862,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
298 + else
299 + mdelay(dp_info->rd_interval * 4);
300 +
301 +- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
302 ++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
303 ++ DRM_ERROR("displayport link status failed\n");
304 + break;
305 ++ }
306 +
307 + if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
308 + channel_eq = true;
309 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
310 +index 1f6a0f5..f1a1e8a 100644
311 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
312 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
313 +@@ -66,14 +66,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
314 +
315 + /* just deal with DP (not eDP) here. */
316 + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
317 +- int saved_dpms = connector->dpms;
318 +-
319 +- /* Only turn off the display it it's physically disconnected */
320 +- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
321 +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
322 +- else if (radeon_dp_needs_link_train(radeon_connector))
323 +- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
324 +- connector->dpms = saved_dpms;
325 ++ struct radeon_connector_atom_dig *dig_connector =
326 ++ radeon_connector->con_priv;
327 ++
328 ++ /* if existing sink type was not DP no need to retrain */
329 ++ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
330 ++ return;
331 ++
332 ++ /* first get sink type as it may be reset after (un)plug */
333 ++ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
334 ++ /* don't do anything if sink is not display port, i.e.,
335 ++ * passive dp->(dvi|hdmi) adaptor
336 ++ */
337 ++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
338 ++ int saved_dpms = connector->dpms;
339 ++ /* Only turn off the display if it's physically disconnected */
340 ++ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
341 ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
342 ++ } else if (radeon_dp_needs_link_train(radeon_connector)) {
343 ++ /* set it to OFF so that drm_helper_connector_dpms()
344 ++ * won't return immediately since the current state
345 ++ * is ON at this point.
346 ++ */
347 ++ connector->dpms = DRM_MODE_DPMS_OFF;
348 ++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
349 ++ }
350 ++ connector->dpms = saved_dpms;
351 ++ }
352 + }
353 + }
354 +
355 +diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
356 +index 3fb2226..72f749d 100644
357 +--- a/drivers/gpu/drm/radeon/radeon_cursor.c
358 ++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
359 +@@ -257,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
360 + if (!(cursor_end & 0x7f))
361 + w--;
362 + }
363 +- if (w <= 0)
364 ++ if (w <= 0) {
365 + w = 1;
366 ++ cursor_end = x - xorigin + w;
367 ++ if (!(cursor_end & 0x7f)) {
368 ++ x--;
369 ++ WARN_ON_ONCE(x < 0);
370 ++ }
371 ++ }
372 + }
373 + }
374 +
375 +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
376 +index 936bbca..d3b3115 100644
377 +--- a/drivers/mmc/host/sdhci-pci.c
378 ++++ b/drivers/mmc/host/sdhci-pci.c
379 +@@ -140,6 +140,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
380 + static const struct sdhci_pci_fixes sdhci_cafe = {
381 + .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
382 + SDHCI_QUIRK_NO_BUSY_IRQ |
383 ++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
384 + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
385 + };
386 +
387 +diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
388 +index 74580bb..c9b123c 100644
389 +--- a/drivers/net/bnx2.c
390 ++++ b/drivers/net/bnx2.c
391 +@@ -5310,7 +5310,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
392 + int k, last;
393 +
394 + if (skb == NULL) {
395 +- j++;
396 ++ j = NEXT_TX_BD(j);
397 + continue;
398 + }
399 +
400 +@@ -5322,8 +5322,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
401 + tx_buf->skb = NULL;
402 +
403 + last = tx_buf->nr_frags;
404 +- j++;
405 +- for (k = 0; k < last; k++, j++) {
406 ++ j = NEXT_TX_BD(j);
407 ++ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
408 + tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
409 + dma_unmap_page(&bp->pdev->dev,
410 + dma_unmap_addr(tx_buf, mapping),
411 +diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
412 +index 3df0c0f..82b1802 100644
413 +--- a/drivers/net/caif/caif_serial.c
414 ++++ b/drivers/net/caif/caif_serial.c
415 +@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
416 +
417 + sprintf(name, "cf%s", tty->name);
418 + dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
419 ++ if (!dev)
420 ++ return -ENOMEM;
421 ++
422 + ser = netdev_priv(dev);
423 + ser->tty = tty_kref_get(tty);
424 + ser->dev = dev;
425 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
426 +index fb50e5a..a631bf7 100644
427 +--- a/drivers/net/tun.c
428 ++++ b/drivers/net/tun.c
429 +@@ -1239,10 +1239,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
430 + int vnet_hdr_sz;
431 + int ret;
432 +
433 +- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
434 ++ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
435 + if (copy_from_user(&ifr, argp, ifreq_len))
436 + return -EFAULT;
437 +-
438 ++ } else {
439 ++ memset(&ifr, 0, sizeof(ifr));
440 ++ }
441 + if (cmd == TUNGETFEATURES) {
442 + /* Currently this just means: "what IFF flags are valid?".
443 + * This is needed because we never checked for invalid flags on
444 +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
445 +index ad0298f..3362449 100644
446 +--- a/drivers/net/usb/kaweth.c
447 ++++ b/drivers/net/usb/kaweth.c
448 +@@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
449 + int retv;
450 + int length = 0; /* shut up GCC */
451 +
452 +- urb = usb_alloc_urb(0, GFP_NOIO);
453 ++ urb = usb_alloc_urb(0, GFP_ATOMIC);
454 + if (!urb)
455 + return -ENOMEM;
456 +
457 +diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
458 +index e68fac6..d2f9576 100644
459 +--- a/drivers/scsi/libsas/sas_expander.c
460 ++++ b/drivers/scsi/libsas/sas_expander.c
461 +@@ -770,7 +770,7 @@ static struct domain_device *sas_ex_discover_end_dev(
462 + }
463 +
464 + /* See if this phy is part of a wide port */
465 +-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
466 ++static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
467 + {
468 + struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
469 + int i;
470 +@@ -786,11 +786,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
471 + sas_port_add_phy(ephy->port, phy->phy);
472 + phy->port = ephy->port;
473 + phy->phy_state = PHY_DEVICE_DISCOVERED;
474 +- return 0;
475 ++ return true;
476 + }
477 + }
478 +
479 +- return -ENODEV;
480 ++ return false;
481 + }
482 +
483 + static struct domain_device *sas_ex_discover_expander(
484 +@@ -928,8 +928,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
485 + return res;
486 + }
487 +
488 +- res = sas_ex_join_wide_port(dev, phy_id);
489 +- if (!res) {
490 ++ if (sas_ex_join_wide_port(dev, phy_id)) {
491 + SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
492 + phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
493 + return res;
494 +@@ -974,8 +973,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
495 + if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
496 + SAS_ADDR(child->sas_addr)) {
497 + ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
498 +- res = sas_ex_join_wide_port(dev, i);
499 +- if (!res)
500 ++ if (sas_ex_join_wide_port(dev, i))
501 + SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
502 + i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
503 +
504 +@@ -1838,32 +1836,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
505 + {
506 + struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
507 + struct domain_device *child;
508 +- bool found = false;
509 +- int res, i;
510 ++ int res;
511 +
512 + SAS_DPRINTK("ex %016llx phy%d new device attached\n",
513 + SAS_ADDR(dev->sas_addr), phy_id);
514 + res = sas_ex_phy_discover(dev, phy_id);
515 + if (res)
516 +- goto out;
517 +- /* to support the wide port inserted */
518 +- for (i = 0; i < dev->ex_dev.num_phys; i++) {
519 +- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
520 +- if (i == phy_id)
521 +- continue;
522 +- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
523 +- SAS_ADDR(ex_phy->attached_sas_addr)) {
524 +- found = true;
525 +- break;
526 +- }
527 +- }
528 +- if (found) {
529 +- sas_ex_join_wide_port(dev, phy_id);
530 ++ return res;
531 ++
532 ++ if (sas_ex_join_wide_port(dev, phy_id))
533 + return 0;
534 +- }
535 ++
536 + res = sas_ex_discover_devices(dev, phy_id);
537 +- if (!res)
538 +- goto out;
539 ++ if (res)
540 ++ return res;
541 + list_for_each_entry(child, &dev->ex_dev.children, siblings) {
542 + if (SAS_ADDR(child->sas_addr) ==
543 + SAS_ADDR(ex_phy->attached_sas_addr)) {
544 +@@ -1873,7 +1859,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
545 + break;
546 + }
547 + }
548 +-out:
549 + return res;
550 + }
551 +
552 +@@ -1972,9 +1957,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
553 + struct domain_device *dev = NULL;
554 +
555 + res = sas_find_bcast_dev(port_dev, &dev);
556 +- if (res)
557 +- goto out;
558 +- if (dev) {
559 ++ while (res == 0 && dev) {
560 + struct expander_device *ex = &dev->ex_dev;
561 + int i = 0, phy_id;
562 +
563 +@@ -1986,8 +1969,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
564 + res = sas_rediscover(dev, phy_id);
565 + i = phy_id + 1;
566 + } while (i < ex->num_phys);
567 ++
568 ++ dev = NULL;
569 ++ res = sas_find_bcast_dev(port_dev, &dev);
570 + }
571 +-out:
572 + return res;
573 + }
574 +
575 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
576 +index a4b9cdb..7f1afde 100644
577 +--- a/drivers/scsi/scsi_error.c
578 ++++ b/drivers/scsi/scsi_error.c
579 +@@ -1665,6 +1665,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
580 + * requests are started.
581 + */
582 + scsi_run_host_queues(shost);
583 ++
584 ++ /*
585 ++ * if eh is active and host_eh_scheduled is pending we need to re-run
586 ++ * recovery. we do this check after scsi_run_host_queues() to allow
587 ++ * everything pent up since the last eh run a chance to make forward
588 ++ * progress before we sync again. Either we'll immediately re-run
589 ++ * recovery or scsi_device_unbusy() will wake us again when these
590 ++ * pending commands complete.
591 ++ */
592 ++ spin_lock_irqsave(shost->host_lock, flags);
593 ++ if (shost->host_eh_scheduled)
594 ++ if (scsi_host_set_state(shost, SHOST_RECOVERY))
595 ++ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
596 ++ spin_unlock_irqrestore(shost->host_lock, flags);
597 + }
598 +
599 + /**
600 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
601 +index 99fc45b..dd454c4 100644
602 +--- a/drivers/scsi/scsi_lib.c
603 ++++ b/drivers/scsi/scsi_lib.c
604 +@@ -481,15 +481,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
605 + */
606 + static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
607 + {
608 ++ struct scsi_device *sdev = cmd->device;
609 + struct request *req = cmd->request;
610 + unsigned long flags;
611 +
612 ++ /*
613 ++ * We need to hold a reference on the device to avoid the queue being
614 ++ * killed after the unlock and before scsi_run_queue is invoked which
615 ++ * may happen because scsi_unprep_request() puts the command which
616 ++ * releases its reference on the device.
617 ++ */
618 ++ get_device(&sdev->sdev_gendev);
619 ++
620 + spin_lock_irqsave(q->queue_lock, flags);
621 + scsi_unprep_request(req);
622 + blk_requeue_request(q, req);
623 + spin_unlock_irqrestore(q->queue_lock, flags);
624 +
625 + scsi_run_queue(q);
626 ++
627 ++ put_device(&sdev->sdev_gendev);
628 + }
629 +
630 + void scsi_next_command(struct scsi_cmnd *cmd)
631 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
632 +index 6e7ea4a..a48b59c 100644
633 +--- a/drivers/scsi/scsi_scan.c
634 ++++ b/drivers/scsi/scsi_scan.c
635 +@@ -1710,6 +1710,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
636 + {
637 + struct scsi_device *sdev;
638 + shost_for_each_device(sdev, shost) {
639 ++ /* target removed before the device could be added */
640 ++ if (sdev->sdev_state == SDEV_DEL)
641 ++ continue;
642 + if (!scsi_host_scan_allowed(shost) ||
643 + scsi_sysfs_add_sdev(sdev) != 0)
644 + __scsi_remove_device(sdev);
645 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
646 +index e0bd3f7..de21547 100644
647 +--- a/drivers/scsi/scsi_sysfs.c
648 ++++ b/drivers/scsi/scsi_sysfs.c
649 +@@ -962,7 +962,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
650 + struct scsi_device *sdev;
651 +
652 + spin_lock_irqsave(shost->host_lock, flags);
653 +- starget->reap_ref++;
654 + restart:
655 + list_for_each_entry(sdev, &shost->__devices, siblings) {
656 + if (sdev->channel != starget->channel ||
657 +@@ -976,14 +975,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
658 + goto restart;
659 + }
660 + spin_unlock_irqrestore(shost->host_lock, flags);
661 +- scsi_target_reap(starget);
662 +-}
663 +-
664 +-static int __remove_child (struct device * dev, void * data)
665 +-{
666 +- if (scsi_is_target_device(dev))
667 +- __scsi_remove_target(to_scsi_target(dev));
668 +- return 0;
669 + }
670 +
671 + /**
672 +@@ -996,14 +987,34 @@ static int __remove_child (struct device * dev, void * data)
673 + */
674 + void scsi_remove_target(struct device *dev)
675 + {
676 +- if (scsi_is_target_device(dev)) {
677 +- __scsi_remove_target(to_scsi_target(dev));
678 +- return;
679 ++ struct Scsi_Host *shost = dev_to_shost(dev->parent);
680 ++ struct scsi_target *starget, *found;
681 ++ unsigned long flags;
682 ++
683 ++ restart:
684 ++ found = NULL;
685 ++ spin_lock_irqsave(shost->host_lock, flags);
686 ++ list_for_each_entry(starget, &shost->__targets, siblings) {
687 ++ if (starget->state == STARGET_DEL)
688 ++ continue;
689 ++ if (starget->dev.parent == dev || &starget->dev == dev) {
690 ++ found = starget;
691 ++ found->reap_ref++;
692 ++ break;
693 ++ }
694 + }
695 ++ spin_unlock_irqrestore(shost->host_lock, flags);
696 +
697 +- get_device(dev);
698 +- device_for_each_child(dev, NULL, __remove_child);
699 +- put_device(dev);
700 ++ if (found) {
701 ++ __scsi_remove_target(found);
702 ++ scsi_target_reap(found);
703 ++ /* in the case where @dev has multiple starget children,
704 ++ * continue removing.
705 ++ *
706 ++ * FIXME: does such a case exist?
707 ++ */
708 ++ goto restart;
709 ++ }
710 + }
711 + EXPORT_SYMBOL(scsi_remove_target);
712 +
713 +diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
714 +index ca3c303..4d1f996 100644
715 +--- a/drivers/usb/core/devio.c
716 ++++ b/drivers/usb/core/devio.c
717 +@@ -1557,10 +1557,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
718 + void __user *addr = as->userurb;
719 + unsigned int i;
720 +
721 +- if (as->userbuffer && urb->actual_length)
722 +- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
723 +- urb->actual_length))
724 ++ if (as->userbuffer && urb->actual_length) {
725 ++ if (urb->number_of_packets > 0) /* Isochronous */
726 ++ i = urb->transfer_buffer_length;
727 ++ else /* Non-Isoc */
728 ++ i = urb->actual_length;
729 ++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
730 + return -EFAULT;
731 ++ }
732 + if (put_user(as->status, &userurb->status))
733 + return -EFAULT;
734 + if (put_user(urb->actual_length, &userurb->actual_length))
735 +diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
736 +index 1fc8f12..347bb05 100644
737 +--- a/drivers/usb/early/ehci-dbgp.c
738 ++++ b/drivers/usb/early/ehci-dbgp.c
739 +@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
740 + writel(FLAG_CF, &ehci_regs->configured_flag);
741 +
742 + /* Wait until the controller is no longer halted */
743 +- loop = 10;
744 ++ loop = 1000;
745 + do {
746 + status = readl(&ehci_regs->status);
747 + if (!(status & STS_HALT))
748 +diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
749 +index 2ac1d21..a52404a 100644
750 +--- a/drivers/usb/gadget/u_ether.c
751 ++++ b/drivers/usb/gadget/u_ether.c
752 +@@ -803,12 +803,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
753 +
754 + SET_ETHTOOL_OPS(net, &ops);
755 +
756 +- /* two kinds of host-initiated state changes:
757 +- * - iff DATA transfer is active, carrier is "on"
758 +- * - tx queueing enabled if open *and* carrier is "on"
759 +- */
760 +- netif_carrier_off(net);
761 +-
762 + dev->gadget = g;
763 + SET_NETDEV_DEV(net, &g->dev);
764 + SET_NETDEV_DEVTYPE(net, &gadget_type);
765 +@@ -822,6 +816,12 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
766 + INFO(dev, "HOST MAC %pM\n", dev->host_mac);
767 +
768 + the_dev = dev;
769 ++
770 ++ /* two kinds of host-initiated state changes:
771 ++ * - iff DATA transfer is active, carrier is "on"
772 ++ * - tx queueing enabled if open *and* carrier is "on"
773 ++ */
774 ++ netif_carrier_off(net);
775 + }
776 +
777 + return status;
778 +diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
779 +index 7ec1409..8006a28 100644
780 +--- a/fs/btrfs/async-thread.c
781 ++++ b/fs/btrfs/async-thread.c
782 +@@ -212,10 +212,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
783 +
784 + work->ordered_func(work);
785 +
786 +- /* now take the lock again and call the freeing code */
787 ++ /* now take the lock again and drop our item from the list */
788 + spin_lock(&workers->order_lock);
789 + list_del(&work->order_list);
790 ++ spin_unlock(&workers->order_lock);
791 ++
792 ++ /*
793 ++ * we don't want to call the ordered free functions
794 ++ * with the lock held though
795 ++ */
796 + work->ordered_free(work);
797 ++ spin_lock(&workers->order_lock);
798 + }
799 +
800 + spin_unlock(&workers->order_lock);
801 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
802 +index 264f694..ebe95f5 100644
803 +--- a/fs/ext4/balloc.c
804 ++++ b/fs/ext4/balloc.c
805 +@@ -514,7 +514,8 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
806 + if (bitmap_bh == NULL)
807 + continue;
808 +
809 +- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
810 ++ x = ext4_count_free(bitmap_bh->b_data,
811 ++ EXT4_BLOCKS_PER_GROUP(sb) / 8);
812 + printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
813 + i, ext4_free_blks_count(sb, gdp), x);
814 + bitmap_count += x;
815 +diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
816 +index fa3af81..012faaa 100644
817 +--- a/fs/ext4/bitmap.c
818 ++++ b/fs/ext4/bitmap.c
819 +@@ -15,15 +15,13 @@
820 +
821 + static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
822 +
823 +-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
824 ++unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
825 + {
826 + unsigned int i, sum = 0;
827 +
828 +- if (!map)
829 +- return 0;
830 + for (i = 0; i < numchars; i++)
831 +- sum += nibblemap[map->b_data[i] & 0xf] +
832 +- nibblemap[(map->b_data[i] >> 4) & 0xf];
833 ++ sum += nibblemap[bitmap[i] & 0xf] +
834 ++ nibblemap[(bitmap[i] >> 4) & 0xf];
835 + return sum;
836 + }
837 +
838 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
839 +index 1a34c1c..e0113aa 100644
840 +--- a/fs/ext4/ext4.h
841 ++++ b/fs/ext4/ext4.h
842 +@@ -1713,7 +1713,7 @@ struct mmpd_data {
843 + # define NORET_AND noreturn,
844 +
845 + /* bitmap.c */
846 +-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
847 ++extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
848 +
849 + /* balloc.c */
850 + extern unsigned int ext4_block_group(struct super_block *sb,
851 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
852 +index 412469b..29272de 100644
853 +--- a/fs/ext4/ialloc.c
854 ++++ b/fs/ext4/ialloc.c
855 +@@ -1193,7 +1193,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
856 + if (!bitmap_bh)
857 + continue;
858 +
859 +- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
860 ++ x = ext4_count_free(bitmap_bh->b_data,
861 ++ EXT4_INODES_PER_GROUP(sb) / 8);
862 + printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
863 + (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
864 + bitmap_count += x;
865 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
866 +index c1e6a72..18fee6d 100644
867 +--- a/fs/ext4/inode.c
868 ++++ b/fs/ext4/inode.c
869 +@@ -1134,6 +1134,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
870 + used = ei->i_reserved_data_blocks;
871 + }
872 +
873 ++ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
874 ++ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
875 ++ "with only %d reserved metadata blocks\n", __func__,
876 ++ inode->i_ino, ei->i_allocated_meta_blocks,
877 ++ ei->i_reserved_meta_blocks);
878 ++ WARN_ON(1);
879 ++ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
880 ++ }
881 ++
882 + /* Update per-inode reservations */
883 + ei->i_reserved_data_blocks -= used;
884 + ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
885 +diff --git a/fs/locks.c b/fs/locks.c
886 +index b286539..35388d5 100644
887 +--- a/fs/locks.c
888 ++++ b/fs/locks.c
889 +@@ -315,7 +315,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
890 + return 0;
891 + }
892 +
893 +-static int assign_type(struct file_lock *fl, int type)
894 ++static int assign_type(struct file_lock *fl, long type)
895 + {
896 + switch (type) {
897 + case F_RDLCK:
898 +@@ -452,7 +452,7 @@ static const struct lock_manager_operations lease_manager_ops = {
899 + /*
900 + * Initialize a lease, use the default lock manager operations
901 + */
902 +-static int lease_init(struct file *filp, int type, struct file_lock *fl)
903 ++static int lease_init(struct file *filp, long type, struct file_lock *fl)
904 + {
905 + if (assign_type(fl, type) != 0)
906 + return -EINVAL;
907 +@@ -470,7 +470,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
908 + }
909 +
910 + /* Allocate a file_lock initialised to this type of lease */
911 +-static struct file_lock *lease_alloc(struct file *filp, int type)
912 ++static struct file_lock *lease_alloc(struct file *filp, long type)
913 + {
914 + struct file_lock *fl = locks_alloc_lock();
915 + int error = -ENOMEM;
916 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
917 +index dd2f130..6c6e2c4 100644
918 +--- a/fs/nfs/file.c
919 ++++ b/fs/nfs/file.c
920 +@@ -493,8 +493,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
921 +
922 + dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
923 +
924 +- /* Only do I/O if gfp is a superset of GFP_KERNEL */
925 +- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
926 ++ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
927 ++ * doing this memory reclaim for a fs-related allocation.
928 ++ */
929 ++ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
930 ++ !(current->flags & PF_FSTRANS)) {
931 + int how = FLUSH_SYNC;
932 +
933 + /* Don't let kswapd deadlock waiting for OOM RPC calls */
934 +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
935 +index 6c74097..f91d589 100644
936 +--- a/fs/nfsd/nfs4xdr.c
937 ++++ b/fs/nfsd/nfs4xdr.c
938 +@@ -2010,7 +2010,7 @@ out_acl:
939 + if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
940 + if ((buflen -= 4) < 0)
941 + goto out_resource;
942 +- WRITE32(1);
943 ++ WRITE32(0);
944 + }
945 + if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
946 + if ((buflen -= 4) < 0)
947 +diff --git a/include/linux/cpu.h b/include/linux/cpu.h
948 +index 5f09323..42af2ea 100644
949 +--- a/include/linux/cpu.h
950 ++++ b/include/linux/cpu.h
951 +@@ -66,8 +66,9 @@ enum {
952 + /* migration should happen before other stuff but after perf */
953 + CPU_PRI_PERF = 20,
954 + CPU_PRI_MIGRATION = 10,
955 +- /* prepare workqueues for other notifiers */
956 +- CPU_PRI_WORKQUEUE = 5,
957 ++ /* bring up workqueues before normal notifiers and down after */
958 ++ CPU_PRI_WORKQUEUE_UP = 5,
959 ++ CPU_PRI_WORKQUEUE_DOWN = -5,
960 + };
961 +
962 + #ifdef CONFIG_SMP
963 +diff --git a/kernel/futex.c b/kernel/futex.c
964 +index 11e8924..24bc59c 100644
965 +--- a/kernel/futex.c
966 ++++ b/kernel/futex.c
967 +@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
968 + * @uaddr2: the pi futex we will take prior to returning to user-space
969 + *
970 + * The caller will wait on uaddr and will be requeued by futex_requeue() to
971 +- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
972 +- * complete the acquisition of the rt_mutex prior to returning to userspace.
973 +- * This ensures the rt_mutex maintains an owner when it has waiters; without
974 +- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
975 +- * need to.
976 ++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
977 ++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
978 ++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
979 ++ * without one, the pi logic would not know which task to boost/deboost, if
980 ++ * there was a need to.
981 + *
982 + * We call schedule in futex_wait_queue_me() when we enqueue and return there
983 + * via the following:
984 +@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
985 + struct futex_q q = futex_q_init;
986 + int res, ret;
987 +
988 ++ if (uaddr == uaddr2)
989 ++ return -EINVAL;
990 ++
991 + if (!bitset)
992 + return -EINVAL;
993 +
994 +@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
995 + * signal. futex_unlock_pi() will not destroy the lock_ptr nor
996 + * the pi_state.
997 + */
998 +- WARN_ON(!&q.pi_state);
999 ++ WARN_ON(!q.pi_state);
1000 + pi_mutex = &q.pi_state->pi_mutex;
1001 + ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
1002 + debug_rt_mutex_free_waiter(&rt_waiter);
1003 +@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
1004 + * fault, unlock the rt_mutex and return the fault to userspace.
1005 + */
1006 + if (ret == -EFAULT) {
1007 +- if (rt_mutex_owner(pi_mutex) == current)
1008 ++ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
1009 + rt_mutex_unlock(pi_mutex);
1010 + } else if (ret == -EINTR) {
1011 + /*
1012 +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
1013 +index 8884c27..32f1590 100644
1014 +--- a/kernel/power/hibernate.c
1015 ++++ b/kernel/power/hibernate.c
1016 +@@ -344,6 +344,7 @@ int hibernation_snapshot(int platform_mode)
1017 + goto Complete_devices;
1018 +
1019 + suspend_console();
1020 ++ ftrace_stop();
1021 + pm_restrict_gfp_mask();
1022 + error = dpm_suspend(PMSG_FREEZE);
1023 + if (error)
1024 +@@ -369,6 +370,7 @@ int hibernation_snapshot(int platform_mode)
1025 + if (error || !in_suspend)
1026 + pm_restore_gfp_mask();
1027 +
1028 ++ ftrace_start();
1029 + resume_console();
1030 +
1031 + Complete_devices:
1032 +@@ -471,6 +473,7 @@ int hibernation_restore(int platform_mode)
1033 +
1034 + pm_prepare_console();
1035 + suspend_console();
1036 ++ ftrace_stop();
1037 + pm_restrict_gfp_mask();
1038 + error = dpm_suspend_start(PMSG_QUIESCE);
1039 + if (!error) {
1040 +@@ -478,6 +481,7 @@ int hibernation_restore(int platform_mode)
1041 + dpm_resume_end(PMSG_RECOVER);
1042 + }
1043 + pm_restore_gfp_mask();
1044 ++ ftrace_start();
1045 + resume_console();
1046 + pm_restore_console();
1047 + return error;
1048 +@@ -504,6 +508,7 @@ int hibernation_platform_enter(void)
1049 +
1050 + entering_platform_hibernation = true;
1051 + suspend_console();
1052 ++ ftrace_stop();
1053 + error = dpm_suspend_start(PMSG_HIBERNATE);
1054 + if (error) {
1055 + if (hibernation_ops->recover)
1056 +@@ -547,6 +552,7 @@ int hibernation_platform_enter(void)
1057 + Resume_devices:
1058 + entering_platform_hibernation = false;
1059 + dpm_resume_end(PMSG_RESTORE);
1060 ++ ftrace_start();
1061 + resume_console();
1062 +
1063 + Close:
1064 +diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
1065 +index 449ccc9..e40d205 100644
1066 +--- a/kernel/power/suspend.c
1067 ++++ b/kernel/power/suspend.c
1068 +@@ -23,6 +23,7 @@
1069 + #include <linux/slab.h>
1070 + #include <linux/suspend.h>
1071 + #include <linux/syscore_ops.h>
1072 ++#include <linux/ftrace.h>
1073 + #include <trace/events/power.h>
1074 +
1075 + #include "power.h"
1076 +@@ -210,6 +211,7 @@ int suspend_devices_and_enter(suspend_state_t state)
1077 + goto Close;
1078 + }
1079 + suspend_console();
1080 ++ ftrace_stop();
1081 + suspend_test_start();
1082 + error = dpm_suspend_start(PMSG_SUSPEND);
1083 + if (error) {
1084 +@@ -226,6 +228,7 @@ int suspend_devices_and_enter(suspend_state_t state)
1085 + suspend_test_start();
1086 + dpm_resume_end(PMSG_RESUME);
1087 + suspend_test_finish("resume devices");
1088 ++ ftrace_start();
1089 + resume_console();
1090 + Close:
1091 + if (suspend_ops->end)
1092 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1093 +index ee1845b..e88c924 100644
1094 +--- a/kernel/workqueue.c
1095 ++++ b/kernel/workqueue.c
1096 +@@ -3561,6 +3561,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1097 + return notifier_from_errno(0);
1098 + }
1099 +
1100 ++/*
1101 ++ * Workqueues should be brought up before normal priority CPU notifiers.
1102 ++ * This will be registered high priority CPU notifier.
1103 ++ */
1104 ++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
1105 ++ unsigned long action,
1106 ++ void *hcpu)
1107 ++{
1108 ++ switch (action & ~CPU_TASKS_FROZEN) {
1109 ++ case CPU_UP_PREPARE:
1110 ++ case CPU_UP_CANCELED:
1111 ++ case CPU_DOWN_FAILED:
1112 ++ case CPU_ONLINE:
1113 ++ return workqueue_cpu_callback(nfb, action, hcpu);
1114 ++ }
1115 ++ return NOTIFY_OK;
1116 ++}
1117 ++
1118 ++/*
1119 ++ * Workqueues should be brought down after normal priority CPU notifiers.
1120 ++ * This will be registered as low priority CPU notifier.
1121 ++ */
1122 ++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
1123 ++ unsigned long action,
1124 ++ void *hcpu)
1125 ++{
1126 ++ switch (action & ~CPU_TASKS_FROZEN) {
1127 ++ case CPU_DOWN_PREPARE:
1128 ++ case CPU_DYING:
1129 ++ case CPU_POST_DEAD:
1130 ++ return workqueue_cpu_callback(nfb, action, hcpu);
1131 ++ }
1132 ++ return NOTIFY_OK;
1133 ++}
1134 ++
1135 + #ifdef CONFIG_SMP
1136 +
1137 + struct work_for_cpu {
1138 +@@ -3754,7 +3789,8 @@ static int __init init_workqueues(void)
1139 + unsigned int cpu;
1140 + int i;
1141 +
1142 +- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
1143 ++ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
1144 ++ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
1145 +
1146 + /* initialize gcwqs */
1147 + for_each_gcwq_cpu(cpu) {
1148 +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
1149 +index 5ba4366..804e50f 100644
1150 +--- a/net/caif/caif_dev.c
1151 ++++ b/net/caif/caif_dev.c
1152 +@@ -424,9 +424,9 @@ static int __init caif_device_init(void)
1153 +
1154 + static void __exit caif_device_exit(void)
1155 + {
1156 +- unregister_pernet_subsys(&caif_net_ops);
1157 + unregister_netdevice_notifier(&caif_device_notifier);
1158 + dev_remove_pack(&caif_packet_type);
1159 ++ unregister_pernet_subsys(&caif_net_ops);
1160 + }
1161 +
1162 + module_init(caif_device_init);
1163 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1164 +index abd936d..861d53f 100644
1165 +--- a/net/core/rtnetlink.c
1166 ++++ b/net/core/rtnetlink.c
1167 +@@ -647,6 +647,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
1168 + }
1169 + }
1170 +
1171 ++static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
1172 ++{
1173 ++ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
1174 ++ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
1175 ++}
1176 ++
1177 + static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
1178 + const struct ifinfomsg *ifm)
1179 + {
1180 +@@ -655,7 +661,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
1181 + /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
1182 + if (ifm->ifi_change)
1183 + flags = (flags & ifm->ifi_change) |
1184 +- (dev->flags & ~ifm->ifi_change);
1185 ++ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
1186 +
1187 + return flags;
1188 + }
1189 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
1190 +index 2b3c23c..062876b 100644
1191 +--- a/net/ipv4/cipso_ipv4.c
1192 ++++ b/net/ipv4/cipso_ipv4.c
1193 +@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
1194 + case CIPSO_V4_TAG_LOCAL:
1195 + /* This is a non-standard tag that we only allow for
1196 + * local connections, so if the incoming interface is
1197 +- * not the loopback device drop the packet. */
1198 +- if (!(skb->dev->flags & IFF_LOOPBACK)) {
1199 ++ * not the loopback device drop the packet. Further,
1200 ++ * there is no legitimate reason for setting this from
1201 ++ * userspace so reject it if skb is NULL. */
1202 ++ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
1203 + err_offset = opt_iter;
1204 + goto validate_return_locked;
1205 + }
1206 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1207 +index 6db041d..b6ec23c 100644
1208 +--- a/net/ipv4/tcp.c
1209 ++++ b/net/ipv4/tcp.c
1210 +@@ -2394,7 +2394,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
1211 + /* Cap the max timeout in ms TCP will retry/retrans
1212 + * before giving up and aborting (ETIMEDOUT) a connection.
1213 + */
1214 +- icsk->icsk_user_timeout = msecs_to_jiffies(val);
1215 ++ if (val < 0)
1216 ++ err = -EINVAL;
1217 ++ else
1218 ++ icsk->icsk_user_timeout = msecs_to_jiffies(val);
1219 + break;
1220 + default:
1221 + err = -ENOPROTOOPT;
1222 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1223 +index 6e33b79c..b76aa2d 100644
1224 +--- a/net/ipv4/tcp_input.c
1225 ++++ b/net/ipv4/tcp_input.c
1226 +@@ -5340,7 +5340,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
1227 + if (tp->copied_seq == tp->rcv_nxt &&
1228 + len - tcp_header_len <= tp->ucopy.len) {
1229 + #ifdef CONFIG_NET_DMA
1230 +- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
1231 ++ if (tp->ucopy.task == current &&
1232 ++ sock_owned_by_user(sk) &&
1233 ++ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
1234 + copied_early = 1;
1235 + eaten = 1;
1236 + }
1237 +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
1238 +index 47ee29f..e85b2487 100644
1239 +--- a/net/sched/sch_sfb.c
1240 ++++ b/net/sched/sch_sfb.c
1241 +@@ -556,6 +556,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
1242 +
1243 + sch->qstats.backlog = q->qdisc->qstats.backlog;
1244 + opts = nla_nest_start(skb, TCA_OPTIONS);
1245 ++ if (opts == NULL)
1246 ++ goto nla_put_failure;
1247 + NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
1248 + return nla_nest_end(skb, opts);
1249 +
1250 +diff --git a/net/sctp/input.c b/net/sctp/input.c
1251 +index 741ed16..cd9eded 100644
1252 +--- a/net/sctp/input.c
1253 ++++ b/net/sctp/input.c
1254 +@@ -737,15 +737,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
1255 +
1256 + epb = &ep->base;
1257 +
1258 +- if (hlist_unhashed(&epb->node))
1259 +- return;
1260 +-
1261 + epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
1262 +
1263 + head = &sctp_ep_hashtable[epb->hashent];
1264 +
1265 + sctp_write_lock(&head->lock);
1266 +- __hlist_del(&epb->node);
1267 ++ hlist_del_init(&epb->node);
1268 + sctp_write_unlock(&head->lock);
1269 + }
1270 +
1271 +@@ -826,7 +823,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
1272 + head = &sctp_assoc_hashtable[epb->hashent];
1273 +
1274 + sctp_write_lock(&head->lock);
1275 +- __hlist_del(&epb->node);
1276 ++ hlist_del_init(&epb->node);
1277 + sctp_write_unlock(&head->lock);
1278 + }
1279 +
1280 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1281 +index 4434853..b70a3ee 100644
1282 +--- a/net/sctp/socket.c
1283 ++++ b/net/sctp/socket.c
1284 +@@ -1160,8 +1160,14 @@ out_free:
1285 + SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
1286 + " kaddrs: %p err: %d\n",
1287 + asoc, kaddrs, err);
1288 +- if (asoc)
1289 ++ if (asoc) {
1290 ++ /* sctp_primitive_ASSOCIATE may have added this association
1291 ++ * To the hash table, try to unhash it, just in case, its a noop
1292 ++ * if it wasn't hashed so we're safe
1293 ++ */
1294 ++ sctp_unhash_established(asoc);
1295 + sctp_association_free(asoc);
1296 ++ }
1297 + return err;
1298 + }
1299 +
1300 +@@ -1871,8 +1877,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1301 + goto out_unlock;
1302 +
1303 + out_free:
1304 +- if (new_asoc)
1305 ++ if (new_asoc) {
1306 ++ sctp_unhash_established(asoc);
1307 + sctp_association_free(asoc);
1308 ++ }
1309 + out_unlock:
1310 + sctp_release_sock(sk);
1311 +
1312 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
1313 +index b6bb225..c57f97f 100644
1314 +--- a/net/sunrpc/sched.c
1315 ++++ b/net/sunrpc/sched.c
1316 +@@ -713,7 +713,9 @@ void rpc_execute(struct rpc_task *task)
1317 +
1318 + static void rpc_async_schedule(struct work_struct *work)
1319 + {
1320 ++ current->flags |= PF_FSTRANS;
1321 + __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
1322 ++ current->flags &= ~PF_FSTRANS;
1323 + }
1324 +
1325 + /**
1326 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
1327 +index 0867070..d0b5210 100644
1328 +--- a/net/sunrpc/xprtrdma/transport.c
1329 ++++ b/net/sunrpc/xprtrdma/transport.c
1330 +@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
1331 + int rc = 0;
1332 +
1333 + if (!xprt->shutdown) {
1334 ++ current->flags |= PF_FSTRANS;
1335 + xprt_clear_connected(xprt);
1336 +
1337 + dprintk("RPC: %s: %sconnect\n", __func__,
1338 +@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
1339 +
1340 + out:
1341 + xprt_wake_pending_tasks(xprt, rc);
1342 +-
1343 + out_clear:
1344 + dprintk("RPC: %s: exit\n", __func__);
1345 + xprt_clear_connecting(xprt);
1346 ++ current->flags &= ~PF_FSTRANS;
1347 + }
1348 +
1349 + /*
1350 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1351 +index ea75079..554111f 100644
1352 +--- a/net/sunrpc/xprtsock.c
1353 ++++ b/net/sunrpc/xprtsock.c
1354 +@@ -1882,6 +1882,8 @@ static void xs_local_setup_socket(struct work_struct *work)
1355 + if (xprt->shutdown)
1356 + goto out;
1357 +
1358 ++ current->flags |= PF_FSTRANS;
1359 ++
1360 + clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1361 + status = __sock_create(xprt->xprt_net, AF_LOCAL,
1362 + SOCK_STREAM, 0, &sock, 1);
1363 +@@ -1915,6 +1917,7 @@ static void xs_local_setup_socket(struct work_struct *work)
1364 + out:
1365 + xprt_clear_connecting(xprt);
1366 + xprt_wake_pending_tasks(xprt, status);
1367 ++ current->flags &= ~PF_FSTRANS;
1368 + }
1369 +
1370 + static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1371 +@@ -1957,6 +1960,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
1372 + if (xprt->shutdown)
1373 + goto out;
1374 +
1375 ++ current->flags |= PF_FSTRANS;
1376 ++
1377 + /* Start by resetting any existing state */
1378 + xs_reset_transport(transport);
1379 + sock = xs_create_sock(xprt, transport,
1380 +@@ -1975,6 +1980,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
1381 + out:
1382 + xprt_clear_connecting(xprt);
1383 + xprt_wake_pending_tasks(xprt, status);
1384 ++ current->flags &= ~PF_FSTRANS;
1385 + }
1386 +
1387 + /*
1388 +@@ -2100,6 +2106,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
1389 + if (xprt->shutdown)
1390 + goto out;
1391 +
1392 ++ current->flags |= PF_FSTRANS;
1393 ++
1394 + if (!sock) {
1395 + clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1396 + sock = xs_create_sock(xprt, transport,
1397 +@@ -2149,6 +2157,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
1398 + case -EINPROGRESS:
1399 + case -EALREADY:
1400 + xprt_clear_connecting(xprt);
1401 ++ current->flags &= ~PF_FSTRANS;
1402 + return;
1403 + case -EINVAL:
1404 + /* Happens, for instance, if the user specified a link
1405 +@@ -2161,6 +2170,7 @@ out_eagain:
1406 + out:
1407 + xprt_clear_connecting(xprt);
1408 + xprt_wake_pending_tasks(xprt, status);
1409 ++ current->flags &= ~PF_FSTRANS;
1410 + }
1411 +
1412 + /**
1413 +diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
1414 +index 788a12c..2ab7850 100644
1415 +--- a/net/wanrouter/wanmain.c
1416 ++++ b/net/wanrouter/wanmain.c
1417 +@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
1418 + * successfully, add it to the interface list.
1419 + */
1420 +
1421 +- if (dev->name == NULL) {
1422 +- err = -EINVAL;
1423 +- } else {
1424 ++#ifdef WANDEBUG
1425 ++ printk(KERN_INFO "%s: registering interface %s...\n",
1426 ++ wanrouter_modname, dev->name);
1427 ++#endif
1428 +
1429 +- #ifdef WANDEBUG
1430 +- printk(KERN_INFO "%s: registering interface %s...\n",
1431 +- wanrouter_modname, dev->name);
1432 +- #endif
1433 +-
1434 +- err = register_netdev(dev);
1435 +- if (!err) {
1436 +- struct net_device *slave = NULL;
1437 +- unsigned long smp_flags=0;
1438 +-
1439 +- lock_adapter_irq(&wandev->lock, &smp_flags);
1440 +-
1441 +- if (wandev->dev == NULL) {
1442 +- wandev->dev = dev;
1443 +- } else {
1444 +- for (slave=wandev->dev;
1445 +- DEV_TO_SLAVE(slave);
1446 +- slave = DEV_TO_SLAVE(slave))
1447 +- DEV_TO_SLAVE(slave) = dev;
1448 +- }
1449 +- ++wandev->ndev;
1450 +-
1451 +- unlock_adapter_irq(&wandev->lock, &smp_flags);
1452 +- err = 0; /* done !!! */
1453 +- goto out;
1454 ++ err = register_netdev(dev);
1455 ++ if (!err) {
1456 ++ struct net_device *slave = NULL;
1457 ++ unsigned long smp_flags=0;
1458 ++
1459 ++ lock_adapter_irq(&wandev->lock, &smp_flags);
1460 ++
1461 ++ if (wandev->dev == NULL) {
1462 ++ wandev->dev = dev;
1463 ++ } else {
1464 ++ for (slave=wandev->dev;
1465 ++ DEV_TO_SLAVE(slave);
1466 ++ slave = DEV_TO_SLAVE(slave))
1467 ++ DEV_TO_SLAVE(slave) = dev;
1468 + }
1469 ++ ++wandev->ndev;
1470 ++
1471 ++ unlock_adapter_irq(&wandev->lock, &smp_flags);
1472 ++ err = 0; /* done !!! */
1473 ++ goto out;
1474 + }
1475 + if (wandev->del_if)
1476 + wandev->del_if(wandev, dev);
1477 +diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
1478 +index 2af0999..74f5a3d 100644
1479 +--- a/sound/drivers/mpu401/mpu401_uart.c
1480 ++++ b/sound/drivers/mpu401/mpu401_uart.c
1481 +@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
1482 + spin_lock_init(&mpu->output_lock);
1483 + spin_lock_init(&mpu->timer_lock);
1484 + mpu->hardware = hardware;
1485 ++ mpu->irq = -1;
1486 + if (! (info_flags & MPU401_INFO_INTEGRATED)) {
1487 + int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
1488 + mpu->res = request_region(port, res_size, "MPU401 UART");
1489 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1490 +index baa7a49..8d288a7 100644
1491 +--- a/sound/pci/hda/patch_realtek.c
1492 ++++ b/sound/pci/hda/patch_realtek.c
1493 +@@ -20133,6 +20133,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
1494 + { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
1495 + { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
1496 + { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
1497 ++ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
1498 + { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
1499 + .patch = patch_alc861 },
1500 + { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
1501 +diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
1502 +index c850e3d..f16f587 100644
1503 +--- a/sound/soc/codecs/wm8962.c
1504 ++++ b/sound/soc/codecs/wm8962.c
1505 +@@ -2890,6 +2890,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
1506 + /* VMID 2*250k */
1507 + snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
1508 + WM8962_VMID_SEL_MASK, 0x100);
1509 ++
1510 ++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
1511 ++ msleep(100);
1512 + break;
1513 +
1514 + case SND_SOC_BIAS_OFF:
1515 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
1516 +index 2194912..1f7616d 100644
1517 +--- a/sound/soc/codecs/wm8994.c
1518 ++++ b/sound/soc/codecs/wm8994.c
1519 +@@ -2127,7 +2127,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
1520 + return -EINVAL;
1521 + }
1522 +
1523 +- bclk_rate = params_rate(params) * 2;
1524 ++ bclk_rate = params_rate(params) * 4;
1525 + switch (params_format(params)) {
1526 + case SNDRV_PCM_FORMAT_S16_LE:
1527 + bclk_rate *= 16;
1528 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
1529 +index 075195e..f0ff776 100644
1530 +--- a/sound/usb/clock.c
1531 ++++ b/sound/usb/clock.c
1532 +@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
1533 + return 0;
1534 +
1535 + /* If a clock source can't tell us whether it's valid, we assume it is */
1536 +- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
1537 ++ if (!uac2_control_is_readable(cs_desc->bmControls,
1538 ++ UAC2_CS_CONTROL_CLOCK_VALID - 1))
1539 + return 1;
1540 +
1541 + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
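A note on the off-by-one fixed just above: UAC2 bmControls packs one two-bit read/write field per control, and the kernel helper indexes those bit pairs from zero, while the spec's control selector constants (such as CLOCK_VALID) are one-based. A standalone sketch of the check, assuming that layout:

#include <stdbool.h>
#include <stdint.h>

/* Each control owns two bits; 'control' is a zero-based bit-pair index. */
static bool demo_uac2_control_is_readable(uint32_t bmControls, uint8_t control)
{
        return (bmControls >> (control * 2)) & 0x1;
}

So a one-based selector value must be passed as selector - 1, which is exactly what the hunk changes.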
1542
1543 Added: genpatches-2.6/trunk/3.0/1040_linux-3.0.41.patch
1544 ===================================================================
1545 --- genpatches-2.6/trunk/3.0/1040_linux-3.0.41.patch (rev 0)
1546 +++ genpatches-2.6/trunk/3.0/1040_linux-3.0.41.patch 2012-08-23 17:22:09 UTC (rev 2195)
1547 @@ -0,0 +1,1939 @@
1548 +diff --git a/MAINTAINERS b/MAINTAINERS
1549 +index de85391..c8c0874 100644
1550 +--- a/MAINTAINERS
1551 ++++ b/MAINTAINERS
1552 +@@ -5247,7 +5247,7 @@ F: Documentation/blockdev/ramdisk.txt
1553 + F: drivers/block/brd.c
1554 +
1555 + RANDOM NUMBER DRIVER
1556 +-M: Matt Mackall <mpm@×××××××.com>
1557 ++M: Theodore Ts'o <tytso@×××.edu>
1558 + S: Maintained
1559 + F: drivers/char/random.c
1560 +
1561 +diff --git a/Makefile b/Makefile
1562 +index ec4fee5..2cbfd97 100644
1563 +--- a/Makefile
1564 ++++ b/Makefile
1565 +@@ -1,6 +1,6 @@
1566 + VERSION = 3
1567 + PATCHLEVEL = 0
1568 +-SUBLEVEL = 40
1569 ++SUBLEVEL = 41
1570 + EXTRAVERSION =
1571 + NAME = Sneaky Weasel
1572 +
1573 +diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
1574 +index 2bf2243..166d6aa 100644
1575 +--- a/arch/arm/configs/mxs_defconfig
1576 ++++ b/arch/arm/configs/mxs_defconfig
1577 +@@ -29,7 +29,6 @@ CONFIG_NO_HZ=y
1578 + CONFIG_HIGH_RES_TIMERS=y
1579 + CONFIG_PREEMPT_VOLUNTARY=y
1580 + CONFIG_AEABI=y
1581 +-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
1582 + CONFIG_AUTO_ZRELADDR=y
1583 + CONFIG_FPE_NWFPE=y
1584 + CONFIG_NET=y
1585 +diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1586 +index 1252a26..42dec04 100644
1587 +--- a/arch/arm/include/asm/cacheflush.h
1588 ++++ b/arch/arm/include/asm/cacheflush.h
1589 +@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
1590 + static inline void
1591 + vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1592 + {
1593 +- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
1594 ++ struct mm_struct *mm = vma->vm_mm;
1595 ++
1596 ++ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
1597 + __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
1598 + vma->vm_flags);
1599 + }
1600 +@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
1601 + static inline void
1602 + vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
1603 + {
1604 +- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
1605 ++ struct mm_struct *mm = vma->vm_mm;
1606 ++
1607 ++ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
1608 + unsigned long addr = user_addr & PAGE_MASK;
1609 + __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
1610 + }
1611 +diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
1612 +index 53cd5b4..875634a 100644
1613 +--- a/arch/arm/mm/tlb-v7.S
1614 ++++ b/arch/arm/mm/tlb-v7.S
1615 +@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
1616 + dsb
1617 + mov r0, r0, lsr #PAGE_SHIFT @ align address
1618 + mov r1, r1, lsr #PAGE_SHIFT
1619 ++#ifdef CONFIG_ARM_ERRATA_720789
1620 ++ mov r3, #0
1621 ++#else
1622 + asid r3, r3 @ mask ASID
1623 ++#endif
1624 + orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
1625 + mov r1, r1, lsl #PAGE_SHIFT
1626 + 1:
1627 ++#ifdef CONFIG_ARM_ERRATA_720789
1628 ++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
1629 ++#else
1630 + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
1631 ++#endif
1632 + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
1633 +
1634 + add r0, r0, #PAGE_SZ
1635 +@@ -70,7 +78,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
1636 + mov r0, r0, lsl #PAGE_SHIFT
1637 + mov r1, r1, lsl #PAGE_SHIFT
1638 + 1:
1639 ++#ifdef CONFIG_ARM_ERRATA_720789
1640 ++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
1641 ++#else
1642 + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
1643 ++#endif
1644 + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
1645 + add r0, r0, #PAGE_SZ
1646 + cmp r0, r1
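For context, ARM erratum 720789 (Cortex-A9 MPCore) concerns broadcast TLB invalidate-by-MVA operations that match on ASID: entries tagged with a different ASID can survive the maintenance operation. The workaround above therefore switches the inner-shareable invalidates to the all-ASID encoding (c8, c3, 3, TLBIMVAAIS) in place of the per-ASID one (c8, c3, 1, TLBIMVAIS), and masks the ASID to zero when building the MVA.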
1647 +diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1648 +index 4468814..6fcc9a0 100644
1649 +--- a/arch/ia64/include/asm/atomic.h
1650 ++++ b/arch/ia64/include/asm/atomic.h
1651 +@@ -18,8 +18,8 @@
1652 + #include <asm/system.h>
1653 +
1654 +
1655 +-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
1656 +-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
1657 ++#define ATOMIC_INIT(i) { (i) }
1658 ++#define ATOMIC64_INIT(i) { (i) }
1659 +
1660 + #define atomic_read(v) (*(volatile int *)&(v)->counter)
1661 + #define atomic64_read(v) (*(volatile long *)&(v)->counter)
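These defines travel together with the mspec.c hunk further down: with the braced form (which matches other architectures), ATOMIC_INIT() is valid only as an initializer in a declaration, so the one caller that assigned it to an existing object has to switch to atomic_set(). A plain-C sketch of the distinction:

typedef struct { volatile int counter; } demo_atomic_t;
#define DEMO_ATOMIC_INIT(i) { (i) }

static demo_atomic_t refs = DEMO_ATOMIC_INIT(1);  /* OK: declaration */

static void demo_reset(demo_atomic_t *a)
{
        /* 'refs = DEMO_ATOMIC_INIT(1);' would not compile: a brace
         * list is not an expression. That is what atomic_set() is for. */
        a->counter = 1;
}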
1662 +diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
1663 +index 782c3a35..3540c5e 100644
1664 +--- a/arch/ia64/kernel/irq_ia64.c
1665 ++++ b/arch/ia64/kernel/irq_ia64.c
1666 +@@ -23,7 +23,6 @@
1667 + #include <linux/ioport.h>
1668 + #include <linux/kernel_stat.h>
1669 + #include <linux/ptrace.h>
1670 +-#include <linux/random.h> /* for rand_initialize_irq() */
1671 + #include <linux/signal.h>
1672 + #include <linux/smp.h>
1673 + #include <linux/threads.h>
1674 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
1675 +index 5d9c61d..e5f7248 100644
1676 +--- a/arch/x86/include/asm/processor.h
1677 ++++ b/arch/x86/include/asm/processor.h
1678 +@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
1679 + u16 apicid;
1680 + u16 initial_apicid;
1681 + u16 x86_clflush_size;
1682 +-#ifdef CONFIG_SMP
1683 + /* number of cores as seen by the OS: */
1684 + u16 booted_cores;
1685 + /* Physical processor id: */
1686 +@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
1687 + u8 compute_unit_id;
1688 + /* Index into per_cpu list: */
1689 + u16 cpu_index;
1690 +-#endif
1691 + } __attribute__((__aligned__(SMP_CACHE_BYTES)));
1692 +
1693 + #define X86_VENDOR_INTEL 0
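Removing the #ifdef means the topology fields exist on uniprocessor builds as well, which is what lets the follow-on hunks (amd_nb, cpu/amd, cpu/intel, mcheck, proc, coretemp, i7core_edac) drop their own CONFIG_SMP guards. The shape of the cleanup, with hypothetical names:

/* Keep the field unconditionally so consumers need no #ifdef. */
struct demo_cpuinfo {
        unsigned short phys_proc_id;    /* boot values on UP, real IDs on SMP */
};

static unsigned int demo_socket_id(const struct demo_cpuinfo *c)
{
        return c->phys_proc_id;         /* no #ifdef CONFIG_SMP at use sites */
}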
1694 +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
1695 +index a81f2d5..dfabea4 100644
1696 +--- a/arch/x86/kernel/alternative.c
1697 ++++ b/arch/x86/kernel/alternative.c
1698 +@@ -220,7 +220,7 @@ void __init arch_init_ideal_nops(void)
1699 + ideal_nops = intel_nops;
1700 + #endif
1701 + }
1702 +-
1703 ++ break;
1704 + default:
1705 + #ifdef CONFIG_X86_64
1706 + ideal_nops = k8_nops;
1707 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1708 +index bae1efe..be16854 100644
1709 +--- a/arch/x86/kernel/amd_nb.c
1710 ++++ b/arch/x86/kernel/amd_nb.c
1711 +@@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
1712 + {
1713 + struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
1714 + unsigned int mask;
1715 +- int cuid = 0;
1716 ++ int cuid;
1717 +
1718 + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1719 + return 0;
1720 +
1721 + pci_read_config_dword(link, 0x1d4, &mask);
1722 +
1723 +-#ifdef CONFIG_SMP
1724 + cuid = cpu_data(cpu).compute_unit_id;
1725 +-#endif
1726 + return (mask >> (4 * cuid)) & 0xf;
1727 + }
1728 +
1729 +@@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
1730 + static unsigned int reset, ban;
1731 + struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
1732 + unsigned int reg;
1733 +- int cuid = 0;
1734 ++ int cuid;
1735 +
1736 + if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
1737 + return -EINVAL;
1738 +@@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
1739 + pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
1740 + }
1741 +
1742 +-#ifdef CONFIG_SMP
1743 + cuid = cpu_data(cpu).compute_unit_id;
1744 +-#endif
1745 + mask <<= 4 * cuid;
1746 + mask |= (0xf ^ (1 << cuid)) << 26;
1747 +
1748 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
1749 +index b13ed39..8115040 100644
1750 +--- a/arch/x86/kernel/cpu/amd.c
1751 ++++ b/arch/x86/kernel/cpu/amd.c
1752 +@@ -146,7 +146,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
1753 +
1754 + static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
1755 + {
1756 +-#ifdef CONFIG_SMP
1757 + /* calling is from identify_secondary_cpu() ? */
1758 + if (!c->cpu_index)
1759 + return;
1760 +@@ -190,7 +189,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
1761 +
1762 + valid_k7:
1763 + ;
1764 +-#endif
1765 + }
1766 +
1767 + static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
1768 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
1769 +index 22a073d..0cb2883 100644
1770 +--- a/arch/x86/kernel/cpu/common.c
1771 ++++ b/arch/x86/kernel/cpu/common.c
1772 +@@ -675,9 +675,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1773 + if (this_cpu->c_early_init)
1774 + this_cpu->c_early_init(c);
1775 +
1776 +-#ifdef CONFIG_SMP
1777 + c->cpu_index = 0;
1778 +-#endif
1779 + filter_cpuid_features(c, false);
1780 +
1781 + setup_smep(c);
1782 +@@ -760,10 +758,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
1783 + c->apicid = c->initial_apicid;
1784 + # endif
1785 + #endif
1786 +-
1787 +-#ifdef CONFIG_X86_HT
1788 + c->phys_proc_id = c->initial_apicid;
1789 +-#endif
1790 + }
1791 +
1792 + setup_smep(c);
1793 +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
1794 +index ed6086e..e0dc000 100644
1795 +--- a/arch/x86/kernel/cpu/intel.c
1796 ++++ b/arch/x86/kernel/cpu/intel.c
1797 +@@ -179,7 +179,6 @@ static void __cpuinit trap_init_f00f_bug(void)
1798 +
1799 + static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
1800 + {
1801 +-#ifdef CONFIG_SMP
1802 + /* calling is from identify_secondary_cpu() ? */
1803 + if (!c->cpu_index)
1804 + return;
1805 +@@ -196,7 +195,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
1806 + WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
1807 + "with B stepping processors.\n");
1808 + }
1809 +-#endif
1810 + }
1811 +
1812 + static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
1813 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1814 +index ff1ae9b..942bda2 100644
1815 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
1816 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
1817 +@@ -122,9 +122,7 @@ void mce_setup(struct mce *m)
1818 + m->time = get_seconds();
1819 + m->cpuvendor = boot_cpu_data.x86_vendor;
1820 + m->cpuid = cpuid_eax(1);
1821 +-#ifdef CONFIG_SMP
1822 + m->socketid = cpu_data(m->extcpu).phys_proc_id;
1823 +-#endif
1824 + m->apicid = cpu_data(m->extcpu).initial_apicid;
1825 + rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
1826 + }
1827 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
1828 +index dc4fb77..b97aa72 100644
1829 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
1830 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
1831 +@@ -65,11 +65,9 @@ struct threshold_bank {
1832 + };
1833 + static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
1834 +
1835 +-#ifdef CONFIG_SMP
1836 + static unsigned char shared_bank[NR_BANKS] = {
1837 + 0, 0, 0, 0, 1
1838 + };
1839 +-#endif
1840 +
1841 + static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
1842 +
1843 +@@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
1844 +
1845 + if (!block)
1846 + per_cpu(bank_map, cpu) |= (1 << bank);
1847 +-#ifdef CONFIG_SMP
1848 ++
1849 + if (shared_bank[bank] && c->cpu_core_id)
1850 + break;
1851 +-#endif
1852 +
1853 + memset(&b, 0, sizeof(b));
1854 + b.cpu = cpu;
1855 +diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
1856 +index 62ac8cb..72c365a 100644
1857 +--- a/arch/x86/kernel/cpu/proc.c
1858 ++++ b/arch/x86/kernel/cpu/proc.c
1859 +@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
1860 + static int show_cpuinfo(struct seq_file *m, void *v)
1861 + {
1862 + struct cpuinfo_x86 *c = v;
1863 +- unsigned int cpu = 0;
1864 ++ unsigned int cpu;
1865 + int i;
1866 +
1867 +-#ifdef CONFIG_SMP
1868 + cpu = c->cpu_index;
1869 +-#endif
1870 + seq_printf(m, "processor\t: %u\n"
1871 + "vendor_id\t: %s\n"
1872 + "cpu family\t: %d\n"
1873 +diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
1874 +index f924280..c4e2465 100644
1875 +--- a/arch/x86/kernel/microcode_core.c
1876 ++++ b/arch/x86/kernel/microcode_core.c
1877 +@@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
1878 + const char *buf, size_t size)
1879 + {
1880 + unsigned long val;
1881 +- int cpu = dev->id;
1882 +- int ret = 0;
1883 +- char *end;
1884 ++ int cpu;
1885 ++ ssize_t ret = 0, tmp_ret;
1886 +
1887 +- val = simple_strtoul(buf, &end, 0);
1888 +- if (end == buf)
1889 ++ /* allow reload only from the BSP */
1890 ++ if (boot_cpu_data.cpu_index != dev->id)
1891 + return -EINVAL;
1892 +
1893 +- if (val == 1) {
1894 +- get_online_cpus();
1895 +- if (cpu_online(cpu))
1896 +- ret = reload_for_cpu(cpu);
1897 +- put_online_cpus();
1898 ++ ret = kstrtoul(buf, 0, &val);
1899 ++ if (ret)
1900 ++ return ret;
1901 ++
1902 ++ if (val != 1)
1903 ++ return size;
1904 ++
1905 ++ get_online_cpus();
1906 ++ for_each_online_cpu(cpu) {
1907 ++ tmp_ret = reload_for_cpu(cpu);
1908 ++ if (tmp_ret != 0)
1909 ++ pr_warn("Error reloading microcode on CPU %d\n", cpu);
1910 ++
1911 ++ /* save retval of the first encountered reload error */
1912 ++ if (!ret)
1913 ++ ret = tmp_ret;
1914 + }
1915 ++ put_online_cpus();
1916 +
1917 + if (!ret)
1918 + ret = size;
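Two behavior changes ride along here: kstrtoul() rejects trailing garbage and overflow where simple_strtoul() silently ignored them, and writing 1 to the boot CPU's reload file (/sys/devices/system/cpu/cpu0/microcode/reload) now reloads microcode on every online CPU rather than one. A userspace analogue of the strict parse (a sketch, not the kernel implementation):

#include <errno.h>
#include <stdlib.h>

static int demo_strict_strtoul(const char *s, int base, unsigned long *res)
{
        char *end;

        errno = 0;
        *res = strtoul(s, &end, base);
        if (errno)
                return -errno;          /* e.g. -ERANGE on overflow */
        if (end == s || (*end != '\0' && *end != '\n'))
                return -EINVAL;         /* empty input or trailing junk */
        return 0;
}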
1919 +diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
1920 +index 25d139c..579051c 100644
1921 +--- a/drivers/char/mspec.c
1922 ++++ b/drivers/char/mspec.c
1923 +@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
1924 + vdata->flags = flags;
1925 + vdata->type = type;
1926 + spin_lock_init(&vdata->lock);
1927 +- vdata->refcnt = ATOMIC_INIT(1);
1928 ++ atomic_set(&vdata->refcnt, 1);
1929 + vma->vm_private_data = vdata;
1930 +
1931 + vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
1932 +diff --git a/drivers/char/random.c b/drivers/char/random.c
1933 +index c35a785..fceac95 100644
1934 +--- a/drivers/char/random.c
1935 ++++ b/drivers/char/random.c
1936 +@@ -125,21 +125,26 @@
1937 + * The current exported interfaces for gathering environmental noise
1938 + * from the devices are:
1939 + *
1940 ++ * void add_device_randomness(const void *buf, unsigned int size);
1941 + * void add_input_randomness(unsigned int type, unsigned int code,
1942 + * unsigned int value);
1943 +- * void add_interrupt_randomness(int irq);
1944 ++ * void add_interrupt_randomness(int irq, int irq_flags);
1945 + * void add_disk_randomness(struct gendisk *disk);
1946 + *
1947 ++ * add_device_randomness() is for adding data to the random pool that
1948 ++ * is likely to differ between two devices (or possibly even per boot).
1949 ++ * This would be things like MAC addresses or serial numbers, or the
1950 ++ * read-out of the RTC. This does *not* add any actual entropy to the
1951 ++ * pool, but it initializes the pool to different values for devices
1952 ++ * that might otherwise be identical and have very little entropy
1953 ++ * available to them (particularly common in the embedded world).
1954 ++ *
1955 + * add_input_randomness() uses the input layer interrupt timing, as well as
1956 + * the event type information from the hardware.
1957 + *
1958 +- * add_interrupt_randomness() uses the inter-interrupt timing as random
1959 +- * inputs to the entropy pool. Note that not all interrupts are good
1960 +- * sources of randomness! For example, the timer interrupts is not a
1961 +- * good choice, because the periodicity of the interrupts is too
1962 +- * regular, and hence predictable to an attacker. Network Interface
1963 +- * Controller interrupts are a better measure, since the timing of the
1964 +- * NIC interrupts are more unpredictable.
1965 ++ * add_interrupt_randomness() uses the interrupt timing as random
1966 ++ * inputs to the entropy pool. Using the cycle counters and the irq source
1967 ++ * as inputs, it feeds the randomness roughly once a second.
1968 + *
1969 + * add_disk_randomness() uses what amounts to the seek time of block
1970 + * layer request events, on a per-disk_devt basis, as input to the
1971 +@@ -248,6 +253,8 @@
1972 + #include <linux/percpu.h>
1973 + #include <linux/cryptohash.h>
1974 + #include <linux/fips.h>
1975 ++#include <linux/ptrace.h>
1976 ++#include <linux/kmemcheck.h>
1977 +
1978 + #ifdef CONFIG_GENERIC_HARDIRQS
1979 + # include <linux/irq.h>
1980 +@@ -256,8 +263,12 @@
1981 + #include <asm/processor.h>
1982 + #include <asm/uaccess.h>
1983 + #include <asm/irq.h>
1984 ++#include <asm/irq_regs.h>
1985 + #include <asm/io.h>
1986 +
1987 ++#define CREATE_TRACE_POINTS
1988 ++#include <trace/events/random.h>
1989 ++
1990 + /*
1991 + * Configuration information
1992 + */
1993 +@@ -266,6 +277,8 @@
1994 + #define SEC_XFER_SIZE 512
1995 + #define EXTRACT_SIZE 10
1996 +
1997 ++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
1998 ++
1999 + /*
2000 + * The minimum number of bits of entropy before we wake up a read on
2001 + * /dev/random. Should be enough to do a significant reseed.
2002 +@@ -420,8 +433,10 @@ struct entropy_store {
2003 + /* read-write data: */
2004 + spinlock_t lock;
2005 + unsigned add_ptr;
2006 ++ unsigned input_rotate;
2007 + int entropy_count;
2008 +- int input_rotate;
2009 ++ int entropy_total;
2010 ++ unsigned int initialized:1;
2011 + __u8 last_data[EXTRACT_SIZE];
2012 + };
2013 +
2014 +@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
2015 + .pool = nonblocking_pool_data
2016 + };
2017 +
2018 ++static __u32 const twist_table[8] = {
2019 ++ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
2020 ++ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
2021 ++
2022 + /*
2023 + * This function adds bytes into the entropy "pool". It does not
2024 + * update the entropy estimate. The caller should call
2025 +@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
2026 + * it's cheap to do so and helps slightly in the expected case where
2027 + * the entropy is concentrated in the low-order bits.
2028 + */
2029 +-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
2030 +- int nbytes, __u8 out[64])
2031 ++static void _mix_pool_bytes(struct entropy_store *r, const void *in,
2032 ++ int nbytes, __u8 out[64])
2033 + {
2034 +- static __u32 const twist_table[8] = {
2035 +- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
2036 +- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
2037 + unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
2038 + int input_rotate;
2039 + int wordmask = r->poolinfo->poolwords - 1;
2040 + const char *bytes = in;
2041 + __u32 w;
2042 +- unsigned long flags;
2043 +
2044 +- /* Taps are constant, so we can load them without holding r->lock. */
2045 + tap1 = r->poolinfo->tap1;
2046 + tap2 = r->poolinfo->tap2;
2047 + tap3 = r->poolinfo->tap3;
2048 + tap4 = r->poolinfo->tap4;
2049 + tap5 = r->poolinfo->tap5;
2050 +
2051 +- spin_lock_irqsave(&r->lock, flags);
2052 +- input_rotate = r->input_rotate;
2053 +- i = r->add_ptr;
2054 ++ smp_rmb();
2055 ++ input_rotate = ACCESS_ONCE(r->input_rotate);
2056 ++ i = ACCESS_ONCE(r->add_ptr);
2057 +
2058 + /* mix one byte at a time to simplify size handling and churn faster */
2059 + while (nbytes--) {
2060 +@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
2061 + input_rotate += i ? 7 : 14;
2062 + }
2063 +
2064 +- r->input_rotate = input_rotate;
2065 +- r->add_ptr = i;
2066 ++ ACCESS_ONCE(r->input_rotate) = input_rotate;
2067 ++ ACCESS_ONCE(r->add_ptr) = i;
2068 ++ smp_wmb();
2069 +
2070 + if (out)
2071 + for (j = 0; j < 16; j++)
2072 + ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
2073 ++}
2074 +
2075 ++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
2076 ++ int nbytes, __u8 out[64])
2077 ++{
2078 ++ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
2079 ++ _mix_pool_bytes(r, in, nbytes, out);
2080 ++}
2081 ++
2082 ++static void mix_pool_bytes(struct entropy_store *r, const void *in,
2083 ++ int nbytes, __u8 out[64])
2084 ++{
2085 ++ unsigned long flags;
2086 ++
2087 ++ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
2088 ++ spin_lock_irqsave(&r->lock, flags);
2089 ++ _mix_pool_bytes(r, in, nbytes, out);
2090 + spin_unlock_irqrestore(&r->lock, flags);
2091 + }
2092 +
2093 +-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
2094 ++struct fast_pool {
2095 ++ __u32 pool[4];
2096 ++ unsigned long last;
2097 ++ unsigned short count;
2098 ++ unsigned char rotate;
2099 ++ unsigned char last_timer_intr;
2100 ++};
2101 ++
2102 ++/*
2103 ++ * This is a fast mixing routine used by the interrupt randomness
2104 ++ * collector. It's hardcoded for an 128 bit pool and assumes that any
2105 ++ * locks that might be needed are taken by the caller.
2106 ++ */
2107 ++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
2108 + {
2109 +- mix_pool_bytes_extract(r, in, bytes, NULL);
2110 ++ const char *bytes = in;
2111 ++ __u32 w;
2112 ++ unsigned i = f->count;
2113 ++ unsigned input_rotate = f->rotate;
2114 ++
2115 ++ while (nbytes--) {
2116 ++ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
2117 ++ f->pool[(i + 1) & 3];
2118 ++ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
2119 ++ input_rotate += (i++ & 3) ? 7 : 14;
2120 ++ }
2121 ++ f->count = i;
2122 ++ f->rotate = input_rotate;
2123 + }
2124 +
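fast_mix() is self-contained enough to exercise outside the kernel; the following is a direct userspace transcription (field widths simplified) for anyone who wants to watch the pool churn:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

struct fast_pool { uint32_t pool[4]; unsigned count; unsigned rotate; };

static uint32_t rol32(uint32_t w, unsigned s)
{
        s &= 31;
        return s ? (w << s) | (w >> (32 - s)) : w;
}

/* Same round as above: one byte at a time into a 128-bit pool,
 * rotating the input and twisting the feedback. */
static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
        const unsigned char *bytes = in;
        uint32_t w;
        unsigned i = f->count;
        unsigned input_rotate = f->rotate;

        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                    f->pool[(i + 1) & 3];
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct fast_pool f;

        memset(&f, 0, sizeof(f));
        fast_mix(&f, "interrupt sample", 16);
        printf("%08x %08x %08x %08x\n",
               f.pool[0], f.pool[1], f.pool[2], f.pool[3]);
        return 0;
}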
2125 + /*
2126 +@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
2127 + */
2128 + static void credit_entropy_bits(struct entropy_store *r, int nbits)
2129 + {
2130 +- unsigned long flags;
2131 +- int entropy_count;
2132 ++ int entropy_count, orig;
2133 +
2134 + if (!nbits)
2135 + return;
2136 +
2137 +- spin_lock_irqsave(&r->lock, flags);
2138 +-
2139 + DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
2140 +- entropy_count = r->entropy_count;
2141 ++retry:
2142 ++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
2143 + entropy_count += nbits;
2144 ++
2145 + if (entropy_count < 0) {
2146 + DEBUG_ENT("negative entropy/overflow\n");
2147 + entropy_count = 0;
2148 + } else if (entropy_count > r->poolinfo->POOLBITS)
2149 + entropy_count = r->poolinfo->POOLBITS;
2150 +- r->entropy_count = entropy_count;
2151 ++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
2152 ++ goto retry;
2153 ++
2154 ++ if (!r->initialized && nbits > 0) {
2155 ++ r->entropy_total += nbits;
2156 ++ if (r->entropy_total > 128)
2157 ++ r->initialized = 1;
2158 ++ }
2159 ++
2160 ++ trace_credit_entropy_bits(r->name, nbits, entropy_count,
2161 ++ r->entropy_total, _RET_IP_);
2162 +
2163 + /* should we wake readers? */
2164 + if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
2165 + wake_up_interruptible(&random_read_wait);
2166 + kill_fasync(&fasync, SIGIO, POLL_IN);
2167 + }
2168 +- spin_unlock_irqrestore(&r->lock, flags);
2169 + }
2170 +
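The rewritten credit_entropy_bits() drops the spinlock for an optimistic update: read the count, clamp it, publish with compare-and-swap, and retry if another CPU raced in between. A C11 sketch of the same shape:

#include <stdatomic.h>

static void demo_credit_bits(atomic_int *entropy_count, int nbits, int poolbits)
{
        int orig, val;

        do {
                orig = atomic_load(entropy_count);
                val = orig + nbits;
                if (val < 0)
                        val = 0;                /* negative entropy/overflow */
                else if (val > poolbits)
                        val = poolbits;         /* pool cannot exceed its size */
        } while (!atomic_compare_exchange_strong(entropy_count, &orig, val));
}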
2171 + /*********************************************************************
2172 +@@ -572,42 +636,24 @@ struct timer_rand_state {
2173 + unsigned dont_count_entropy:1;
2174 + };
2175 +
2176 +-#ifndef CONFIG_GENERIC_HARDIRQS
2177 +-
2178 +-static struct timer_rand_state *irq_timer_state[NR_IRQS];
2179 +-
2180 +-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
2181 +-{
2182 +- return irq_timer_state[irq];
2183 +-}
2184 +-
2185 +-static void set_timer_rand_state(unsigned int irq,
2186 +- struct timer_rand_state *state)
2187 +-{
2188 +- irq_timer_state[irq] = state;
2189 +-}
2190 +-
2191 +-#else
2192 +-
2193 +-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
2194 +-{
2195 +- struct irq_desc *desc;
2196 +-
2197 +- desc = irq_to_desc(irq);
2198 +-
2199 +- return desc->timer_rand_state;
2200 +-}
2201 +-
2202 +-static void set_timer_rand_state(unsigned int irq,
2203 +- struct timer_rand_state *state)
2204 ++/*
2205 ++ * Add device- or boot-specific data to the input and nonblocking
2206 ++ * pools to help initialize them to unique values.
2207 ++ *
2208 ++ * None of this adds any entropy, it is meant to avoid the
2209 ++ * problem of the nonblocking pool having similar initial state
2210 ++ * across largely identical devices.
2211 ++ */
2212 ++void add_device_randomness(const void *buf, unsigned int size)
2213 + {
2214 +- struct irq_desc *desc;
2215 +-
2216 +- desc = irq_to_desc(irq);
2217 ++ unsigned long time = get_cycles() ^ jiffies;
2218 +
2219 +- desc->timer_rand_state = state;
2220 ++ mix_pool_bytes(&input_pool, buf, size, NULL);
2221 ++ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
2222 ++ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
2223 ++ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
2224 + }
2225 +-#endif
2226 ++EXPORT_SYMBOL(add_device_randomness);
2227 +
2228 + static struct timer_rand_state input_timer_state;
2229 +
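Several later hunks in this patch (dmi_scan, wm831x-otp, rtc-wm831x, the USB hub) feed add_device_randomness() with whatever per-device identity they have on hand. A hypothetical driver-side use following the same pattern (names are illustrative, not from the patch):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/random.h>

/* Probe-time seeding with a board-unique value: perturbs the pools so
 * otherwise-identical embedded devices diverge, but credits no entropy. */
static void demo_seed_from_mac(struct net_device *ndev)
{
        add_device_randomness(ndev->dev_addr, ETH_ALEN);
}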
2230 +@@ -624,8 +670,8 @@ static struct timer_rand_state input_timer_state;
2231 + static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
2232 + {
2233 + struct {
2234 +- cycles_t cycles;
2235 + long jiffies;
2236 ++ unsigned cycles;
2237 + unsigned num;
2238 + } sample;
2239 + long delta, delta2, delta3;
2240 +@@ -639,7 +685,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
2241 + sample.jiffies = jiffies;
2242 + sample.cycles = get_cycles();
2243 + sample.num = num;
2244 +- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
2245 ++ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
2246 +
2247 + /*
2248 + * Calculate number of bits of randomness we probably added.
2249 +@@ -696,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
2250 + }
2251 + EXPORT_SYMBOL_GPL(add_input_randomness);
2252 +
2253 +-void add_interrupt_randomness(int irq)
2254 ++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
2255 ++
2256 ++void add_interrupt_randomness(int irq, int irq_flags)
2257 + {
2258 +- struct timer_rand_state *state;
2259 ++ struct entropy_store *r;
2260 ++ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
2261 ++ struct pt_regs *regs = get_irq_regs();
2262 ++ unsigned long now = jiffies;
2263 ++ __u32 input[4], cycles = get_cycles();
2264 ++
2265 ++ input[0] = cycles ^ jiffies;
2266 ++ input[1] = irq;
2267 ++ if (regs) {
2268 ++ __u64 ip = instruction_pointer(regs);
2269 ++ input[2] = ip;
2270 ++ input[3] = ip >> 32;
2271 ++ }
2272 +
2273 +- state = get_timer_rand_state(irq);
2274 ++ fast_mix(fast_pool, input, sizeof(input));
2275 +
2276 +- if (state == NULL)
2277 ++ if ((fast_pool->count & 1023) &&
2278 ++ !time_after(now, fast_pool->last + HZ))
2279 + return;
2280 +
2281 +- DEBUG_ENT("irq event %d\n", irq);
2282 +- add_timer_randomness(state, 0x100 + irq);
2283 ++ fast_pool->last = now;
2284 ++
2285 ++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
2286 ++ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
2287 ++ /*
2288 ++ * If we don't have a valid cycle counter, and we see
2289 ++ * back-to-back timer interrupts, then skip giving credit for
2290 ++ * any entropy.
2291 ++ */
2292 ++ if (cycles == 0) {
2293 ++ if (irq_flags & __IRQF_TIMER) {
2294 ++ if (fast_pool->last_timer_intr)
2295 ++ return;
2296 ++ fast_pool->last_timer_intr = 1;
2297 ++ } else
2298 ++ fast_pool->last_timer_intr = 0;
2299 ++ }
2300 ++ credit_entropy_bits(r, 1);
2301 + }
2302 +
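The early return above is the rate limiter: a per-CPU fast pool spills into a main pool only after 1024 events or once a second, whichever comes first. The predicate in isolation (time_after() being the kernel's wraparound-safe comparison):

/* Spill when the low ten bits of the event count roll over, or when
 * more than hz ticks have passed since the last spill. */
static int demo_should_spill(unsigned count, unsigned long now,
                             unsigned long last, unsigned long hz)
{
        return (count & 1023) == 0 || (long)(now - last) > (long)hz;
}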
2303 + #ifdef CONFIG_BLOCK
2304 +@@ -738,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
2305 + */
2306 + static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
2307 + {
2308 +- __u32 tmp[OUTPUT_POOL_WORDS];
2309 ++ __u32 tmp[OUTPUT_POOL_WORDS];
2310 +
2311 + if (r->pull && r->entropy_count < nbytes * 8 &&
2312 + r->entropy_count < r->poolinfo->POOLBITS) {
2313 +@@ -757,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
2314 +
2315 + bytes = extract_entropy(r->pull, tmp, bytes,
2316 + random_read_wakeup_thresh / 8, rsvd);
2317 +- mix_pool_bytes(r, tmp, bytes);
2318 ++ mix_pool_bytes(r, tmp, bytes, NULL);
2319 + credit_entropy_bits(r, bytes*8);
2320 + }
2321 + }
2322 +@@ -816,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
2323 + static void extract_buf(struct entropy_store *r, __u8 *out)
2324 + {
2325 + int i;
2326 +- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
2327 ++ union {
2328 ++ __u32 w[5];
2329 ++ unsigned long l[LONGS(EXTRACT_SIZE)];
2330 ++ } hash;
2331 ++ __u32 workspace[SHA_WORKSPACE_WORDS];
2332 + __u8 extract[64];
2333 ++ unsigned long flags;
2334 +
2335 + /* Generate a hash across the pool, 16 words (512 bits) at a time */
2336 +- sha_init(hash);
2337 ++ sha_init(hash.w);
2338 ++ spin_lock_irqsave(&r->lock, flags);
2339 + for (i = 0; i < r->poolinfo->poolwords; i += 16)
2340 +- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
2341 ++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
2342 +
2343 + /*
2344 + * We mix the hash back into the pool to prevent backtracking
2345 +@@ -833,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
2346 + * brute-forcing the feedback as hard as brute-forcing the
2347 + * hash.
2348 + */
2349 +- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
2350 ++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
2351 ++ spin_unlock_irqrestore(&r->lock, flags);
2352 +
2353 + /*
2354 + * To avoid duplicates, we atomically extract a portion of the
2355 + * pool while mixing, and hash one final time.
2356 + */
2357 +- sha_transform(hash, extract, workspace);
2358 ++ sha_transform(hash.w, extract, workspace);
2359 + memset(extract, 0, sizeof(extract));
2360 + memset(workspace, 0, sizeof(workspace));
2361 +
2362 +@@ -848,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
2363 + * pattern, we fold it in half. Thus, we always feed back
2364 + * twice as much data as we output.
2365 + */
2366 +- hash[0] ^= hash[3];
2367 +- hash[1] ^= hash[4];
2368 +- hash[2] ^= rol32(hash[2], 16);
2369 +- memcpy(out, hash, EXTRACT_SIZE);
2370 +- memset(hash, 0, sizeof(hash));
2371 ++ hash.w[0] ^= hash.w[3];
2372 ++ hash.w[1] ^= hash.w[4];
2373 ++ hash.w[2] ^= rol32(hash.w[2], 16);
2374 ++
2375 ++ /*
2376 ++ * If we have an architectural hardware random number
2377 ++ * generator, mix that in, too.
2378 ++ */
2379 ++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
2380 ++ unsigned long v;
2381 ++ if (!arch_get_random_long(&v))
2382 ++ break;
2383 ++ hash.l[i] ^= v;
2384 ++ }
2385 ++
2386 ++ memcpy(out, &hash, EXTRACT_SIZE);
2387 ++ memset(&hash, 0, sizeof(hash));
2388 + }
2389 +
2390 + static ssize_t extract_entropy(struct entropy_store *r, void *buf,
2391 +- size_t nbytes, int min, int reserved)
2392 ++ size_t nbytes, int min, int reserved)
2393 + {
2394 + ssize_t ret = 0, i;
2395 + __u8 tmp[EXTRACT_SIZE];
2396 +- unsigned long flags;
2397 +
2398 ++ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
2399 + xfer_secondary_pool(r, nbytes);
2400 + nbytes = account(r, nbytes, min, reserved);
2401 +
2402 +@@ -869,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
2403 + extract_buf(r, tmp);
2404 +
2405 + if (fips_enabled) {
2406 ++ unsigned long flags;
2407 ++
2408 + spin_lock_irqsave(&r->lock, flags);
2409 + if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
2410 + panic("Hardware RNG duplicated output!\n");
2411 +@@ -894,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
2412 + ssize_t ret = 0, i;
2413 + __u8 tmp[EXTRACT_SIZE];
2414 +
2415 ++ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
2416 + xfer_secondary_pool(r, nbytes);
2417 + nbytes = account(r, nbytes, 0, 0);
2418 +
2419 +@@ -927,8 +1026,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
2420 +
2421 + /*
2422 + * This function is the exported kernel interface. It returns some
2423 +- * number of good random numbers, suitable for seeding TCP sequence
2424 +- * numbers, etc.
2425 ++ * number of good random numbers, suitable for key generation, seeding
2426 ++ * TCP sequence numbers, etc. It does not use the hw random number
2427 ++ * generator, if available; use get_random_bytes_arch() for that.
2428 + */
2429 + void get_random_bytes(void *buf, int nbytes)
2430 + {
2431 +@@ -937,6 +1037,39 @@ void get_random_bytes(void *buf, int nbytes)
2432 + EXPORT_SYMBOL(get_random_bytes);
2433 +
2434 + /*
2435 ++ * This function will use the architecture-specific hardware random
2436 ++ * number generator if it is available. The arch-specific hw RNG will
2437 ++ * almost certainly be faster than what we can do in software, but it
2438 ++ * is impossible to verify that it is implemented securely (as
2439 ++ * opposed to, say, the AES encryption of a sequence number using a
2440 ++ * key known by the NSA). So it's useful if we need the speed, but
2441 ++ * only if we're willing to trust the hardware manufacturer not to
2442 ++ * have put in a back door.
2443 ++ */
2444 ++void get_random_bytes_arch(void *buf, int nbytes)
2445 ++{
2446 ++ char *p = buf;
2447 ++
2448 ++ trace_get_random_bytes(nbytes, _RET_IP_);
2449 ++ while (nbytes) {
2450 ++ unsigned long v;
2451 ++ int chunk = min(nbytes, (int)sizeof(unsigned long));
2452 ++
2453 ++ if (!arch_get_random_long(&v))
2454 ++ break;
2455 ++
2456 ++ memcpy(p, &v, chunk);
2457 ++ p += chunk;
2458 ++ nbytes -= chunk;
2459 ++ }
2460 ++
2461 ++ if (nbytes)
2462 ++ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
2463 ++}
2464 ++EXPORT_SYMBOL(get_random_bytes_arch);
2465 ++
2466 ++
2467 ++/*
2468 + * init_std_data - initialize pool with system data
2469 + *
2470 + * @r: pool to initialize
2471 +@@ -947,18 +1080,31 @@ EXPORT_SYMBOL(get_random_bytes);
2472 + */
2473 + static void init_std_data(struct entropy_store *r)
2474 + {
2475 +- ktime_t now;
2476 +- unsigned long flags;
2477 ++ int i;
2478 ++ ktime_t now = ktime_get_real();
2479 ++ unsigned long rv;
2480 +
2481 +- spin_lock_irqsave(&r->lock, flags);
2482 + r->entropy_count = 0;
2483 +- spin_unlock_irqrestore(&r->lock, flags);
2484 +-
2485 +- now = ktime_get_real();
2486 +- mix_pool_bytes(r, &now, sizeof(now));
2487 +- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
2488 ++ r->entropy_total = 0;
2489 ++ mix_pool_bytes(r, &now, sizeof(now), NULL);
2490 ++ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
2491 ++ if (!arch_get_random_long(&rv))
2492 ++ break;
2493 ++ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
2494 ++ }
2495 ++ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
2496 + }
2497 +
2498 ++/*
2499 ++ * Note that setup_arch() may call add_device_randomness()
2500 ++ * long before we get here. This allows seeding of the pools
2501 ++ * with some platform dependent data very early in the boot
2502 ++ * process. But it limits our options here. We must use
2503 ++ * statically allocated structures that already have all
2504 ++ * initializations complete at compile time. We should also
2505 ++ * take care not to overwrite the precious per platform data
2506 ++ * we were given.
2507 ++ */
2508 + static int rand_initialize(void)
2509 + {
2510 + init_std_data(&input_pool);
2511 +@@ -968,24 +1114,6 @@ static int rand_initialize(void)
2512 + }
2513 + module_init(rand_initialize);
2514 +
2515 +-void rand_initialize_irq(int irq)
2516 +-{
2517 +- struct timer_rand_state *state;
2518 +-
2519 +- state = get_timer_rand_state(irq);
2520 +-
2521 +- if (state)
2522 +- return;
2523 +-
2524 +- /*
2525 +- * If kzalloc returns null, we just won't use that entropy
2526 +- * source.
2527 +- */
2528 +- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
2529 +- if (state)
2530 +- set_timer_rand_state(irq, state);
2531 +-}
2532 +-
2533 + #ifdef CONFIG_BLOCK
2534 + void rand_initialize_disk(struct gendisk *disk)
2535 + {
2536 +@@ -1093,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
2537 + count -= bytes;
2538 + p += bytes;
2539 +
2540 +- mix_pool_bytes(r, buf, bytes);
2541 ++ mix_pool_bytes(r, buf, bytes, NULL);
2542 + cond_resched();
2543 + }
2544 +
2545 +@@ -1236,10 +1364,15 @@ static int proc_do_uuid(ctl_table *table, int write,
2546 + uuid = table->data;
2547 + if (!uuid) {
2548 + uuid = tmp_uuid;
2549 +- uuid[8] = 0;
2550 +- }
2551 +- if (uuid[8] == 0)
2552 + generate_random_uuid(uuid);
2553 ++ } else {
2554 ++ static DEFINE_SPINLOCK(bootid_spinlock);
2555 ++
2556 ++ spin_lock(&bootid_spinlock);
2557 ++ if (!uuid[8])
2558 ++ generate_random_uuid(uuid);
2559 ++ spin_unlock(&bootid_spinlock);
2560 ++ }
2561 +
2562 + sprintf(buf, "%pU", uuid);
2563 +
2564 +@@ -1318,9 +1451,14 @@ late_initcall(random_int_secret_init);
2565 + DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
2566 + unsigned int get_random_int(void)
2567 + {
2568 +- __u32 *hash = get_cpu_var(get_random_int_hash);
2569 ++ __u32 *hash;
2570 + unsigned int ret;
2571 +
2572 ++ if (arch_get_random_int(&ret))
2573 ++ return ret;
2574 ++
2575 ++ hash = get_cpu_var(get_random_int_hash);
2576 ++
2577 + hash[0] += current->pid + jiffies + get_cycles();
2578 + md5_transform(hash, random_int_secret);
2579 + ret = hash[0];
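get_random_int() now tries the CPU's generator first and only falls back to the MD5-based per-CPU path. A userspace analogue using the RDRAND intrinsic (x86, built with -mrdrnd; the fallback is a placeholder):

#include <immintrin.h>
#include <stdint.h>

static uint32_t software_fallback(void)
{
        static uint32_t x;
        return ++x;                 /* stands in for the md5_transform() path */
}

static uint32_t demo_get_random_int(void)
{
        unsigned int v;

        if (_rdrand32_step(&v))     /* returns 1 when a HW value landed in v */
                return v;
        return software_fallback(); /* RDRAND absent or momentarily empty */
}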
2580 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
2581 +index f6cf448..240966b 100644
2582 +--- a/drivers/edac/i7core_edac.c
2583 ++++ b/drivers/edac/i7core_edac.c
2584 +@@ -1842,11 +1842,9 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
2585 + if (mce->bank != 8)
2586 + return 0;
2587 +
2588 +-#ifdef CONFIG_SMP
2589 + /* Only handle if it is the right mc controller */
2590 + if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
2591 + return 0;
2592 +-#endif
2593 +
2594 + smp_rmb();
2595 + if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
2596 +diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
2597 +index bcb1126..02a52d1 100644
2598 +--- a/drivers/firmware/dmi_scan.c
2599 ++++ b/drivers/firmware/dmi_scan.c
2600 +@@ -6,6 +6,7 @@
2601 + #include <linux/dmi.h>
2602 + #include <linux/efi.h>
2603 + #include <linux/bootmem.h>
2604 ++#include <linux/random.h>
2605 + #include <asm/dmi.h>
2606 +
2607 + /*
2608 +@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
2609 +
2610 + dmi_table(buf, dmi_len, dmi_num, decode, NULL);
2611 +
2612 ++ add_device_randomness(buf, dmi_len);
2613 ++
2614 + dmi_iounmap(buf, dmi_len);
2615 + return 0;
2616 + }
2617 +diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
2618 +index 51e0e2d..a330492 100644
2619 +--- a/drivers/firmware/pcdp.c
2620 ++++ b/drivers/firmware/pcdp.c
2621 +@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
2622 + if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
2623 + return -ENODEV;
2624 +
2625 +- pcdp = ioremap(efi.hcdp, 4096);
2626 ++ pcdp = early_ioremap(efi.hcdp, 4096);
2627 + printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
2628 +
2629 + if (strstr(cmdline, "console=hcdp")) {
2630 +@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
2631 + }
2632 +
2633 + out:
2634 +- iounmap(pcdp);
2635 ++ early_iounmap(pcdp, 4096);
2636 + return rc;
2637 + }
2638 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
2639 +index 252defd..87fd034 100644
2640 +--- a/drivers/hwmon/coretemp.c
2641 ++++ b/drivers/hwmon/coretemp.c
2642 +@@ -47,16 +47,15 @@
2643 + #define MAX_ATTRS 5 /* Maximum no of per-core attrs */
2644 + #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
2645 +
2646 +-#ifdef CONFIG_SMP
2647 + #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
2648 + #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
2649 ++#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
2650 ++
2651 ++#ifdef CONFIG_SMP
2652 + #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
2653 + #else
2654 +-#define TO_PHYS_ID(cpu) (cpu)
2655 +-#define TO_CORE_ID(cpu) (cpu)
2656 + #define for_each_sibling(i, cpu) for (i = 0; false; )
2657 + #endif
2658 +-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
2659 +
2660 + /*
2661 + * Per-Core Temperature Data
2662 +diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
2663 +index 08ba5ad..a28ebf0 100644
2664 +--- a/drivers/input/tablet/wacom_wac.c
2665 ++++ b/drivers/input/tablet/wacom_wac.c
2666 +@@ -242,7 +242,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
2667 + input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
2668 + input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
2669 + if (wacom->tool[0] != BTN_TOOL_MOUSE) {
2670 +- input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
2671 ++ input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
2672 + input_report_key(input, BTN_TOUCH, data[1] & 0x01);
2673 + input_report_key(input, BTN_STYLUS, data[1] & 0x02);
2674 + input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
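The widened mask reflects pens that report 1024 pressure levels: the low byte sits in data[6] and the top two bits, not one, in data[7]. The decode in isolation:

#include <stdint.h>

/* Ten-bit pressure: data[6] carries bits 0-7, data[7] bits 8-9. */
static unsigned int demo_wacom_pressure(const uint8_t *data)
{
        return data[6] | ((data[7] & 0x03) << 8);   /* 0..1023 */
}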
2675 +diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
2676 +index a20e1c4..ccd81b1 100644
2677 +--- a/drivers/mfd/ab3100-core.c
2678 ++++ b/drivers/mfd/ab3100-core.c
2679 +@@ -408,8 +408,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
2680 + u32 fatevent;
2681 + int err;
2682 +
2683 +- add_interrupt_randomness(irq);
2684 +-
2685 + err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
2686 + event_regs, 3);
2687 + if (err)
2688 +@@ -938,9 +936,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
2689 +
2690 + err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
2691 + IRQF_ONESHOT, "ab3100-core", ab3100);
2692 +- /* This real unpredictable IRQ is of course sampled for entropy */
2693 +- rand_initialize_irq(client->irq);
2694 +-
2695 + if (err)
2696 + goto exit_no_irq;
2697 +
2698 +diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
2699 +index 3d7dce6..d69dc4b 100644
2700 +--- a/drivers/mfd/ab3550-core.c
2701 ++++ b/drivers/mfd/ab3550-core.c
2702 +@@ -1309,8 +1309,6 @@ static int __init ab3550_probe(struct i2c_client *client,
2703 +
2704 + err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
2705 + IRQF_ONESHOT, "ab3550-core", ab);
2706 +- /* This real unpredictable IRQ is of course sampled for entropy */
2707 +- rand_initialize_irq(client->irq);
2708 +
2709 + if (err)
2710 + goto exit_no_irq;
2711 +diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
2712 +index 43a76c4..db662e2 100644
2713 +--- a/drivers/mfd/ezx-pcap.c
2714 ++++ b/drivers/mfd/ezx-pcap.c
2715 +@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
2716 + }
2717 + local_irq_enable();
2718 + ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
2719 +- } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
2720 ++ } while (gpio_get_value(pdata->gpio));
2721 + }
2722 +
2723 + static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
2724 +diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
2725 +index f742745..b90f3e0 100644
2726 +--- a/drivers/mfd/wm831x-otp.c
2727 ++++ b/drivers/mfd/wm831x-otp.c
2728 +@@ -18,6 +18,7 @@
2729 + #include <linux/bcd.h>
2730 + #include <linux/delay.h>
2731 + #include <linux/mfd/core.h>
2732 ++#include <linux/random.h>
2733 +
2734 + #include <linux/mfd/wm831x/core.h>
2735 + #include <linux/mfd/wm831x/otp.h>
2736 +@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
2737 +
2738 + int wm831x_otp_init(struct wm831x *wm831x)
2739 + {
2740 ++ char uuid[WM831X_UNIQUE_ID_LEN];
2741 + int ret;
2742 +
2743 + ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
2744 +@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
2745 + dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
2746 + ret);
2747 +
2748 ++ ret = wm831x_unique_id_read(wm831x, uuid);
2749 ++ if (ret == 0)
2750 ++ add_device_randomness(uuid, sizeof(uuid));
2751 ++ else
2752 ++ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
2753 ++
2754 + return ret;
2755 + }
2756 +
2757 +diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
2758 +index 5278e84..0d0ee55 100644
2759 +--- a/drivers/net/e1000e/82571.c
2760 ++++ b/drivers/net/e1000e/82571.c
2761 +@@ -1602,10 +1602,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
2762 + * auto-negotiation in the TXCW register and disable
2763 + * forced link in the Device Control register in an
2764 + * attempt to auto-negotiate with our link partner.
2765 +- * If the partner code word is null, stop forcing
2766 +- * and restart auto negotiation.
2767 + */
2768 +- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
2769 ++ if (rxcw & E1000_RXCW_C) {
2770 + /* Enable autoneg, and unforce link up */
2771 + ew32(TXCW, mac->txcw);
2772 + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
2773 +diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
2774 +index 9d35ec1..9e5fd45 100644
2775 +--- a/drivers/net/wireless/rt2x00/rt61pci.c
2776 ++++ b/drivers/net/wireless/rt2x00/rt61pci.c
2777 +@@ -2254,8 +2254,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2778 +
2779 + static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2780 + {
2781 +- struct ieee80211_conf conf = { .flags = 0 };
2782 +- struct rt2x00lib_conf libconf = { .conf = &conf };
2783 ++ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
2784 +
2785 + rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2786 + }
2787 +diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
2788 +index bdc909b..f3c2110 100644
2789 +--- a/drivers/rtc/rtc-wm831x.c
2790 ++++ b/drivers/rtc/rtc-wm831x.c
2791 +@@ -24,7 +24,7 @@
2792 + #include <linux/mfd/wm831x/core.h>
2793 + #include <linux/delay.h>
2794 + #include <linux/platform_device.h>
2795 +-
2796 ++#include <linux/random.h>
2797 +
2798 + /*
2799 + * R16416 (0x4020) - RTC Write Counter
2800 +@@ -96,6 +96,26 @@ struct wm831x_rtc {
2801 + unsigned int alarm_enabled:1;
2802 + };
2803 +
2804 ++static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
2805 ++{
2806 ++ int ret;
2807 ++ u16 reg;
2808 ++
2809 ++ /*
2810 ++ * The write counter contains a pseudo-random number which is
2811 ++ * regenerated every time we set the RTC so it should be a
2812 ++ * useful per-system source of entropy.
2813 ++ */
2814 ++ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
2815 ++ if (ret >= 0) {
2816 ++ reg = ret;
2817 ++ add_device_randomness(&reg, sizeof(reg));
2818 ++ } else {
2819 ++ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
2820 ++ ret);
2821 ++ }
2822 ++}
2823 ++
2824 + /*
2825 + * Read current time and date in RTC
2826 + */
2827 +@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
2828 + alm_irq, ret);
2829 + }
2830 +
2831 ++ wm831x_rtc_add_randomness(wm831x);
2832 ++
2833 + return 0;
2834 +
2835 + err:
2836 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2837 +index 34bb059..3c0aa02 100644
2838 +--- a/drivers/usb/core/hub.c
2839 ++++ b/drivers/usb/core/hub.c
2840 +@@ -24,6 +24,7 @@
2841 + #include <linux/kthread.h>
2842 + #include <linux/mutex.h>
2843 + #include <linux/freezer.h>
2844 ++#include <linux/random.h>
2845 +
2846 + #include <asm/uaccess.h>
2847 + #include <asm/byteorder.h>
2848 +@@ -1902,6 +1903,14 @@ int usb_new_device(struct usb_device *udev)
2849 + /* Tell the world! */
2850 + announce_device(udev);
2851 +
2852 ++ if (udev->serial)
2853 ++ add_device_randomness(udev->serial, strlen(udev->serial));
2854 ++ if (udev->product)
2855 ++ add_device_randomness(udev->product, strlen(udev->product));
2856 ++ if (udev->manufacturer)
2857 ++ add_device_randomness(udev->manufacturer,
2858 ++ strlen(udev->manufacturer));
2859 ++
2860 + device_enable_async_suspend(&udev->dev);
2861 + /* Register the device. The device driver is responsible
2862 + * for configuring the device and invoking the add-device
2863 +diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
2864 +index 3e65427..0d1c9bd 100644
2865 +--- a/fs/nilfs2/ioctl.c
2866 ++++ b/fs/nilfs2/ioctl.c
2867 +@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
2868 + if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
2869 + goto out;
2870 +
2871 +- down_read(&inode->i_sb->s_umount);
2872 ++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
2873 +
2874 + nilfs_transaction_begin(inode->i_sb, &ti, 0);
2875 + ret = nilfs_cpfile_change_cpmode(
2876 +@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
2877 + else
2878 + nilfs_transaction_commit(inode->i_sb); /* never fails */
2879 +
2880 +- up_read(&inode->i_sb->s_umount);
2881 ++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
2882 + out:
2883 + mnt_drop_write(filp->f_path.mnt);
2884 + return ret;
2885 +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
2886 +index 8351c44..97bfbdd 100644
2887 +--- a/fs/nilfs2/super.c
2888 ++++ b/fs/nilfs2/super.c
2889 +@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
2890 + struct nilfs_root *root;
2891 + int ret;
2892 +
2893 ++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
2894 ++
2895 + down_read(&nilfs->ns_segctor_sem);
2896 + ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
2897 + up_read(&nilfs->ns_segctor_sem);
2898 +@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
2899 + ret = nilfs_get_root_dentry(s, root, root_dentry);
2900 + nilfs_put_root(root);
2901 + out:
2902 ++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
2903 + return ret;
2904 + }
2905 +
2906 +diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
2907 +index 35a8970..1c98f53 100644
2908 +--- a/fs/nilfs2/the_nilfs.c
2909 ++++ b/fs/nilfs2/the_nilfs.c
2910 +@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
2911 + nilfs->ns_bdev = bdev;
2912 + atomic_set(&nilfs->ns_ndirtyblks, 0);
2913 + init_rwsem(&nilfs->ns_sem);
2914 ++ mutex_init(&nilfs->ns_snapshot_mount_mutex);
2915 + INIT_LIST_HEAD(&nilfs->ns_dirty_files);
2916 + INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
2917 + spin_lock_init(&nilfs->ns_inode_lock);
2918 +diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
2919 +index 9992b11..de7435f 100644
2920 +--- a/fs/nilfs2/the_nilfs.h
2921 ++++ b/fs/nilfs2/the_nilfs.h
2922 +@@ -47,6 +47,7 @@ enum {
2923 + * @ns_flags: flags
2924 + * @ns_bdev: block device
2925 + * @ns_sem: semaphore for shared states
2926 ++ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
2927 + * @ns_sbh: buffer heads of on-disk super blocks
2928 + * @ns_sbp: pointers to super block data
2929 + * @ns_sbwtime: previous write time of super block
2930 +@@ -99,6 +100,7 @@ struct the_nilfs {
2931 +
2932 + struct block_device *ns_bdev;
2933 + struct rw_semaphore ns_sem;
2934 ++ struct mutex ns_snapshot_mount_mutex;
2935 +
2936 + /*
2937 + * used for
2938 +diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
2939 +index 2d921b3..d0a3100 100644
2940 +--- a/include/linux/irqdesc.h
2941 ++++ b/include/linux/irqdesc.h
2942 +@@ -38,7 +38,6 @@ struct timer_rand_state;
2943 + */
2944 + struct irq_desc {
2945 + struct irq_data irq_data;
2946 +- struct timer_rand_state *timer_rand_state;
2947 + unsigned int __percpu *kstat_irqs;
2948 + irq_flow_handler_t handle_irq;
2949 + #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
2950 +diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
2951 +index 40c37216..32a1b5c 100644
2952 +--- a/include/linux/mfd/ezx-pcap.h
2953 ++++ b/include/linux/mfd/ezx-pcap.h
2954 +@@ -16,6 +16,7 @@ struct pcap_subdev {
2955 + struct pcap_platform_data {
2956 + unsigned int irq_base;
2957 + unsigned int config;
2958 ++ int gpio;
2959 + void (*init) (void *); /* board specific init */
2960 + int num_subdevs;
2961 + struct pcap_subdev *subdevs;
2962 +diff --git a/include/linux/random.h b/include/linux/random.h
2963 +index d13059f..ac621ce 100644
2964 +--- a/include/linux/random.h
2965 ++++ b/include/linux/random.h
2966 +@@ -48,13 +48,13 @@ struct rnd_state {
2967 +
2968 + #ifdef __KERNEL__
2969 +
2970 +-extern void rand_initialize_irq(int irq);
2971 +-
2972 ++extern void add_device_randomness(const void *, unsigned int);
2973 + extern void add_input_randomness(unsigned int type, unsigned int code,
2974 + unsigned int value);
2975 +-extern void add_interrupt_randomness(int irq);
2976 ++extern void add_interrupt_randomness(int irq, int irq_flags);
2977 +
2978 + extern void get_random_bytes(void *buf, int nbytes);
2979 ++extern void get_random_bytes_arch(void *buf, int nbytes);
2980 + void generate_random_uuid(unsigned char uuid_out[16]);
2981 +
2982 + #ifndef MODULE
2983 +@@ -91,6 +91,19 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
2984 + state->s3 = __seed(i, 15);
2985 + }
2986 +
2987 ++#ifdef CONFIG_ARCH_RANDOM
2988 ++# include <asm/archrandom.h>
2989 ++#else
2990 ++static inline int arch_get_random_long(unsigned long *v)
2991 ++{
2992 ++ return 0;
2993 ++}
2994 ++static inline int arch_get_random_int(unsigned int *v)
2995 ++{
2996 ++ return 0;
2997 ++}
2998 ++#endif
2999 ++
3000 + #endif /* __KERNEL___ */
3001 +
3002 + #endif /* _LINUX_RANDOM_H */
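With these stubs, generic code can probe for a hardware generator unconditionally: when CONFIG_ARCH_RANDOM is off the call collapses to a constant 0 and the software path runs. A userspace sketch of the calling convention:

/* Stand-in stub, as when CONFIG_ARCH_RANDOM is unset. */
static int arch_get_random_long(unsigned long *v)
{
        (void)v;
        return 0;               /* no hardware generator */
}

static unsigned long demo_seed_word(unsigned long software_seed)
{
        unsigned long v;

        if (arch_get_random_long(&v))
                return v;       /* hardware value when available */
        return software_seed;   /* caller-provided fallback otherwise */
}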
3003 +diff --git a/include/trace/events/random.h b/include/trace/events/random.h
3004 +new file mode 100644
3005 +index 0000000..422df19
3006 +--- /dev/null
3007 ++++ b/include/trace/events/random.h
3008 +@@ -0,0 +1,134 @@
3009 ++#undef TRACE_SYSTEM
3010 ++#define TRACE_SYSTEM random
3011 ++
3012 ++#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
3013 ++#define _TRACE_RANDOM_H
3014 ++
3015 ++#include <linux/writeback.h>
3016 ++#include <linux/tracepoint.h>
3017 ++
3018 ++DECLARE_EVENT_CLASS(random__mix_pool_bytes,
3019 ++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
3020 ++
3021 ++ TP_ARGS(pool_name, bytes, IP),
3022 ++
3023 ++ TP_STRUCT__entry(
3024 ++ __field( const char *, pool_name )
3025 ++ __field( int, bytes )
3026 ++ __field(unsigned long, IP )
3027 ++ ),
3028 ++
3029 ++ TP_fast_assign(
3030 ++ __entry->pool_name = pool_name;
3031 ++ __entry->bytes = bytes;
3032 ++ __entry->IP = IP;
3033 ++ ),
3034 ++
3035 ++ TP_printk("%s pool: bytes %d caller %pF",
3036 ++ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
3037 ++);
3038 ++
3039 ++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
3040 ++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
3041 ++
3042 ++ TP_ARGS(pool_name, bytes, IP)
3043 ++);
3044 ++
3045 ++DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
3046 ++ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
3047 ++
3048 ++ TP_ARGS(pool_name, bytes, IP)
3049 ++);
3050 ++
3051 ++TRACE_EVENT(credit_entropy_bits,
3052 ++ TP_PROTO(const char *pool_name, int bits, int entropy_count,
3053 ++ int entropy_total, unsigned long IP),
3054 ++
3055 ++ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
3056 ++
3057 ++ TP_STRUCT__entry(
3058 ++ __field( const char *, pool_name )
3059 ++ __field( int, bits )
3060 ++ __field( int, entropy_count )
3061 ++ __field( int, entropy_total )
3062 ++ __field(unsigned long, IP )
3063 ++ ),
3064 ++
3065 ++ TP_fast_assign(
3066 ++ __entry->pool_name = pool_name;
3067 ++ __entry->bits = bits;
3068 ++ __entry->entropy_count = entropy_count;
3069 ++ __entry->entropy_total = entropy_total;
3070 ++ __entry->IP = IP;
3071 ++ ),
3072 ++
3073 ++ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
3074 ++ "caller %pF", __entry->pool_name, __entry->bits,
3075 ++ __entry->entropy_count, __entry->entropy_total,
3076 ++ (void *)__entry->IP)
3077 ++);
3078 ++
3079 ++TRACE_EVENT(get_random_bytes,
3080 ++ TP_PROTO(int nbytes, unsigned long IP),
3081 ++
3082 ++ TP_ARGS(nbytes, IP),
3083 ++
3084 ++ TP_STRUCT__entry(
3085 ++ __field( int, nbytes )
3086 ++ __field(unsigned long, IP )
3087 ++ ),
3088 ++
3089 ++ TP_fast_assign(
3090 ++ __entry->nbytes = nbytes;
3091 ++ __entry->IP = IP;
3092 ++ ),
3093 ++
3094 ++ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
3095 ++);
3096 ++
3097 ++DECLARE_EVENT_CLASS(random__extract_entropy,
3098 ++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
3099 ++ unsigned long IP),
3100 ++
3101 ++ TP_ARGS(pool_name, nbytes, entropy_count, IP),
3102 ++
3103 ++ TP_STRUCT__entry(
3104 ++ __field( const char *, pool_name )
3105 ++ __field( int, nbytes )
3106 ++ __field( int, entropy_count )
3107 ++ __field(unsigned long, IP )
3108 ++ ),
3109 ++
3110 ++ TP_fast_assign(
3111 ++ __entry->pool_name = pool_name;
3112 ++ __entry->nbytes = nbytes;
3113 ++ __entry->entropy_count = entropy_count;
3114 ++ __entry->IP = IP;
3115 ++ ),
3116 ++
3117 ++ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
3118 ++ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
3119 ++ (void *)__entry->IP)
3120 ++);
3121 ++
3122 ++
3123 ++DEFINE_EVENT(random__extract_entropy, extract_entropy,
3124 ++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
3125 ++ unsigned long IP),
3126 ++
3127 ++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
3128 ++);
3129 ++
3130 ++DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
3131 ++ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
3132 ++ unsigned long IP),
3133 ++
3134 ++ TP_ARGS(pool_name, nbytes, entropy_count, IP)
3135 ++);
3136 ++
3137 ++
3138 ++
3139 ++#endif /* _TRACE_RANDOM_H */
3140 ++
3141 ++/* This part must be outside protection */
3142 ++#include <trace/define_trace.h>
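For readers new to the TRACE_EVENT machinery in this added header: DECLARE_EVENT_CLASS() defines the record layout and print format once, DEFINE_EVENT() stamps out named tracepoints sharing that class, and instrumented code fires them through generated trace_<name>() helpers. A hedged sketch of a call site, modeled on (not copied from) drivers/char/random.c in this series; note_mix() and its entropy_store argument are illustrative:

    /* in exactly one .c file, to expand the event bodies */
    #define CREATE_TRACE_POINTS
    #include <trace/events/random.h>

    static void note_mix(struct entropy_store *r, int bytes)
    {
            /* _RET_IP_ supplies the address printed as "caller %pF" */
            trace_mix_pool_bytes(r->name, bytes, _RET_IP_);
    }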
3143 +diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
3144 +index 470d08c..10e0772 100644
3145 +--- a/kernel/irq/handle.c
3146 ++++ b/kernel/irq/handle.c
3147 +@@ -117,7 +117,7 @@ irqreturn_t
3148 + handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
3149 + {
3150 + irqreturn_t retval = IRQ_NONE;
3151 +- unsigned int random = 0, irq = desc->irq_data.irq;
3152 ++ unsigned int flags = 0, irq = desc->irq_data.irq;
3153 +
3154 + do {
3155 + irqreturn_t res;
3156 +@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
3157 +
3158 + /* Fall through to add to randomness */
3159 + case IRQ_HANDLED:
3160 +- random |= action->flags;
3161 ++ flags |= action->flags;
3162 + break;
3163 +
3164 + default:
3165 +@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
3166 + action = action->next;
3167 + } while (action);
3168 +
3169 +- if (random & IRQF_SAMPLE_RANDOM)
3170 +- add_interrupt_randomness(irq);
3171 ++ add_interrupt_randomness(irq, flags);
3172 +
3173 + if (!noirqdebug)
3174 + note_interrupt(irq, desc, retval);
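With IRQF_SAMPLE_RANDOM on the way out, the loop above stops gating entropy collection on a driver-selected flag: the accumulated IRQF_* action flags are handed to add_interrupt_randomness(), which now samples every interrupt and decides internally how much entropy to credit. The rename from "random" to "flags" also fixes a misleading name, since the variable collects handler flags, not random data. The reworked prototype, as introduced elsewhere in this patch series (hedged paraphrase):

    /* called unconditionally from the IRQ core for every interrupt */
    void add_interrupt_randomness(int irq, int irq_flags);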
3175 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3176 +index df8136f..fa4a70e 100644
3177 +--- a/kernel/irq/manage.c
3178 ++++ b/kernel/irq/manage.c
3179 +@@ -886,22 +886,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
3180 +
3181 + if (desc->irq_data.chip == &no_irq_chip)
3182 + return -ENOSYS;
3183 +- /*
3184 +- * Some drivers like serial.c use request_irq() heavily,
3185 +- * so we have to be careful not to interfere with a
3186 +- * running system.
3187 +- */
3188 +- if (new->flags & IRQF_SAMPLE_RANDOM) {
3189 +- /*
3190 +- * This function might sleep, we want to call it first,
3191 +- * outside of the atomic block.
3192 +- * Yes, this might clear the entropy pool if the wrong
3193 +- * driver is attempted to be loaded, without actually
3194 +- * installing a new handler, but is this really a problem,
3195 +- * only the sysadmin is able to do this.
3196 +- */
3197 +- rand_initialize_irq(irq);
3198 +- }
3199 +
3200 + /*
3201 + * Check whether the interrupt nests into another interrupt
3202 +@@ -1325,7 +1309,6 @@ EXPORT_SYMBOL(free_irq);
3203 + * Flags:
3204 + *
3205 + * IRQF_SHARED Interrupt is shared
3206 +- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
3207 + * IRQF_TRIGGER_* Specify active edge(s) or level
3208 + *
3209 + */
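The manage.c hunks complete the removal on the setup side: __setup_irq() no longer calls rand_initialize_irq(), and the flag disappears from the request_threaded_irq() documentation. A hypothetical driver call site after this series (my_handler, my_dev_setup_irq and the "my_dev" name are illustrative):

    #include <linux/interrupt.h>

    static irqreturn_t my_handler(int irq, void *dev)
    {
            return IRQ_HANDLED;
    }

    static int my_dev_setup_irq(unsigned int irq, void *dev)
    {
            /* previously: IRQF_SHARED | IRQF_SAMPLE_RANDOM; the flag is
             * now pointless, since every interrupt feeds the pool */
            return request_irq(irq, my_handler, IRQF_SHARED, "my_dev", dev);
    }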
3210 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3211 +index ae60a53..037f077 100644
3212 +--- a/mm/hugetlb.c
3213 ++++ b/mm/hugetlb.c
3214 +@@ -2301,6 +2301,22 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3215 + {
3216 + mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3217 + __unmap_hugepage_range(vma, start, end, ref_page);
3218 ++ /*
3219 ++ * Clear this flag so that x86's huge_pmd_share page_table_shareable
3220 ++ * test will fail on a vma being torn down, and not grab a page table
3221 ++ * on its way out. We're lucky that the flag has such an appropriate
3222 ++ * name, and can in fact be safely cleared here. We could clear it
3223 ++ * before the __unmap_hugepage_range above, but all that's necessary
3224 ++ * is to clear it before releasing the i_mmap_mutex below.
3225 ++ *
3226 ++ * This works because in the contexts this is called, the VMA is
3227 ++ * going to be destroyed. It is not vulnerable to madvise(DONTNEED)
3228 ++ * because madvise is not supported on hugetlbfs. The same applies
3229 ++ * for direct IO. unmap_hugepage_range() is only being called just
3230 ++ * before free_pgtables() so clearing VM_MAYSHARE will not cause
3231 ++ * surprises later.
3232 ++ */
3233 ++ vma->vm_flags &= ~VM_MAYSHARE;
3234 + mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3235 + }
3236 +
3237 +@@ -2853,9 +2869,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
3238 + }
3239 + }
3240 + spin_unlock(&mm->page_table_lock);
3241 +- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3242 +-
3243 ++ /*
3244 ++ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
3245 ++ * may have cleared our pud entry and done put_page on the page table:
3246 ++ * once we release i_mmap_mutex, another task can do the final put_page
3247 ++ * and that page table be reused and filled with junk.
3248 ++ */
3249 + flush_tlb_range(vma, start, end);
3250 ++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3251 + }
3252 +
3253 + int hugetlb_reserve_pages(struct inode *inode,
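Both hugetlb changes above are ordering fixes around i_mmap_mutex. Clearing VM_MAYSHARE before the unlock makes x86's page_table_shareable test fail for a VMA being torn down, so no new sharer can grab a page table on its way out; moving flush_tlb_range() before the unlock ensures stale TLB entries are gone before another task's huge_pmd_unshare() can do the final put_page on a shared page table and let it be reused. Distilled ordering (a sketch restating the hunk, not additional code):

    spin_unlock(&mm->page_table_lock);
    /* flush while i_mmap_mutex still blocks huge_pmd_unshare() ... */
    flush_tlb_range(vma, start, end);
    /* ... and only then allow the final put_page on the page table */
    mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);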
3254 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3255 +index 6496748..2f49dcf 100644
3256 +--- a/mm/memory-failure.c
3257 ++++ b/mm/memory-failure.c
3258 +@@ -1334,8 +1334,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
3259 + /* Keep page count to indicate a given hugepage is isolated. */
3260 +
3261 + list_add(&hpage->lru, &pagelist);
3262 +- ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
3263 +- true);
3264 ++ ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
3265 ++ MIGRATE_SYNC);
3266 + if (ret) {
3267 + struct page *page1, *page2;
3268 + list_for_each_entry_safe(page1, page2, &pagelist, lru)
3269 +@@ -1464,7 +1464,7 @@ int soft_offline_page(struct page *page, int flags)
3270 + page_is_file_cache(page));
3271 + list_add(&page->lru, &pagelist);
3272 + ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
3273 +- 0, MIGRATE_SYNC);
3274 ++ false, MIGRATE_SYNC);
3275 + if (ret) {
3276 + putback_lru_pages(&pagelist);
3277 + pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
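The memory-failure.c fix is about argument types rather than values: the migration entry points take a bool offlining followed by an enum migrate_mode, and the old calls relied on implicit integer conversion, so "true" in the mode slot silently meant MIGRATE_SYNC_LIGHT instead of a fully synchronous migration. The enum, as defined in this kernel series (hedged paraphrase; the header location may differ in 3.0.y):

    enum migrate_mode {
            MIGRATE_ASYNC,          /* never block */
            MIGRATE_SYNC_LIGHT,     /* allow light blocking */
            MIGRATE_SYNC,           /* block on everything */
    };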
3278 +diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
3279 +index 8d032de..71c7811 100644
3280 +--- a/mm/mmu_notifier.c
3281 ++++ b/mm/mmu_notifier.c
3282 +@@ -33,6 +33,24 @@
3283 + void __mmu_notifier_release(struct mm_struct *mm)
3284 + {
3285 + struct mmu_notifier *mn;
3286 ++ struct hlist_node *n;
3287 ++
3288 ++ /*
3289 ++ * RCU here will block mmu_notifier_unregister until
3290 ++ * ->release returns.
3291 ++ */
3292 ++ rcu_read_lock();
3293 ++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
3294 ++ /*
3295 ++ * if ->release runs before mmu_notifier_unregister it
3296 ++ * must be handled as it's the only way for the driver
3297 ++ * to flush all existing sptes and stop the driver
3298 ++ * from establishing any more sptes before all the
3299 ++ * pages in the mm are freed.
3300 ++ */
3301 ++ if (mn->ops->release)
3302 ++ mn->ops->release(mn, mm);
3303 ++ rcu_read_unlock();
3304 +
3305 + spin_lock(&mm->mmu_notifier_mm->lock);
3306 + while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
3307 +@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
3308 + * mmu_notifier_unregister to return.
3309 + */
3310 + hlist_del_init_rcu(&mn->hlist);
3311 +- /*
3312 +- * RCU here will block mmu_notifier_unregister until
3313 +- * ->release returns.
3314 +- */
3315 +- rcu_read_lock();
3316 +- spin_unlock(&mm->mmu_notifier_mm->lock);
3317 +- /*
3318 +- * if ->release runs before mmu_notifier_unregister it
3319 +- * must be handled as it's the only way for the driver
3320 +- * to flush all existing sptes and stop the driver
3321 +- * from establishing any more sptes before all the
3322 +- * pages in the mm are freed.
3323 +- */
3324 +- if (mn->ops->release)
3325 +- mn->ops->release(mn, mm);
3326 +- rcu_read_unlock();
3327 +- spin_lock(&mm->mmu_notifier_mm->lock);
3328 + }
3329 + spin_unlock(&mm->mmu_notifier_mm->lock);
3330 +
3331 +@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
3332 + {
3333 + BUG_ON(atomic_read(&mm->mm_count) <= 0);
3334 +
3335 +- spin_lock(&mm->mmu_notifier_mm->lock);
3336 + if (!hlist_unhashed(&mn->hlist)) {
3337 +- hlist_del_rcu(&mn->hlist);
3338 +-
3339 + /*
3340 + * RCU here will force exit_mmap to wait ->release to finish
3341 + * before freeing the pages.
3342 + */
3343 + rcu_read_lock();
3344 +- spin_unlock(&mm->mmu_notifier_mm->lock);
3345 ++
3346 + /*
3347 + * exit_mmap will block in mmu_notifier_release to
3348 + * guarantee ->release is called before freeing the
3349 +@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
3350 + if (mn->ops->release)
3351 + mn->ops->release(mn, mm);
3352 + rcu_read_unlock();
3353 +- } else
3354 ++
3355 ++ spin_lock(&mm->mmu_notifier_mm->lock);
3356 ++ hlist_del_rcu(&mn->hlist);
3357 + spin_unlock(&mm->mmu_notifier_mm->lock);
3358 ++ }
3359 +
3360 + /*
3361 + * Wait any running method to finish, of course including
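The mmu_notifier rework removes the lock-dropping dance inside the teardown loop: all ->release callbacks now run first under rcu_read_lock(), and only afterwards is the list emptied with the spinlock held continuously, while mmu_notifier_unregister() correspondingly defers hlist_del_rcu() until after its own ->release call. The resulting shape of __mmu_notifier_release() (sketch condensed from the hunk):

    rcu_read_lock();        /* blocks mmu_notifier_unregister() */
    hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
            if (mn->ops->release)
                    mn->ops->release(mn, mm);
    rcu_read_unlock();

    spin_lock(&mm->mmu_notifier_mm->lock);  /* held across all unhashing */
    while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
            mn = hlist_entry(mm->mmu_notifier_mm->list.first,
                             struct mmu_notifier, hlist);
            hlist_del_init_rcu(&mn->hlist);
    }
    spin_unlock(&mm->mmu_notifier_mm->lock);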
3362 +diff --git a/net/core/dev.c b/net/core/dev.c
3363 +index a71eafc..8235b81 100644
3364 +--- a/net/core/dev.c
3365 ++++ b/net/core/dev.c
3366 +@@ -1163,6 +1163,7 @@ static int __dev_open(struct net_device *dev)
3367 + net_dmaengine_get();
3368 + dev_set_rx_mode(dev);
3369 + dev_activate(dev);
3370 ++ add_device_randomness(dev->dev_addr, dev->addr_len);
3371 + }
3372 +
3373 + return ret;
3374 +@@ -4730,6 +4731,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3375 + err = ops->ndo_set_mac_address(dev, sa);
3376 + if (!err)
3377 + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3378 ++ add_device_randomness(dev->dev_addr, dev->addr_len);
3379 + return err;
3380 + }
3381 + EXPORT_SYMBOL(dev_set_mac_address);
3382 +@@ -5507,6 +5509,7 @@ int register_netdevice(struct net_device *dev)
3383 + dev_init_scheduler(dev);
3384 + dev_hold(dev);
3385 + list_netdevice(dev);
3386 ++ add_device_randomness(dev->dev_addr, dev->addr_len);
3387 +
3388 + /* Notify protocols, that a new device appeared. */
3389 + ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
3390 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3391 +index 861d53f..ac49ad5 100644
3392 +--- a/net/core/rtnetlink.c
3393 ++++ b/net/core/rtnetlink.c
3394 +@@ -1304,6 +1304,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
3395 + goto errout;
3396 + send_addr_notify = 1;
3397 + modified = 1;
3398 ++ add_device_randomness(dev->dev_addr, dev->addr_len);
3399 + }
3400 +
3401 + if (tb[IFLA_MTU]) {
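The four add_device_randomness() call sites above (device open, the two MAC-change paths, and registration) mix the hardware address into the entropy input pool. MAC addresses are unique but not secret, so this personalizes the pool on entropy-starved systems (routers, embedded boards with identical boot state) without crediting any entropy. The interface, as added by this patch series:

    /* mixes buf into the input pool; credits no entropy, so it is
     * safe to call with public but device-unique data */
    void add_device_randomness(const void *buf, unsigned int size);

    /* usage, as in the hunks above */
    add_device_randomness(dev->dev_addr, dev->addr_len);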
3402 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
3403 +index 29e9980..370aa94 100644
3404 +--- a/net/mac80211/mesh.c
3405 ++++ b/net/mac80211/mesh.c
3406 +@@ -490,6 +490,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
3407 +
3408 + del_timer_sync(&sdata->u.mesh.housekeeping_timer);
3409 + del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
3410 ++ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
3411 + /*
3412 + * If the timer fired while we waited for it, it will have
3413 + * requeued the work. Now the work will be running again
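The mesh fix stops a third timer alongside the two already handled. del_timer_sync() is required here rather than del_timer() because it also waits out a handler currently executing on another CPU, so mesh_path_timer cannot re-arm the work item after the interface is torn down (annotated restatement of the added line):

    /* del_timer() only dequeues a pending timer; del_timer_sync()
     * additionally waits for a concurrently running handler */
    del_timer_sync(&sdata->u.mesh.mesh_path_timer);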
3414 +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
3415 +index e45d2fb..bf0a7f6 100644
3416 +--- a/net/sunrpc/rpcb_clnt.c
3417 ++++ b/net/sunrpc/rpcb_clnt.c
3418 +@@ -193,7 +193,7 @@ static int rpcb_create_local_unix(void)
3419 + if (IS_ERR(clnt)) {
3420 + dprintk("RPC: failed to create AF_LOCAL rpcbind "
3421 + "client (errno %ld).\n", PTR_ERR(clnt));
3422 +- result = -PTR_ERR(clnt);
3423 ++ result = PTR_ERR(clnt);
3424 + goto out;
3425 + }
3426 +
3427 +@@ -242,7 +242,7 @@ static int rpcb_create_local_net(void)
3428 + if (IS_ERR(clnt)) {
3429 + dprintk("RPC: failed to create local rpcbind "
3430 + "client (errno %ld).\n", PTR_ERR(clnt));
3431 +- result = -PTR_ERR(clnt);
3432 ++ result = PTR_ERR(clnt);
3433 + goto out;
3434 + }
3435 +
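Both rpcbind fixes correct a sign error: PTR_ERR() already returns the negative errno that ERR_PTR() encoded into the pointer, so the old "-PTR_ERR(clnt)" flipped it to a positive value that callers testing for ret < 0 would mistake for success. A self-contained sketch (check_clnt() is illustrative):

    #include <linux/err.h>

    static int check_clnt(struct rpc_clnt *clnt)
    {
            if (IS_ERR(clnt))
                    return PTR_ERR(clnt);   /* e.g. -EPROTONOSUPPORT */
            return 0;
    }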
3436 +diff --git a/net/wireless/core.c b/net/wireless/core.c
3437 +index 880dbe2..498c760 100644
3438 +--- a/net/wireless/core.c
3439 ++++ b/net/wireless/core.c
3440 +@@ -959,6 +959,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
3441 + */
3442 + synchronize_rcu();
3443 + INIT_LIST_HEAD(&wdev->list);
3444 ++ /*
3445 ++ * Ensure that all events have been processed and
3446 ++ * freed.
3447 ++ */
3448 ++ cfg80211_process_wdev_events(wdev);
3449 + break;
3450 + case NETDEV_PRE_UP:
3451 + if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
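To make the NETDEV_UNREGISTER hunk above work, the two hunks below promote cfg80211_process_wdev_events() from a static helper in util.c to a function declared in core.h, so the notifier can drain any events still queued on the wdev before the structure goes away:

    /* previously static in net/wireless/util.c */
    void cfg80211_process_wdev_events(struct wireless_dev *wdev);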
3452 +diff --git a/net/wireless/core.h b/net/wireless/core.h
3453 +index a570ff9..8351645 100644
3454 +--- a/net/wireless/core.h
3455 ++++ b/net/wireless/core.h
3456 +@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
3457 + struct net_device *dev, enum nl80211_iftype ntype,
3458 + u32 *flags, struct vif_params *params);
3459 + void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
3460 ++void cfg80211_process_wdev_events(struct wireless_dev *wdev);
3461 +
3462 + int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
3463 + struct wireless_dev *wdev,
3464 +diff --git a/net/wireless/util.c b/net/wireless/util.c
3465 +index bbcb58e..18e22be 100644
3466 +--- a/net/wireless/util.c
3467 ++++ b/net/wireless/util.c
3468 +@@ -719,7 +719,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
3469 + wdev->connect_keys = NULL;
3470 + }
3471 +
3472 +-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
3473 ++void cfg80211_process_wdev_events(struct wireless_dev *wdev)
3474 + {
3475 + struct cfg80211_event *ev;
3476 + unsigned long flags;
3477 +@@ -975,6 +975,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
3478 + }
3479 + mutex_unlock(&rdev->devlist_mtx);
3480 +
3481 ++ if (total == 1)
3482 ++ return 0;
3483 ++
3484 + for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
3485 + const struct ieee80211_iface_combination *c;
3486 + struct ieee80211_iface_limit *limits;