Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2073 - genpatches-2.6/trunk/3.2
Date: Mon, 30 Jan 2012 21:05:17
Message-Id: 20120130210502.7CA862004B@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2012-01-30 21:05:01 +0000 (Mon, 30 Jan 2012)
3 New Revision: 2073
4
5 Added:
6 genpatches-2.6/trunk/3.2/1001_linux-3.2.2.patch
7 Log:
8 Add linux 3.2.2 for real this time.
9
10 Added: genpatches-2.6/trunk/3.2/1001_linux-3.2.2.patch
11 ===================================================================
12 --- genpatches-2.6/trunk/3.2/1001_linux-3.2.2.patch (rev 0)
13 +++ genpatches-2.6/trunk/3.2/1001_linux-3.2.2.patch 2012-01-30 21:05:01 UTC (rev 2073)
14 @@ -0,0 +1,6552 @@
15 +diff --git a/Makefile b/Makefile
16 +index c5edffa..2f684da 100644
17 +--- a/Makefile
18 ++++ b/Makefile
19 +@@ -1,6 +1,6 @@
20 + VERSION = 3
21 + PATCHLEVEL = 2
22 +-SUBLEVEL = 1
23 ++SUBLEVEL = 2
24 + EXTRAVERSION =
25 + NAME = Saber-toothed Squirrel
26 +
27 +diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
28 +index bfb4d01..5207035 100644
29 +--- a/arch/ia64/kernel/acpi.c
30 ++++ b/arch/ia64/kernel/acpi.c
31 +@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
32 + static struct acpi_table_slit __initdata *slit_table;
33 + cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
34 +
35 +-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
36 ++static int __init
37 ++get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
38 + {
39 + int pxm;
40 +
41 + pxm = pa->proximity_domain_lo;
42 +- if (ia64_platform_is("sn2"))
43 ++ if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
44 + pxm += pa->proximity_domain_hi[0] << 8;
45 + return pxm;
46 + }
47 +
48 +-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
49 ++static int __init
50 ++get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
51 + {
52 + int pxm;
53 +
54 + pxm = ma->proximity_domain;
55 +- if (!ia64_platform_is("sn2"))
56 ++ if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
57 + pxm &= 0xff;
58 +
59 + return pxm;
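The two ia64 hunks above, together with the x86 srat.c hunk further down, implement the SRAT revision 2 widening of the proximity domain: revision 1 defined only the low byte, while revision 2 turned the formerly reserved hi bytes into bits 31:8 (the ia64 code extends only through hi[0]; the x86 path ORs in all three hi bytes). A minimal userspace sketch of the decode, with a pared-down stand-in for acpi_srat_cpu_affinity and invented test values:

#include <stdio.h>

struct srat_cpu_affinity {                /* stand-in for acpi_srat_cpu_affinity */
	unsigned char proximity_domain_lo;    /* bits 7:0 */
	unsigned char proximity_domain_hi[3]; /* bits 31:8, meaningful for rev >= 2 */
};

static int decode_pxm(const struct srat_cpu_affinity *pa, int srat_revision)
{
	int pxm = pa->proximity_domain_lo;

	/* revision 1 defined only the low byte; revision 2 made it 32 bits */
	if (srat_revision >= 2)
		pxm |= (pa->proximity_domain_hi[0] << 8) |
		       (pa->proximity_domain_hi[1] << 16) |
		       (pa->proximity_domain_hi[2] << 24);
	return pxm;
}

int main(void)
{
	struct srat_cpu_affinity pa = { 0x34, { 0x12, 0x00, 0x00 } };

	printf("rev 1: pxm = 0x%x\n", decode_pxm(&pa, 1)); /* 0x34 */
	printf("rev 2: pxm = 0x%x\n", decode_pxm(&pa, 2)); /* 0x1234 */
	return 0;
}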
60 +diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
61 +index 577abba..83bb960 100644
62 +--- a/arch/score/kernel/entry.S
63 ++++ b/arch/score/kernel/entry.S
64 +@@ -408,7 +408,7 @@ ENTRY(handle_sys)
65 + sw r9, [r0, PT_EPC]
66 +
67 + cmpi.c r27, __NR_syscalls # check syscall number
68 +- bgtu illegal_syscall
69 ++ bgeu illegal_syscall
70 +
71 + slli r8, r27, 2 # get syscall routine
72 + la r11, sys_call_table
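The one-instruction change above fixes an off-by-one: valid syscall numbers run from 0 to __NR_syscalls - 1, so branching to illegal_syscall only when the number is strictly greater (bgtu) let nr == __NR_syscalls through and indexed one slot past sys_call_table; branch-if-greater-or-equal (bgeu) rejects it. The same check in minimal C:

#include <stdio.h>

#define NR_SYSCALLS 4
static const char *sys_call_table[NR_SYSCALLS] = {
	"read", "write", "open", "close"
};

static const char *dispatch(unsigned int nr)
{
	if (nr >= NR_SYSCALLS)          /* the bgeu form: >= is correct */
		return "ENOSYS";
	return sys_call_table[nr];
}

int main(void)
{
	printf("%s\n", dispatch(3));            /* close */
	printf("%s\n", dispatch(NR_SYSCALLS));  /* ENOSYS, not out of bounds */
	return 0;
}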
73 +diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
74 +index 8e41071..49ad773 100644
75 +--- a/arch/x86/include/asm/amd_nb.h
76 ++++ b/arch/x86/include/asm/amd_nb.h
77 +@@ -1,6 +1,7 @@
78 + #ifndef _ASM_X86_AMD_NB_H
79 + #define _ASM_X86_AMD_NB_H
80 +
81 ++#include <linux/ioport.h>
82 + #include <linux/pci.h>
83 +
84 + struct amd_nb_bus_dev_range {
85 +@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
86 + extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
87 +
88 + extern bool early_is_amd_nb(u32 value);
89 ++extern struct resource *amd_get_mmconfig_range(struct resource *res);
90 + extern int amd_cache_northbridges(void);
91 + extern void amd_flush_garts(void);
92 + extern int amd_numa_init(void);
93 +diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
94 +index 8e862aa..1b82f7e 100644
95 +--- a/arch/x86/include/asm/uv/uv_bau.h
96 ++++ b/arch/x86/include/asm/uv/uv_bau.h
97 +@@ -65,7 +65,7 @@
98 + * UV2: Bit 19 selects between
99 + * (0): 10 microsecond timebase and
100 + * (1): 80 microseconds
101 +- * we're using 655us, similar to UV1: 65 units of 10us
102 ++ * we're using 560us, similar to UV1: 65 units of 10us
103 + */
104 + #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
105 + #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
106 +@@ -167,6 +167,7 @@
107 + #define FLUSH_RETRY_TIMEOUT 2
108 + #define FLUSH_GIVEUP 3
109 + #define FLUSH_COMPLETE 4
110 ++#define FLUSH_RETRY_BUSYBUG 5
111 +
112 + /*
113 + * tuning the action when the numalink network is extremely delayed
114 +@@ -235,10 +236,10 @@ struct bau_msg_payload {
115 +
116 +
117 + /*
118 +- * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
119 ++ * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
120 + * see table 4.2.3.0.1 in broacast_assist spec.
121 + */
122 +-struct bau_msg_header {
123 ++struct uv1_bau_msg_header {
124 + unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
125 + /* bits 5:0 */
126 + unsigned int base_dest_nasid:15; /* nasid of the first bit */
127 +@@ -318,19 +319,87 @@ struct bau_msg_header {
128 + };
129 +
130 + /*
131 ++ * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
132 ++ * see figure 9-2 of harp_sys.pdf
133 ++ */
134 ++struct uv2_bau_msg_header {
135 ++ unsigned int base_dest_nasid:15; /* nasid of the first bit */
136 ++ /* bits 14:0 */ /* in uvhub map */
137 ++ unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
138 ++ /* bits 19:15 */
139 ++ unsigned int rsvd_1:1; /* must be zero */
140 ++ /* bit 20 */
141 ++ /* Address bits 59:21 */
142 ++ /* bits 25:2 of address (44:21) are payload */
143 ++ /* these next 24 bits become bytes 12-14 of msg */
144 ++ /* bits 28:21 land in byte 12 */
145 ++ unsigned int replied_to:1; /* sent as 0 by the source to
146 ++ byte 12 */
147 ++ /* bit 21 */
148 ++ unsigned int msg_type:3; /* software type of the
149 ++ message */
150 ++ /* bits 24:22 */
151 ++ unsigned int canceled:1; /* message canceled, resource
152 ++ is to be freed*/
153 ++ /* bit 25 */
154 ++ unsigned int payload_1:3; /* not currently used */
155 ++ /* bits 28:26 */
156 ++
157 ++ /* bits 36:29 land in byte 13 */
158 ++ unsigned int payload_2a:3; /* not currently used */
159 ++ unsigned int payload_2b:5; /* not currently used */
160 ++ /* bits 36:29 */
161 ++
162 ++ /* bits 44:37 land in byte 14 */
163 ++ unsigned int payload_3:8; /* not currently used */
164 ++ /* bits 44:37 */
165 ++
166 ++ unsigned int rsvd_2:7; /* reserved */
167 ++ /* bits 51:45 */
168 ++ unsigned int swack_flag:1; /* software acknowledge flag */
169 ++ /* bit 52 */
170 ++ unsigned int rsvd_3a:3; /* must be zero */
171 ++ unsigned int rsvd_3b:8; /* must be zero */
172 ++ unsigned int rsvd_3c:8; /* must be zero */
173 ++ unsigned int rsvd_3d:3; /* must be zero */
174 ++ /* bits 74:53 */
175 ++ unsigned int fairness:3; /* usually zero */
176 ++ /* bits 77:75 */
177 ++
178 ++ unsigned int sequence:16; /* message sequence number */
179 ++ /* bits 93:78 Suppl_A */
180 ++ unsigned int chaining:1; /* next descriptor is part of
181 ++ this activation*/
182 ++ /* bit 94 */
183 ++ unsigned int multilevel:1; /* multi-level multicast
184 ++ format */
185 ++ /* bit 95 */
186 ++ unsigned int rsvd_4:24; /* ordered / source node /
187 ++ source subnode / aging
188 ++ must be zero */
189 ++ /* bits 119:96 */
190 ++ unsigned int command:8; /* message type */
191 ++ /* bits 127:120 */
192 ++};
193 ++
194 ++/*
195 + * The activation descriptor:
196 + * The format of the message to send, plus all accompanying control
197 + * Should be 64 bytes
198 + */
199 + struct bau_desc {
200 +- struct pnmask distribution;
201 ++ struct pnmask distribution;
202 + /*
203 + * message template, consisting of header and payload:
204 + */
205 +- struct bau_msg_header header;
206 +- struct bau_msg_payload payload;
207 ++ union bau_msg_header {
208 ++ struct uv1_bau_msg_header uv1_hdr;
209 ++ struct uv2_bau_msg_header uv2_hdr;
210 ++ } header;
211 ++
212 ++ struct bau_msg_payload payload;
213 + };
214 +-/*
215 ++/* UV1:
216 + * -payload-- ---------header------
217 + * bytes 0-11 bits 41-56 bits 58-81
218 + * A B (2) C (3)
219 +@@ -340,6 +409,16 @@ struct bau_desc {
220 + * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
221 + * ------------payload queue-----------
222 + */
223 ++/* UV2:
224 ++ * -payload-- ---------header------
225 ++ * bytes 0-11 bits 70-78 bits 21-44
226 ++ * A B (2) C (3)
227 ++ *
228 ++ * A/B/C are moved to:
229 ++ * A C B
230 ++ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
231 ++ * ------------payload queue-----------
232 ++ */
233 +
234 + /*
235 + * The payload queue on the destination side is an array of these.
236 +@@ -385,7 +464,6 @@ struct bau_pq_entry {
237 + struct msg_desc {
238 + struct bau_pq_entry *msg;
239 + int msg_slot;
240 +- int swack_slot;
241 + struct bau_pq_entry *queue_first;
242 + struct bau_pq_entry *queue_last;
243 + };
244 +@@ -439,6 +517,9 @@ struct ptc_stats {
245 + unsigned long s_retry_messages; /* retry broadcasts */
246 + unsigned long s_bau_reenabled; /* for bau enable/disable */
247 + unsigned long s_bau_disabled; /* for bau enable/disable */
248 ++ unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
249 ++ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
250 ++ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
251 + /* destination statistics */
252 + unsigned long d_alltlb; /* times all tlb's on this
253 + cpu were flushed */
254 +@@ -511,9 +592,12 @@ struct bau_control {
255 + short osnode;
256 + short uvhub_cpu;
257 + short uvhub;
258 ++ short uvhub_version;
259 + short cpus_in_socket;
260 + short cpus_in_uvhub;
261 + short partition_base_pnode;
262 ++ short using_desc; /* an index, like uvhub_cpu */
263 ++ unsigned int inuse_map;
264 + unsigned short message_number;
265 + unsigned short uvhub_quiesce;
266 + short socket_acknowledge_count[DEST_Q_SIZE];
267 +@@ -531,6 +615,7 @@ struct bau_control {
268 + int cong_response_us;
269 + int cong_reps;
270 + int cong_period;
271 ++ unsigned long clocks_per_100_usec;
272 + cycles_t period_time;
273 + long period_requests;
274 + struct hub_and_pnode *thp;
275 +@@ -591,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
276 + uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
277 + }
278 +
279 ++static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
280 ++{
281 ++ write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
282 ++}
283 ++
284 + static inline unsigned long read_mmr_sw_ack(void)
285 + {
286 + return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
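The header changes above replace the single bau_msg_header with a union of the UV1 and UV2 wire formats; the tlb_uv.c hunks below then pick a member at runtime via the new uvhub_version field. A pared-down sketch of that selection pattern, with the types reduced to the one field exercised:

#include <stdio.h>

struct uv1_hdr { unsigned int msg_type; /* ... */ };
struct uv2_hdr { unsigned int msg_type; /* ... */ };

struct bau_desc {
	union {
		struct uv1_hdr uv1_hdr;
		struct uv2_hdr uv2_hdr;
	} header;
};

enum { MSG_REGULAR = 1 };

static void set_msg_type(struct bau_desc *d, int uvhub_version, int type)
{
	if (uvhub_version == 1)
		d->header.uv1_hdr.msg_type = type;
	else
		d->header.uv2_hdr.msg_type = type;
}

int main(void)
{
	struct bau_desc d = { 0 };

	set_msg_type(&d, 2, MSG_REGULAR);
	printf("uv2 msg_type = %u\n", d.header.uv2_hdr.msg_type);
	return 0;
}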
287 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
288 +index 4c39baa..bae1efe 100644
289 +--- a/arch/x86/kernel/amd_nb.c
290 ++++ b/arch/x86/kernel/amd_nb.c
291 +@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
292 + return false;
293 + }
294 +
295 ++struct resource *amd_get_mmconfig_range(struct resource *res)
296 ++{
297 ++ u32 address;
298 ++ u64 base, msr;
299 ++ unsigned segn_busn_bits;
300 ++
301 ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
302 ++ return NULL;
303 ++
304 ++ /* assume all cpus from fam10h have mmconfig */
305 ++ if (boot_cpu_data.x86 < 0x10)
306 ++ return NULL;
307 ++
308 ++ address = MSR_FAM10H_MMIO_CONF_BASE;
309 ++ rdmsrl(address, msr);
310 ++
311 ++ /* mmconfig is not enabled */
312 ++ if (!(msr & FAM10H_MMIO_CONF_ENABLE))
313 ++ return NULL;
314 ++
315 ++ base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
316 ++
317 ++ segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
318 ++ FAM10H_MMIO_CONF_BUSRANGE_MASK;
319 ++
320 ++ res->flags = IORESOURCE_MEM;
321 ++ res->start = base;
322 ++ res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
323 ++ return res;
324 ++}
325 ++
326 + int amd_get_subcaches(int cpu)
327 + {
328 + struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
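In the new amd_get_mmconfig_range() above, one MSR packs an enable bit, a bus-range exponent and a base address, and each PCI bus takes 1 MiB of MMCONFIG space, hence the "+ 20" in the size computation. A standalone sketch of the decode, using simplified local constants in place of the FAM10H_* definitions and an invented MSR value:

#include <stdio.h>
#include <stdint.h>

#define MMIO_CONF_ENABLE         (1ULL << 0)
#define MMIO_CONF_BUSRANGE_SHIFT 2
#define MMIO_CONF_BUSRANGE_MASK  0xfULL
#define MMIO_CONF_BASE_SHIFT     20
#define MMIO_CONF_BASE_MASK      0xfffffffffULL

int main(void)
{
	uint64_t msr = 0xe0000000ULL | MMIO_CONF_ENABLE |
		       (8ULL << MMIO_CONF_BUSRANGE_SHIFT); /* 2^8 = 256 buses */
	uint64_t base, end;
	unsigned segn_busn_bits;

	if (!(msr & MMIO_CONF_ENABLE))
		return 1;                       /* mmconfig not enabled */

	base = msr & (MMIO_CONF_BASE_MASK << MMIO_CONF_BASE_SHIFT);
	segn_busn_bits = (msr >> MMIO_CONF_BUSRANGE_SHIFT) &
			 MMIO_CONF_BUSRANGE_MASK;
	/* 1 MiB of config space per bus: size = buses << 20 */
	end = base + (1ULL << (segn_busn_bits + 20)) - 1;

	printf("mmconfig [%#llx-%#llx]\n",
	       (unsigned long long)base, (unsigned long long)end);
	return 0;
}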
329 +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
330 +index 9d59bba..79b05b8 100644
331 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c
332 ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
333 +@@ -769,7 +769,12 @@ void __init uv_system_init(void)
334 + for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
335 + uv_possible_blades +=
336 + hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
337 +- printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
338 ++
339 ++ /* uv_num_possible_blades() is really the hub count */
340 ++ printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
341 ++ is_uv1_hub() ? uv_num_possible_blades() :
342 ++ (uv_num_possible_blades() + 1) / 2,
343 ++ uv_num_possible_blades());
344 +
345 + bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
346 + uv_blade_info = kzalloc(bytes, GFP_KERNEL);
347 +diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
348 +index 4b5ba85..845df68 100644
349 +--- a/arch/x86/mm/mmap.c
350 ++++ b/arch/x86/mm/mmap.c
351 +@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
352 + */
353 + if (current->flags & PF_RANDOMIZE) {
354 + if (mmap_is_ia32())
355 +- rnd = (long)get_random_int() % (1<<8);
356 ++ rnd = get_random_int() % (1<<8);
357 + else
358 +- rnd = (long)(get_random_int() % (1<<28));
359 ++ rnd = get_random_int() % (1<<28);
360 + }
361 + return rnd << PAGE_SHIFT;
362 + }
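Dropping the (long) casts above matters mostly on 32-bit, where long is 32 bits: C's % takes the sign of its left operand, so a random u32 reinterpreted as a negative signed value produces a negative remainder and hence a negative randomization offset. Unsigned arithmetic pins the result into the intended range. A small demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r = 0xdeadbeef;            /* stands in for get_random_int() */

	long bad  = (int32_t)r % (1 << 8);  /* -17: the sign leaks through */
	long good = r % (1 << 8);           /* 239: always non-negative */

	printf("signed: %ld, unsigned: %ld\n", bad, good);
	return 0;
}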
363 +diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
364 +index 81dbfde..7efd0c6 100644
365 +--- a/arch/x86/mm/srat.c
366 ++++ b/arch/x86/mm/srat.c
367 +@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
368 + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
369 + return;
370 + pxm = pa->proximity_domain_lo;
371 ++ if (acpi_srat_revision >= 2)
372 ++ pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
373 + node = setup_node(pxm);
374 + if (node < 0) {
375 + printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
376 +@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
377 + start = ma->base_address;
378 + end = start + ma->length;
379 + pxm = ma->proximity_domain;
380 ++ if (acpi_srat_revision <= 1)
381 ++ pxm &= 0xff;
382 + node = setup_node(pxm);
383 + if (node < 0) {
384 + printk(KERN_ERR "SRAT: Too many proximity domains.\n");
385 +diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
386 +index 6b8759f..d24d3da 100644
387 +--- a/arch/x86/pci/Makefile
388 ++++ b/arch/x86/pci/Makefile
389 +@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
390 + obj-$(CONFIG_X86_MRST) += mrst.o
391 +
392 + obj-y += common.o early.o
393 +-obj-y += amd_bus.o bus_numa.o
394 ++obj-y += bus_numa.o
395 +
396 ++obj-$(CONFIG_AMD_NB) += amd_bus.o
397 + obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
398 +
399 + ifeq ($(CONFIG_PCI_DEBUG),y)
400 +diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
401 +index 404f21a..f8348ab 100644
402 +--- a/arch/x86/pci/acpi.c
403 ++++ b/arch/x86/pci/acpi.c
404 +@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
405 + struct acpi_resource_address64 addr;
406 + acpi_status status;
407 + unsigned long flags;
408 +- u64 start, end;
409 ++ u64 start, orig_end, end;
410 +
411 + status = resource_to_addr(acpi_res, &addr);
412 + if (!ACPI_SUCCESS(status))
413 +@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
414 + return AE_OK;
415 +
416 + start = addr.minimum + addr.translation_offset;
417 +- end = addr.maximum + addr.translation_offset;
418 ++ orig_end = end = addr.maximum + addr.translation_offset;
419 ++
420 ++ /* Exclude non-addressable range or non-addressable portion of range */
421 ++ end = min(end, (u64)iomem_resource.end);
422 ++ if (end <= start) {
423 ++ dev_info(&info->bridge->dev,
424 ++ "host bridge window [%#llx-%#llx] "
425 ++ "(ignored, not CPU addressable)\n", start, orig_end);
426 ++ return AE_OK;
427 ++ } else if (orig_end != end) {
428 ++ dev_info(&info->bridge->dev,
429 ++ "host bridge window [%#llx-%#llx] "
430 ++ "([%#llx-%#llx] ignored, not CPU addressable)\n",
431 ++ start, orig_end, end + 1, orig_end);
432 ++ }
433 +
434 + res = &info->res[info->res_num];
435 + res->name = info->name;
436 +diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
437 +index 026e493..385a940 100644
438 +--- a/arch/x86/pci/amd_bus.c
439 ++++ b/arch/x86/pci/amd_bus.c
440 +@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
441 + { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
442 + };
443 +
444 +-static u64 __initdata fam10h_mmconf_start;
445 +-static u64 __initdata fam10h_mmconf_end;
446 +-static void __init get_pci_mmcfg_amd_fam10h_range(void)
447 +-{
448 +- u32 address;
449 +- u64 base, msr;
450 +- unsigned segn_busn_bits;
451 +-
452 +- /* assume all cpus from fam10h have mmconf */
453 +- if (boot_cpu_data.x86 < 0x10)
454 +- return;
455 +-
456 +- address = MSR_FAM10H_MMIO_CONF_BASE;
457 +- rdmsrl(address, msr);
458 +-
459 +- /* mmconfig is not enable */
460 +- if (!(msr & FAM10H_MMIO_CONF_ENABLE))
461 +- return;
462 +-
463 +- base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
464 +-
465 +- segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
466 +- FAM10H_MMIO_CONF_BUSRANGE_MASK;
467 +-
468 +- fam10h_mmconf_start = base;
469 +- fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
470 +-}
471 +-
472 + #define RANGE_NUM 16
473 +
474 + /**
475 +@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
476 + u64 val;
477 + u32 address;
478 + bool found;
479 ++ struct resource fam10h_mmconf_res, *fam10h_mmconf;
480 ++ u64 fam10h_mmconf_start;
481 ++ u64 fam10h_mmconf_end;
482 +
483 + if (!early_pci_allowed())
484 + return -1;
485 +@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
486 + subtract_range(range, RANGE_NUM, 0, end);
487 +
488 + /* get mmconfig */
489 +- get_pci_mmcfg_amd_fam10h_range();
490 ++ fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
491 + /* need to take out mmconf range */
492 +- if (fam10h_mmconf_end) {
493 +- printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
494 ++ if (fam10h_mmconf) {
495 ++ printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
496 ++ fam10h_mmconf_start = fam10h_mmconf->start;
497 ++ fam10h_mmconf_end = fam10h_mmconf->end;
498 + subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
499 + fam10h_mmconf_end + 1);
500 ++ } else {
501 ++ fam10h_mmconf_start = 0;
502 ++ fam10h_mmconf_end = 0;
503 + }
504 +
505 + /* mmio resource */
506 +diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
507 +index 5b55219..9010ca7 100644
508 +--- a/arch/x86/platform/uv/tlb_uv.c
509 ++++ b/arch/x86/platform/uv/tlb_uv.c
510 +@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
511 + * clear of the Timeout bit (as well) will free the resource. No reply will
512 + * be sent (the hardware will only do one reply per message).
513 + */
514 +-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
515 ++static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
516 ++ int do_acknowledge)
517 + {
518 + unsigned long dw;
519 + struct bau_pq_entry *msg;
520 +
521 + msg = mdp->msg;
522 +- if (!msg->canceled) {
523 ++ if (!msg->canceled && do_acknowledge) {
524 + dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
525 + write_mmr_sw_ack(dw);
526 + }
527 +@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
528 + if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
529 + unsigned long mr;
530 + /*
531 +- * is the resource timed out?
532 +- * make everyone ignore the cancelled message.
533 ++ * Is the resource timed out?
534 ++ * Make everyone ignore the cancelled message.
535 + */
536 + msg2->canceled = 1;
537 + stat->d_canceled++;
538 +@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
539 + * Do all the things a cpu should do for a TLB shootdown message.
540 + * Other cpu's may come here at the same time for this message.
541 + */
542 +-static void bau_process_message(struct msg_desc *mdp,
543 +- struct bau_control *bcp)
544 ++static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
545 ++ int do_acknowledge)
546 + {
547 + short socket_ack_count = 0;
548 + short *sp;
549 +@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
550 + if (msg_ack_count == bcp->cpus_in_uvhub) {
551 + /*
552 + * All cpus in uvhub saw it; reply
553 ++ * (unless we are in the UV2 workaround)
554 + */
555 +- reply_to_message(mdp, bcp);
556 ++ reply_to_message(mdp, bcp, do_acknowledge);
557 + }
558 + }
559 +
560 +@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
561 + /*
562 + * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
563 + */
564 +-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
565 ++static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
566 + {
567 + unsigned long descriptor_status;
568 + unsigned long descriptor_status2;
569 +
570 + descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
571 +- descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
572 ++ descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
573 + descriptor_status = (descriptor_status << 1) | descriptor_status2;
574 + return descriptor_status;
575 + }
576 +
577 ++/*
578 ++ * Return whether the status of the descriptor that is normally used for this
579 ++ * cpu (the one indexed by its hub-relative cpu number) is busy.
580 ++ * The status of the original 32 descriptors is always reflected in the 64
581 ++ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
582 ++ * The bit provided by the activation_status_2 register is irrelevant to
583 ++ * the status if it is only being tested for busy or not busy.
584 ++ */
585 ++int normal_busy(struct bau_control *bcp)
586 ++{
587 ++ int cpu = bcp->uvhub_cpu;
588 ++ int mmr_offset;
589 ++ int right_shift;
590 ++
591 ++ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
592 ++ right_shift = cpu * UV_ACT_STATUS_SIZE;
593 ++ return (((((read_lmmr(mmr_offset) >> right_shift) &
594 ++ UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
595 ++}
596 ++
597 ++/*
598 ++ * Entered when a bau descriptor has gone into a permanent busy wait because
599 ++ * of a hardware bug.
600 ++ * Workaround the bug.
601 ++ */
602 ++int handle_uv2_busy(struct bau_control *bcp)
603 ++{
604 ++ int busy_one = bcp->using_desc;
605 ++ int normal = bcp->uvhub_cpu;
606 ++ int selected = -1;
607 ++ int i;
608 ++ unsigned long descriptor_status;
609 ++ unsigned long status;
610 ++ int mmr_offset;
611 ++ struct bau_desc *bau_desc_old;
612 ++ struct bau_desc *bau_desc_new;
613 ++ struct bau_control *hmaster = bcp->uvhub_master;
614 ++ struct ptc_stats *stat = bcp->statp;
615 ++ cycles_t ttm;
616 ++
617 ++ stat->s_uv2_wars++;
618 ++ spin_lock(&hmaster->uvhub_lock);
619 ++ /* try for the original first */
620 ++ if (busy_one != normal) {
621 ++ if (!normal_busy(bcp))
622 ++ selected = normal;
623 ++ }
624 ++ if (selected < 0) {
625 ++ /* can't use the normal, select an alternate */
626 ++ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
627 ++ descriptor_status = read_lmmr(mmr_offset);
628 ++
629 ++ /* scan available descriptors 32-63 */
630 ++ for (i = 0; i < UV_CPUS_PER_AS; i++) {
631 ++ if ((hmaster->inuse_map & (1 << i)) == 0) {
632 ++ status = ((descriptor_status >>
633 ++ (i * UV_ACT_STATUS_SIZE)) &
634 ++ UV_ACT_STATUS_MASK) << 1;
635 ++ if (status != UV2H_DESC_BUSY) {
636 ++ selected = i + UV_CPUS_PER_AS;
637 ++ break;
638 ++ }
639 ++ }
640 ++ }
641 ++ }
642 ++
643 ++ if (busy_one != normal)
644 ++ /* mark the busy alternate as not in-use */
645 ++ hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
646 ++
647 ++ if (selected >= 0) {
648 ++ /* switch to the selected descriptor */
649 ++ if (selected != normal) {
650 ++ /* set the selected alternate as in-use */
651 ++ hmaster->inuse_map |=
652 ++ (1 << (selected - UV_CPUS_PER_AS));
653 ++ if (selected > stat->s_uv2_wars_hw)
654 ++ stat->s_uv2_wars_hw = selected;
655 ++ }
656 ++ bau_desc_old = bcp->descriptor_base;
657 ++ bau_desc_old += (ITEMS_PER_DESC * busy_one);
658 ++ bcp->using_desc = selected;
659 ++ bau_desc_new = bcp->descriptor_base;
660 ++ bau_desc_new += (ITEMS_PER_DESC * selected);
661 ++ *bau_desc_new = *bau_desc_old;
662 ++ } else {
663 ++ /*
664 ++ * All are busy. Wait for the normal one for this cpu to
665 ++ * free up.
666 ++ */
667 ++ stat->s_uv2_war_waits++;
668 ++ spin_unlock(&hmaster->uvhub_lock);
669 ++ ttm = get_cycles();
670 ++ do {
671 ++ cpu_relax();
672 ++ } while (normal_busy(bcp));
673 ++ spin_lock(&hmaster->uvhub_lock);
674 ++ /* switch to the original descriptor */
675 ++ bcp->using_desc = normal;
676 ++ bau_desc_old = bcp->descriptor_base;
677 ++ bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
678 ++ bcp->using_desc = (ITEMS_PER_DESC * normal);
679 ++ bau_desc_new = bcp->descriptor_base;
680 ++ bau_desc_new += (ITEMS_PER_DESC * normal);
681 ++ *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
682 ++ }
683 ++ spin_unlock(&hmaster->uvhub_lock);
684 ++ return FLUSH_RETRY_BUSYBUG;
685 ++}
686 ++
687 + static int uv2_wait_completion(struct bau_desc *bau_desc,
688 + unsigned long mmr_offset, int right_shift,
689 + struct bau_control *bcp, long try)
690 + {
691 + unsigned long descriptor_stat;
692 + cycles_t ttm;
693 +- int cpu = bcp->uvhub_cpu;
694 ++ int desc = bcp->using_desc;
695 ++ long busy_reps = 0;
696 + struct ptc_stats *stat = bcp->statp;
697 +
698 +- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
699 ++ descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
700 +
701 + /* spin on the status MMR, waiting for it to go idle */
702 + while (descriptor_stat != UV2H_DESC_IDLE) {
703 +@@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
704 + bcp->conseccompletes = 0;
705 + return FLUSH_RETRY_TIMEOUT;
706 + } else {
707 ++ busy_reps++;
708 ++ if (busy_reps > 1000000) {
709 ++ /* not to hammer on the clock */
710 ++ busy_reps = 0;
711 ++ ttm = get_cycles();
712 ++ if ((ttm - bcp->send_message) >
713 ++ (bcp->clocks_per_100_usec)) {
714 ++ return handle_uv2_busy(bcp);
715 ++ }
716 ++ }
717 + /*
718 + * descriptor_stat is still BUSY
719 + */
720 + cpu_relax();
721 + }
722 +- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
723 ++ descriptor_stat = uv2_read_status(mmr_offset, right_shift,
724 ++ desc);
725 + }
726 + bcp->conseccompletes++;
727 + return FLUSH_COMPLETE;
728 +@@ -563,17 +687,17 @@ static int wait_completion(struct bau_desc *bau_desc,
729 + {
730 + int right_shift;
731 + unsigned long mmr_offset;
732 +- int cpu = bcp->uvhub_cpu;
733 ++ int desc = bcp->using_desc;
734 +
735 +- if (cpu < UV_CPUS_PER_AS) {
736 ++ if (desc < UV_CPUS_PER_AS) {
737 + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
738 +- right_shift = cpu * UV_ACT_STATUS_SIZE;
739 ++ right_shift = desc * UV_ACT_STATUS_SIZE;
740 + } else {
741 + mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
742 +- right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
743 ++ right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
744 + }
745 +
746 +- if (is_uv1_hub())
747 ++ if (bcp->uvhub_version == 1)
748 + return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
749 + bcp, try);
750 + else
751 +@@ -752,19 +876,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
752 + * Returns 1 if it gives up entirely and the original cpu mask is to be
753 + * returned to the kernel.
754 + */
755 +-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
756 +- struct cpumask *flush_mask, struct bau_control *bcp)
757 ++int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
758 + {
759 + int seq_number = 0;
760 + int completion_stat = 0;
761 ++ int uv1 = 0;
762 + long try = 0;
763 + unsigned long index;
764 + cycles_t time1;
765 + cycles_t time2;
766 + struct ptc_stats *stat = bcp->statp;
767 + struct bau_control *hmaster = bcp->uvhub_master;
768 ++ struct uv1_bau_msg_header *uv1_hdr = NULL;
769 ++ struct uv2_bau_msg_header *uv2_hdr = NULL;
770 ++ struct bau_desc *bau_desc;
771 +
772 +- if (is_uv1_hub())
773 ++ if (bcp->uvhub_version == 1)
774 + uv1_throttle(hmaster, stat);
775 +
776 + while (hmaster->uvhub_quiesce)
777 +@@ -772,22 +899,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
778 +
779 + time1 = get_cycles();
780 + do {
781 +- if (try == 0) {
782 +- bau_desc->header.msg_type = MSG_REGULAR;
783 ++ bau_desc = bcp->descriptor_base;
784 ++ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
785 ++ if (bcp->uvhub_version == 1) {
786 ++ uv1 = 1;
787 ++ uv1_hdr = &bau_desc->header.uv1_hdr;
788 ++ } else
789 ++ uv2_hdr = &bau_desc->header.uv2_hdr;
790 ++ if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
791 ++ if (uv1)
792 ++ uv1_hdr->msg_type = MSG_REGULAR;
793 ++ else
794 ++ uv2_hdr->msg_type = MSG_REGULAR;
795 + seq_number = bcp->message_number++;
796 + } else {
797 +- bau_desc->header.msg_type = MSG_RETRY;
798 ++ if (uv1)
799 ++ uv1_hdr->msg_type = MSG_RETRY;
800 ++ else
801 ++ uv2_hdr->msg_type = MSG_RETRY;
802 + stat->s_retry_messages++;
803 + }
804 +
805 +- bau_desc->header.sequence = seq_number;
806 +- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
807 ++ if (uv1)
808 ++ uv1_hdr->sequence = seq_number;
809 ++ else
810 ++ uv2_hdr->sequence = seq_number;
811 ++ index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
812 + bcp->send_message = get_cycles();
813 +
814 + write_mmr_activation(index);
815 +
816 + try++;
817 + completion_stat = wait_completion(bau_desc, bcp, try);
818 ++ /* UV2: wait_completion() may change the bcp->using_desc */
819 +
820 + handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
821 +
822 +@@ -798,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
823 + }
824 + cpu_relax();
825 + } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
826 ++ (completion_stat == FLUSH_RETRY_BUSYBUG) ||
827 + (completion_stat == FLUSH_RETRY_TIMEOUT));
828 +
829 + time2 = get_cycles();
830 +@@ -812,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
831 + record_send_stats(time1, time2, bcp, stat, completion_stat, try);
832 +
833 + if (completion_stat == FLUSH_GIVEUP)
834 ++ /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
835 + return 1;
836 + return 0;
837 + }
838 +@@ -967,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
839 + stat->s_ntargself++;
840 +
841 + bau_desc = bcp->descriptor_base;
842 +- bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
843 ++ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
844 + bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
845 + if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
846 + return NULL;
847 +@@ -980,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
848 + * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
849 + * or 1 if it gave up and the original cpumask should be returned.
850 + */
851 +- if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
852 ++ if (!uv_flush_send_and_wait(flush_mask, bcp))
853 + return NULL;
854 + else
855 + return cpumask;
856 + }
857 +
858 + /*
859 ++ * Search the message queue for any 'other' message with the same software
860 ++ * acknowledge resource bit vector.
861 ++ */
862 ++struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
863 ++ struct bau_control *bcp, unsigned char swack_vec)
864 ++{
865 ++ struct bau_pq_entry *msg_next = msg + 1;
866 ++
867 ++ if (msg_next > bcp->queue_last)
868 ++ msg_next = bcp->queue_first;
869 ++ while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
870 ++ if (msg_next->swack_vec == swack_vec)
871 ++ return msg_next;
872 ++ msg_next++;
873 ++ if (msg_next > bcp->queue_last)
874 ++ msg_next = bcp->queue_first;
875 ++ }
876 ++ return NULL;
877 ++}
878 ++
879 ++/*
880 ++ * UV2 needs to work around a bug in which an arriving message has not
881 ++ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
882 ++ * Such a message must be ignored.
883 ++ */
884 ++void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
885 ++{
886 ++ unsigned long mmr_image;
887 ++ unsigned char swack_vec;
888 ++ struct bau_pq_entry *msg = mdp->msg;
889 ++ struct bau_pq_entry *other_msg;
890 ++
891 ++ mmr_image = read_mmr_sw_ack();
892 ++ swack_vec = msg->swack_vec;
893 ++
894 ++ if ((swack_vec & mmr_image) == 0) {
895 ++ /*
896 ++ * This message was assigned a swack resource, but no
897 ++ * reserved acknowlegment is pending.
898 ++ * The bug has prevented this message from setting the MMR.
899 ++ * And no other message has used the same sw_ack resource.
900 ++ * Do the requested shootdown but do not reply to the msg.
901 ++ * (the 0 means make no acknowledge)
902 ++ */
903 ++ bau_process_message(mdp, bcp, 0);
904 ++ return;
905 ++ }
906 ++
907 ++ /*
908 ++ * Some message has set the MMR 'pending' bit; it might have been
909 ++ * another message. Look for that message.
910 ++ */
911 ++ other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
912 ++ if (other_msg) {
913 ++ /* There is another. Do not ack the current one. */
914 ++ bau_process_message(mdp, bcp, 0);
915 ++ /*
916 ++ * Let the natural processing of that message acknowledge
917 ++ * it. Don't get the processing of sw_ack's out of order.
918 ++ */
919 ++ return;
920 ++ }
921 ++
922 ++ /*
923 ++ * There is no other message using this sw_ack, so it is safe to
924 ++ * acknowledge it.
925 ++ */
926 ++ bau_process_message(mdp, bcp, 1);
927 ++
928 ++ return;
929 ++}
930 ++
931 ++/*
932 + * The BAU message interrupt comes here. (registered by set_intr_gate)
933 + * See entry_64.S
934 + *
935 +@@ -1022,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
936 + count++;
937 +
938 + msgdesc.msg_slot = msg - msgdesc.queue_first;
939 +- msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
940 + msgdesc.msg = msg;
941 +- bau_process_message(&msgdesc, bcp);
942 ++ if (bcp->uvhub_version == 2)
943 ++ process_uv2_message(&msgdesc, bcp);
944 ++ else
945 ++ bau_process_message(&msgdesc, bcp, 1);
946 +
947 + msg++;
948 + if (msg > msgdesc.queue_last)
949 +@@ -1083,7 +1304,7 @@ static void __init enable_timeouts(void)
950 + */
951 + mmr_image |= (1L << SOFTACK_MSHIFT);
952 + if (is_uv2_hub()) {
953 +- mmr_image |= (1L << UV2_LEG_SHFT);
954 ++ mmr_image &= ~(1L << UV2_LEG_SHFT);
955 + mmr_image |= (1L << UV2_EXT_SHFT);
956 + }
957 + write_mmr_misc_control(pnode, mmr_image);
958 +@@ -1142,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
959 + seq_printf(file,
960 + "all one mult none retry canc nocan reset rcan ");
961 + seq_printf(file,
962 +- "disable enable\n");
963 ++ "disable enable wars warshw warwaits\n");
964 + }
965 + if (cpu < num_possible_cpus() && cpu_online(cpu)) {
966 + stat = &per_cpu(ptcstats, cpu);
967 +@@ -1173,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
968 + stat->d_nomsg, stat->d_retries, stat->d_canceled,
969 + stat->d_nocanceled, stat->d_resets,
970 + stat->d_rcanceled);
971 +- seq_printf(file, "%ld %ld\n",
972 +- stat->s_bau_disabled, stat->s_bau_reenabled);
973 ++ seq_printf(file, "%ld %ld %ld %ld %ld\n",
974 ++ stat->s_bau_disabled, stat->s_bau_reenabled,
975 ++ stat->s_uv2_wars, stat->s_uv2_wars_hw,
976 ++ stat->s_uv2_war_waits);
977 + }
978 + return 0;
979 + }
980 +@@ -1432,12 +1655,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
981 + {
982 + int i;
983 + int cpu;
984 ++ int uv1 = 0;
985 + unsigned long gpa;
986 + unsigned long m;
987 + unsigned long n;
988 + size_t dsize;
989 + struct bau_desc *bau_desc;
990 + struct bau_desc *bd2;
991 ++ struct uv1_bau_msg_header *uv1_hdr;
992 ++ struct uv2_bau_msg_header *uv2_hdr;
993 + struct bau_control *bcp;
994 +
995 + /*
996 +@@ -1451,6 +1677,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
997 + gpa = uv_gpa(bau_desc);
998 + n = uv_gpa_to_gnode(gpa);
999 + m = uv_gpa_to_offset(gpa);
1000 ++ if (is_uv1_hub())
1001 ++ uv1 = 1;
1002 +
1003 + /* the 14-bit pnode */
1004 + write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1005 +@@ -1461,21 +1689,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
1006 + */
1007 + for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1008 + memset(bd2, 0, sizeof(struct bau_desc));
1009 +- bd2->header.swack_flag = 1;
1010 +- /*
1011 +- * The base_dest_nasid set in the message header is the nasid
1012 +- * of the first uvhub in the partition. The bit map will
1013 +- * indicate destination pnode numbers relative to that base.
1014 +- * They may not be consecutive if nasid striding is being used.
1015 +- */
1016 +- bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
1017 +- bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
1018 +- bd2->header.command = UV_NET_ENDPOINT_INTD;
1019 +- bd2->header.int_both = 1;
1020 +- /*
1021 +- * all others need to be set to zero:
1022 +- * fairness chaining multilevel count replied_to
1023 +- */
1024 ++ if (uv1) {
1025 ++ uv1_hdr = &bd2->header.uv1_hdr;
1026 ++ uv1_hdr->swack_flag = 1;
1027 ++ /*
1028 ++ * The base_dest_nasid set in the message header
1029 ++ * is the nasid of the first uvhub in the partition.
1030 ++ * The bit map will indicate destination pnode numbers
1031 ++ * relative to that base. They may not be consecutive
1032 ++ * if nasid striding is being used.
1033 ++ */
1034 ++ uv1_hdr->base_dest_nasid =
1035 ++ UV_PNODE_TO_NASID(base_pnode);
1036 ++ uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1037 ++ uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1038 ++ uv1_hdr->int_both = 1;
1039 ++ /*
1040 ++ * all others need to be set to zero:
1041 ++ * fairness chaining multilevel count replied_to
1042 ++ */
1043 ++ } else {
1044 ++ uv2_hdr = &bd2->header.uv2_hdr;
1045 ++ uv2_hdr->swack_flag = 1;
1046 ++ uv2_hdr->base_dest_nasid =
1047 ++ UV_PNODE_TO_NASID(base_pnode);
1048 ++ uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1049 ++ uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1050 ++ }
1051 + }
1052 + for_each_present_cpu(cpu) {
1053 + if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1054 +@@ -1531,6 +1771,7 @@ static void pq_init(int node, int pnode)
1055 + write_mmr_payload_first(pnode, pn_first);
1056 + write_mmr_payload_tail(pnode, first);
1057 + write_mmr_payload_last(pnode, last);
1058 ++ write_gmmr_sw_ack(pnode, 0xffffUL);
1059 +
1060 + /* in effect, all msg_type's are set to MSG_NOOP */
1061 + memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1062 +@@ -1584,14 +1825,14 @@ static int calculate_destination_timeout(void)
1063 + ts_ns = base * mult1 * mult2;
1064 + ret = ts_ns / 1000;
1065 + } else {
1066 +- /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
1067 +- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1068 ++ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1069 ++ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1070 + mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1071 + if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1072 +- mult1 = 80;
1073 ++ base = 80;
1074 + else
1075 +- mult1 = 10;
1076 +- base = mmr_image & UV2_ACK_MASK;
1077 ++ base = 10;
1078 ++ mult1 = mmr_image & UV2_ACK_MASK;
1079 + ret = mult1 * base;
1080 + }
1081 + return ret;
1082 +@@ -1618,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
1083 + bcp->cong_response_us = congested_respns_us;
1084 + bcp->cong_reps = congested_reps;
1085 + bcp->cong_period = congested_period;
1086 ++ bcp->clocks_per_100_usec = usec_2_cycles(100);
1087 + }
1088 + }
1089 +
1090 +@@ -1728,8 +1970,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1091 + bcp->cpus_in_socket = sdp->num_cpus;
1092 + bcp->socket_master = *smasterp;
1093 + bcp->uvhub = bdp->uvhub;
1094 ++ if (is_uv1_hub())
1095 ++ bcp->uvhub_version = 1;
1096 ++ else if (is_uv2_hub())
1097 ++ bcp->uvhub_version = 2;
1098 ++ else {
1099 ++ printk(KERN_EMERG "uvhub version not 1 or 2\n");
1100 ++ return 1;
1101 ++ }
1102 + bcp->uvhub_master = *hmasterp;
1103 + bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1104 ++ bcp->using_desc = bcp->uvhub_cpu;
1105 + if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1106 + printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1107 + bcp->uvhub_cpu);
1108 +@@ -1845,6 +2096,8 @@ static int __init uv_bau_init(void)
1109 + uv_base_pnode = uv_blade_to_pnode(uvhub);
1110 + }
1111 +
1112 ++ enable_timeouts();
1113 ++
1114 + if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1115 + nobau = 1;
1116 + return 0;
1117 +@@ -1855,7 +2108,6 @@ static int __init uv_bau_init(void)
1118 + if (uv_blade_nr_possible_cpus(uvhub))
1119 + init_uvhub(uvhub, vector, uv_base_pnode);
1120 +
1121 +- enable_timeouts();
1122 + alloc_intr_gate(vector, uv_bau_message_intr1);
1123 +
1124 + for_each_possible_blade(uvhub) {
1125 +@@ -1867,7 +2119,8 @@ static int __init uv_bau_init(void)
1126 + val = 1L << 63;
1127 + write_gmmr_activation(pnode, val);
1128 + mmr = 1; /* should be 1 to broadcast to both sockets */
1129 +- write_mmr_data_broadcast(pnode, mmr);
1130 ++ if (!is_uv1_hub())
1131 ++ write_mmr_data_broadcast(pnode, mmr);
1132 + }
1133 + }
1134 +
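A standalone model of find_another_by_swack() from the hunks above: the payload queue is scanned circularly, stopping at the first empty slot or on wrapping back to the original message, to learn whether another live message holds the same software-ack resource bit. Queue size and contents here are invented:

#include <stdio.h>

#define QSIZE 8

struct pq_entry { unsigned char swack_vec; };

static struct pq_entry queue[QSIZE] = {
	{0x1}, {0x2}, {0x4}, {0x2}, {0x1}, {0x4}, {0x2}, {0x0}
};

static struct pq_entry *find_another_by_swack(struct pq_entry *msg,
					      unsigned char swack_vec)
{
	struct pq_entry *next = msg + 1;

	if (next > &queue[QSIZE - 1])
		next = &queue[0];
	while (next->swack_vec != 0 && next != msg) {
		if (next->swack_vec == swack_vec)
			return next;            /* another user of the resource */
		next++;
		if (next > &queue[QSIZE - 1])
			next = &queue[0];
	}
	return NULL;                            /* the resource is uniquely ours */
}

int main(void)
{
	struct pq_entry *other = find_another_by_swack(&queue[2], 0x4);

	if (other)
		printf("other message at index %d\n", (int)(other - queue));
	else
		printf("no other message shares the resource\n");
	return 0;
}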
1135 +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
1136 +index fbdf0d8..688be8a 100644
1137 +--- a/block/scsi_ioctl.c
1138 ++++ b/block/scsi_ioctl.c
1139 +@@ -24,6 +24,7 @@
1140 + #include <linux/capability.h>
1141 + #include <linux/completion.h>
1142 + #include <linux/cdrom.h>
1143 ++#include <linux/ratelimit.h>
1144 + #include <linux/slab.h>
1145 + #include <linux/times.h>
1146 + #include <asm/uaccess.h>
1147 +@@ -690,6 +691,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
1148 + }
1149 + EXPORT_SYMBOL(scsi_cmd_ioctl);
1150 +
1151 ++int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
1152 ++{
1153 ++ if (bd && bd == bd->bd_contains)
1154 ++ return 0;
1155 ++
1156 ++ /* Actually none of these is particularly useful on a partition,
1157 ++ * but they are safe.
1158 ++ */
1159 ++ switch (cmd) {
1160 ++ case SCSI_IOCTL_GET_IDLUN:
1161 ++ case SCSI_IOCTL_GET_BUS_NUMBER:
1162 ++ case SCSI_IOCTL_GET_PCI:
1163 ++ case SCSI_IOCTL_PROBE_HOST:
1164 ++ case SG_GET_VERSION_NUM:
1165 ++ case SG_SET_TIMEOUT:
1166 ++ case SG_GET_TIMEOUT:
1167 ++ case SG_GET_RESERVED_SIZE:
1168 ++ case SG_SET_RESERVED_SIZE:
1169 ++ case SG_EMULATED_HOST:
1170 ++ return 0;
1171 ++ case CDROM_GET_CAPABILITY:
1172 ++ /* Keep this until we remove the printk below. udev sends it
1173 ++ * and we do not want to spam dmesg about it. CD-ROMs do
1174 ++ * not have partitions, so we get here only for disks.
1175 ++ */
1176 ++ return -ENOTTY;
1177 ++ default:
1178 ++ break;
1179 ++ }
1180 ++
1181 ++ /* In particular, rule out all resets and host-specific ioctls. */
1182 ++ printk_ratelimited(KERN_WARNING
1183 ++ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
1184 ++
1185 ++ return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
1186 ++}
1187 ++EXPORT_SYMBOL(scsi_verify_blk_ioctl);
1188 ++
1189 ++int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
1190 ++ unsigned int cmd, void __user *arg)
1191 ++{
1192 ++ int ret;
1193 ++
1194 ++ ret = scsi_verify_blk_ioctl(bd, cmd);
1195 ++ if (ret < 0)
1196 ++ return ret;
1197 ++
1198 ++ return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
1199 ++}
1200 ++EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
1201 ++
1202 + static int __init blk_scsi_ioctl_init(void)
1203 + {
1204 + blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
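The new scsi_verify_blk_ioctl() above keeps harmless informational ioctls working on partitions while requiring CAP_SYS_RAWIO for everything else (resets, SG_IO and other host-specific commands), instead of letting such commands reach whole-device state through a partition node. A userspace model of that gate, with invented command names:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

enum cmd { GET_IDLUN, GET_BUS_NUMBER, SG_IO_CMD, HOST_RESET };

static int verify_blk_ioctl(bool whole_disk, bool cap_sys_rawio, enum cmd cmd)
{
	if (whole_disk)
		return 0;              /* no restriction on the whole device */

	switch (cmd) {
	case GET_IDLUN:                /* harmless, purely informational */
	case GET_BUS_NUMBER:
		return 0;
	default:
		break;
	}

	/* resets, SG_IO and friends from a mere partition: privileged only */
	return cap_sys_rawio ? 0 : -ENOTTY;
}

int main(void)
{
	printf("SG_IO on partition, unprivileged: %d\n",
	       verify_blk_ioctl(false, false, SG_IO_CMD));     /* -ENOTTY */
	printf("GET_IDLUN on partition:          %d\n",
	       verify_blk_ioctl(false, false, GET_IDLUN));     /* 0 */
	printf("reset on whole disk:             %d\n",
	       verify_blk_ioctl(true, false, HOST_RESET));     /* 0 */
	return 0;
}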
1205 +diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
1206 +index 8c7b997..42163d8 100644
1207 +--- a/drivers/acpi/acpica/dsargs.c
1208 ++++ b/drivers/acpi/acpica/dsargs.c
1209 +@@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
1210 + status = acpi_ds_execute_arguments(node, node->parent,
1211 + extra_desc->extra.aml_length,
1212 + extra_desc->extra.aml_start);
1213 ++ if (ACPI_FAILURE(status)) {
1214 ++ return_ACPI_STATUS(status);
1215 ++ }
1216 ++
1217 ++ /* Validate the region address/length via the host OS */
1218 ++
1219 ++ status = acpi_os_validate_address(obj_desc->region.space_id,
1220 ++ obj_desc->region.address,
1221 ++ (acpi_size) obj_desc->region.length,
1222 ++ acpi_ut_get_node_name(node));
1223 ++
1224 ++ if (ACPI_FAILURE(status)) {
1225 ++ /*
1226 ++ * Invalid address/length. We will emit an error message and mark
1227 ++ * the region as invalid, so that it will cause an additional error if
1228 ++ * it is ever used. Then return AE_OK.
1229 ++ */
1230 ++ ACPI_EXCEPTION((AE_INFO, status,
1231 ++ "During address validation of OpRegion [%4.4s]",
1232 ++ node->name.ascii));
1233 ++ obj_desc->common.flags |= AOPOBJ_INVALID;
1234 ++ status = AE_OK;
1235 ++ }
1236 ++
1237 + return_ACPI_STATUS(status);
1238 + }
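The dsargs.c change above follows a validate-warn-poison-continue pattern: an op-region with a bad address or length is flagged AOPOBJ_INVALID rather than failing the table load, so the error surfaces only if the region is ever actually used. A minimal model with an invented validity check:

#include <stdio.h>

#define FLAG_INVALID 0x1

struct region { unsigned long addr, len; unsigned flags; };

static int validate(const struct region *r)
{
	/* stand-in check: reject zero-length regions */
	return r->len != 0 ? 0 : -1;
}

static int init_region(struct region *r)
{
	if (validate(r) != 0) {
		fprintf(stderr, "invalid region, poisoning\n");
		r->flags |= FLAG_INVALID;   /* fail later, on first use */
	}
	return 0;                           /* the table load still succeeds */
}

static int use_region(const struct region *r)
{
	return (r->flags & FLAG_INVALID) ? -1 : 0;
}

int main(void)
{
	struct region r = { 0x1000, 0, 0 };

	init_region(&r);
	printf("use -> %d\n", use_region(&r));  /* -1: poisoned, not loaded-failed */
	return 0;
}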
1239 +diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
1240 +index 3b5c318..e56f3be 100644
1241 +--- a/drivers/acpi/numa.c
1242 ++++ b/drivers/acpi/numa.c
1243 +@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
1244 + static int node_to_pxm_map[MAX_NUMNODES]
1245 + = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
1246 +
1247 ++unsigned char acpi_srat_revision __initdata;
1248 ++
1249 + int pxm_to_node(int pxm)
1250 + {
1251 + if (pxm < 0)
1252 +@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
1253 +
1254 + static int __init acpi_parse_srat(struct acpi_table_header *table)
1255 + {
1256 ++ struct acpi_table_srat *srat;
1257 + if (!table)
1258 + return -EINVAL;
1259 +
1260 ++ srat = (struct acpi_table_srat *)table;
1261 ++ acpi_srat_revision = srat->header.revision;
1262 ++
1263 + /* Real work done in acpi_table_parse_srat below. */
1264 +
1265 + return 0;
1266 +diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
1267 +index 3a0428e..c850de4 100644
1268 +--- a/drivers/acpi/processor_core.c
1269 ++++ b/drivers/acpi/processor_core.c
1270 +@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
1271 + apic_id = map_mat_entry(handle, type, acpi_id);
1272 + if (apic_id == -1)
1273 + apic_id = map_madt_entry(type, acpi_id);
1274 +- if (apic_id == -1)
1275 +- return apic_id;
1276 ++ if (apic_id == -1) {
1277 ++ /*
1278 ++ * On UP processor, there is no _MAT or MADT table.
1279 ++ * So above apic_id is always set to -1.
1280 ++ *
1281 ++ * BIOS may define multiple CPU handles even for UP processor.
1282 ++ * For example,
1283 ++ *
1284 ++ * Scope (_PR)
1285 ++ * {
1286 ++ * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
1287 ++ * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
1288 ++ * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
1289 ++ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
1290 ++ * }
1291 ++ *
1292 ++ * Ignores apic_id and always return 0 for CPU0's handle.
1293 ++ * Return -1 for other CPU's handle.
1294 ++ */
1295 ++ if (acpi_id == 0)
1296 ++ return acpi_id;
1297 ++ else
1298 ++ return apic_id;
1299 ++ }
1300 +
1301 + #ifdef CONFIG_SMP
1302 + for_each_possible_cpu(i) {
1303 +diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
1304 +index 990f5a8..48e06be 100644
1305 +--- a/drivers/bcma/host_pci.c
1306 ++++ b/drivers/bcma/host_pci.c
1307 +@@ -227,11 +227,14 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
1308 + #ifdef CONFIG_PM
1309 + static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
1310 + {
1311 ++ struct bcma_bus *bus = pci_get_drvdata(dev);
1312 ++
1313 + /* Host specific */
1314 + pci_save_state(dev);
1315 + pci_disable_device(dev);
1316 + pci_set_power_state(dev, pci_choose_state(dev, state));
1317 +
1318 ++ bus->mapped_core = NULL;
1319 + return 0;
1320 + }
1321 +
1322 +diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
1323 +index 587cce5..b0f553b 100644
1324 +--- a/drivers/block/cciss.c
1325 ++++ b/drivers/block/cciss.c
1326 +@@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1327 + case CCISS_BIG_PASSTHRU:
1328 + return cciss_bigpassthru(h, argp);
1329 +
1330 +- /* scsi_cmd_ioctl handles these, below, though some are not */
1331 ++ /* scsi_cmd_blk_ioctl handles these, below, though some are not */
1332 + /* very meaningful for cciss. SG_IO is the main one people want. */
1333 +
1334 + case SG_GET_VERSION_NUM:
1335 +@@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1336 + case SG_EMULATED_HOST:
1337 + case SG_IO:
1338 + case SCSI_IOCTL_SEND_COMMAND:
1339 +- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1340 ++ return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1341 +
1342 +- /* scsi_cmd_ioctl would normally handle these, below, but */
1343 ++ /* scsi_cmd_blk_ioctl would normally handle these, below, but */
1344 + /* they aren't a good fit for cciss, as CD-ROMs are */
1345 + /* not supported, and we don't have any bus/target/lun */
1346 + /* which we present to the kernel. */
1347 +diff --git a/drivers/block/ub.c b/drivers/block/ub.c
1348 +index 0e376d4..7333b9e 100644
1349 +--- a/drivers/block/ub.c
1350 ++++ b/drivers/block/ub.c
1351 +@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
1352 + static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
1353 + unsigned int cmd, unsigned long arg)
1354 + {
1355 +- struct gendisk *disk = bdev->bd_disk;
1356 + void __user *usermem = (void __user *) arg;
1357 + int ret;
1358 +
1359 + mutex_lock(&ub_mutex);
1360 +- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
1361 ++ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
1362 + mutex_unlock(&ub_mutex);
1363 +
1364 + return ret;
1365 +diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
1366 +index 4d0b70a..e46f2f7 100644
1367 +--- a/drivers/block/virtio_blk.c
1368 ++++ b/drivers/block/virtio_blk.c
1369 +@@ -243,8 +243,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
1370 + if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
1371 + return -ENOTTY;
1372 +
1373 +- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
1374 +- (void __user *)data);
1375 ++ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
1376 ++ (void __user *)data);
1377 + }
1378 +
1379 + /* We provide getgeo only to please some old bootloader/partitioning tools */
1380 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1381 +index f997c27..cedb231 100644
1382 +--- a/drivers/cdrom/cdrom.c
1383 ++++ b/drivers/cdrom/cdrom.c
1384 +@@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
1385 + {
1386 + void __user *argp = (void __user *)arg;
1387 + int ret;
1388 +- struct gendisk *disk = bdev->bd_disk;
1389 +
1390 + /*
1391 + * Try the generic SCSI command ioctl's first.
1392 + */
1393 +- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
1394 ++ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1395 + if (ret != -ENOTTY)
1396 + return ret;
1397 +
1398 +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
1399 +index bfc08f6..31b0d1a 100644
1400 +--- a/drivers/gpu/drm/radeon/r100.c
1401 ++++ b/drivers/gpu/drm/radeon/r100.c
1402 +@@ -2177,6 +2177,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
1403 + void r100_bm_disable(struct radeon_device *rdev)
1404 + {
1405 + u32 tmp;
1406 ++ u16 tmp16;
1407 +
1408 + /* disable bus mastering */
1409 + tmp = RREG32(R_000030_BUS_CNTL);
1410 +@@ -2187,8 +2188,8 @@ void r100_bm_disable(struct radeon_device *rdev)
1411 + WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
1412 + tmp = RREG32(RADEON_BUS_CNTL);
1413 + mdelay(1);
1414 +- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1415 +- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1416 ++ pci_read_config_word(rdev->pdev, 0x4, &tmp16);
1417 ++ pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
1418 + mdelay(1);
1419 + }
1420 +
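The r100.c fix above (and the matching rs600.c hunk below) replaces a casted u32 with a real u16: pci_read_config_word() stores exactly two bytes, so reading through (u16 *)&tmp leaves the u32's other bytes stale and makes the result byte-order dependent (on big-endian the word lands in the wrong half), besides violating aliasing rules. A little-endian demonstration with a stand-in two-byte reader:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* stand-in for pci_read_config_word(): writes exactly two bytes */
static void read_config_word(uint16_t *out)
{
	uint16_t val = 0x0007;

	memcpy(out, &val, sizeof(val)); /* memcpy sidesteps the aliasing issue */
}

int main(void)
{
	uint32_t tmp = 0xdeadbeef;      /* upper bytes survive the 2-byte write */

	read_config_word((uint16_t *)&tmp);
	printf("through the cast: %#x\n", tmp);   /* 0xdead0007 on little-endian */

	uint16_t tmp16;
	read_config_word(&tmp16);
	printf("proper u16:       %#x\n", tmp16); /* 0x7 */
	return 0;
}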
1421 +diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1422 +index f5ac7e7..c45d921 100644
1423 +--- a/drivers/gpu/drm/radeon/r600_hdmi.c
1424 ++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1425 +@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
1426 + frame[0xD] = (right_bar >> 8);
1427 +
1428 + r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
1429 ++ /* Our header values (type, version, length) should be alright, Intel
1430 ++ * is using the same. Checksum function also seems to be OK, it works
1431 ++ * fine for audio infoframe. However calculated value is always lower
1432 ++ * by 2 in comparison to fglrx. It breaks displaying anything in case
1433 ++ * of TVs that strictly check the checksum. Hack it manually here to
1434 ++ * workaround this issue. */
1435 ++ frame[0x0] += 2;
1436 +
1437 + WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
1438 + frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1439 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1440 +index c4d00a1..9b39145 100644
1441 +--- a/drivers/gpu/drm/radeon/radeon_device.c
1442 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
1443 +@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
1444 + if (radeon_no_wb == 1)
1445 + rdev->wb.enabled = false;
1446 + else {
1447 +- /* often unreliable on AGP */
1448 + if (rdev->flags & RADEON_IS_AGP) {
1449 ++ /* often unreliable on AGP */
1450 ++ rdev->wb.enabled = false;
1451 ++ } else if (rdev->family < CHIP_R300) {
1452 ++ /* often unreliable on pre-r300 */
1453 + rdev->wb.enabled = false;
1454 + } else {
1455 + rdev->wb.enabled = true;
1456 +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
1457 +index b1053d6..c259e21 100644
1458 +--- a/drivers/gpu/drm/radeon/rs600.c
1459 ++++ b/drivers/gpu/drm/radeon/rs600.c
1460 +@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
1461 +
1462 + void rs600_bm_disable(struct radeon_device *rdev)
1463 + {
1464 +- u32 tmp;
1465 ++ u16 tmp;
1466 +
1467 + /* disable bus mastering */
1468 +- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
1469 ++ pci_read_config_word(rdev->pdev, 0x4, &tmp);
1470 + pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1471 + mdelay(1);
1472 + }
1473 +diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
1474 +index 22a4a05..d21f6d0 100644
1475 +--- a/drivers/hid/Kconfig
1476 ++++ b/drivers/hid/Kconfig
1477 +@@ -335,6 +335,7 @@ config HID_MULTITOUCH
1478 + Say Y here if you have one of the following devices:
1479 + - 3M PCT touch screens
1480 + - ActionStar dual touch panels
1481 ++ - Atmel panels
1482 + - Cando dual touch panels
1483 + - Chunghwa panels
1484 + - CVTouch panels
1485 +@@ -355,6 +356,7 @@ config HID_MULTITOUCH
1486 + - Touch International Panels
1487 + - Unitec Panels
1488 + - XAT optical touch panels
1489 ++ - Xiroku optical touch panels
1490 +
1491 + If unsure, say N.
1492 +
1493 +@@ -620,6 +622,7 @@ config HID_WIIMOTE
1494 + depends on BT_HIDP
1495 + depends on LEDS_CLASS
1496 + select POWER_SUPPLY
1497 ++ select INPUT_FF_MEMLESS
1498 + ---help---
1499 + Support for the Nintendo Wii Remote bluetooth device.
1500 +
1501 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1502 +index af35384..bb656d8 100644
1503 +--- a/drivers/hid/hid-core.c
1504 ++++ b/drivers/hid/hid-core.c
1505 +@@ -362,7 +362,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1506 +
1507 + case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
1508 + parser->global.report_size = item_udata(item);
1509 +- if (parser->global.report_size > 32) {
1510 ++ if (parser->global.report_size > 96) {
1511 + dbg_hid("invalid report_size %d\n",
1512 + parser->global.report_size);
1513 + return -1;
1514 +@@ -1404,11 +1404,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
1515 + { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1516 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1517 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
1518 +- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1519 +- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1520 +- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1521 +- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1522 +- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1523 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1524 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1525 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1526 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1527 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1528 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1529 ++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1530 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1531 + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
1532 + { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
1533 +@@ -1423,6 +1425,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1534 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1535 + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1536 + { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
1537 ++ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1538 + { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
1539 + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
1540 + { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
1541 +@@ -1549,6 +1552,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
1542 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
1543 + { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
1544 + { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
1545 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
1546 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
1547 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
1548 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
1549 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
1550 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
1551 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
1552 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
1553 ++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
1554 + { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
1555 + { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
1556 + { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
1557 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1558 +index 4a441a6..00cabb3 100644
1559 +--- a/drivers/hid/hid-ids.h
1560 ++++ b/drivers/hid/hid-ids.h
1561 +@@ -21,6 +21,7 @@
1562 + #define USB_VENDOR_ID_3M 0x0596
1563 + #define USB_DEVICE_ID_3M1968 0x0500
1564 + #define USB_DEVICE_ID_3M2256 0x0502
1565 ++#define USB_DEVICE_ID_3M3266 0x0506
1566 +
1567 + #define USB_VENDOR_ID_A4TECH 0x09da
1568 + #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
1569 +@@ -145,6 +146,9 @@
1570 + #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
1571 + #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
1572 +
1573 ++#define USB_VENDOR_ID_ATMEL 0x03eb
1574 ++#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
1575 ++
1576 + #define USB_VENDOR_ID_AVERMEDIA 0x07ca
1577 + #define USB_DEVICE_ID_AVER_FM_MR800 0xb800
1578 +
1579 +@@ -230,11 +234,14 @@
1580 +
1581 + #define USB_VENDOR_ID_DWAV 0x0eef
1582 + #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
1583 +-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
1584 +-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
1585 +-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
1586 +-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
1587 +-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
1588 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
1589 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
1590 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
1591 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
1592 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
1593 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
1594 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
1595 ++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
1596 +
1597 + #define USB_VENDOR_ID_ELECOM 0x056e
1598 + #define USB_DEVICE_ID_ELECOM_BM084 0x0061
1599 +@@ -356,6 +363,9 @@
1600 + #define USB_VENDOR_ID_HANVON 0x20b3
1601 + #define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
1602 +
1603 ++#define USB_VENDOR_ID_HANVON_ALT 0x22ed
1604 ++#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
1605 ++
1606 + #define USB_VENDOR_ID_HAPP 0x078b
1607 + #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
1608 + #define USB_DEVICE_ID_UGCI_FLYING 0x0020
1609 +@@ -707,6 +717,17 @@
1610 + #define USB_VENDOR_ID_XAT 0x2505
1611 + #define USB_DEVICE_ID_XAT_CSR 0x0220
1612 +
1613 ++#define USB_VENDOR_ID_XIROKU 0x1477
1614 ++#define USB_DEVICE_ID_XIROKU_SPX 0x1006
1615 ++#define USB_DEVICE_ID_XIROKU_MPX 0x1007
1616 ++#define USB_DEVICE_ID_XIROKU_CSR 0x100e
1617 ++#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
1618 ++#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
1619 ++#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
1620 ++#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
1621 ++#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
1622 ++#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
1623 ++
1624 + #define USB_VENDOR_ID_YEALINK 0x6993
1625 + #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
1626 +
1627 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1628 +index f1c909f..995fc4c 100644
1629 +--- a/drivers/hid/hid-multitouch.c
1630 ++++ b/drivers/hid/hid-multitouch.c
1631 +@@ -609,12 +609,20 @@ static const struct hid_device_id mt_devices[] = {
1632 + { .driver_data = MT_CLS_3M,
1633 + HID_USB_DEVICE(USB_VENDOR_ID_3M,
1634 + USB_DEVICE_ID_3M2256) },
1635 ++ { .driver_data = MT_CLS_3M,
1636 ++ HID_USB_DEVICE(USB_VENDOR_ID_3M,
1637 ++ USB_DEVICE_ID_3M3266) },
1638 +
1639 + /* ActionStar panels */
1640 + { .driver_data = MT_CLS_DEFAULT,
1641 + HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
1642 + USB_DEVICE_ID_ACTIONSTAR_1011) },
1643 +
1644 ++ /* Atmel panels */
1645 ++ { .driver_data = MT_CLS_SERIAL,
1646 ++ HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
1647 ++ USB_DEVICE_ID_ATMEL_MULTITOUCH) },
1648 ++
1649 + /* Cando panels */
1650 + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
1651 + HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
1652 +@@ -645,23 +653,32 @@ static const struct hid_device_id mt_devices[] = {
1653 + USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
1654 +
1655 + /* eGalax devices (resistive) */
1656 +- { .driver_data = MT_CLS_EGALAX,
1657 ++ { .driver_data = MT_CLS_EGALAX,
1658 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1659 +- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1660 +- { .driver_data = MT_CLS_EGALAX,
1661 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
1662 ++ { .driver_data = MT_CLS_EGALAX,
1663 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1664 +- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
1665 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
1666 +
1667 + /* eGalax devices (capacitive) */
1668 +- { .driver_data = MT_CLS_EGALAX,
1669 ++ { .driver_data = MT_CLS_EGALAX,
1670 ++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1671 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
1672 ++ { .driver_data = MT_CLS_EGALAX,
1673 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1674 +- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1675 +- { .driver_data = MT_CLS_EGALAX,
1676 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
1677 ++ { .driver_data = MT_CLS_EGALAX,
1678 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1679 +- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
1680 +- { .driver_data = MT_CLS_EGALAX,
1681 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
1682 ++ { .driver_data = MT_CLS_EGALAX,
1683 + HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1684 +- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
1685 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
1686 ++ { .driver_data = MT_CLS_EGALAX,
1687 ++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1688 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
1689 ++ { .driver_data = MT_CLS_EGALAX,
1690 ++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
1691 ++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1692 +
1693 + /* Elo TouchSystems IntelliTouch Plus panel */
1694 + { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
1695 +@@ -678,6 +695,11 @@ static const struct hid_device_id mt_devices[] = {
1696 + HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
1697 + USB_DEVICE_ID_GOODTOUCH_000f) },
1698 +
1699 ++ /* Hanvon panels */
1700 ++ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
1701 ++ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
1702 ++ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
1703 ++
1704 + /* Ideacom panel */
1705 + { .driver_data = MT_CLS_SERIAL,
1706 + HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
1707 +@@ -758,6 +780,35 @@ static const struct hid_device_id mt_devices[] = {
1708 + HID_USB_DEVICE(USB_VENDOR_ID_XAT,
1709 + USB_DEVICE_ID_XAT_CSR) },
1710 +
1711 ++ /* Xiroku */
1712 ++ { .driver_data = MT_CLS_DEFAULT,
1713 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1714 ++ USB_DEVICE_ID_XIROKU_SPX) },
1715 ++ { .driver_data = MT_CLS_DEFAULT,
1716 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1717 ++ USB_DEVICE_ID_XIROKU_MPX) },
1718 ++ { .driver_data = MT_CLS_DEFAULT,
1719 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1720 ++ USB_DEVICE_ID_XIROKU_CSR) },
1721 ++ { .driver_data = MT_CLS_DEFAULT,
1722 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1723 ++ USB_DEVICE_ID_XIROKU_SPX1) },
1724 ++ { .driver_data = MT_CLS_DEFAULT,
1725 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1726 ++ USB_DEVICE_ID_XIROKU_MPX1) },
1727 ++ { .driver_data = MT_CLS_DEFAULT,
1728 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1729 ++ USB_DEVICE_ID_XIROKU_CSR1) },
1730 ++ { .driver_data = MT_CLS_DEFAULT,
1731 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1732 ++ USB_DEVICE_ID_XIROKU_SPX2) },
1733 ++ { .driver_data = MT_CLS_DEFAULT,
1734 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1735 ++ USB_DEVICE_ID_XIROKU_MPX2) },
1736 ++ { .driver_data = MT_CLS_DEFAULT,
1737 ++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
1738 ++ USB_DEVICE_ID_XIROKU_CSR2) },
1739 ++
1740 + { }
1741 + };
1742 + MODULE_DEVICE_TABLE(hid, mt_devices);
1743 +diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
1744 +index b6807db..5b667e5 100644
1745 +--- a/drivers/i2c/busses/i2c-ali1535.c
1746 ++++ b/drivers/i2c/busses/i2c-ali1535.c
1747 +@@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
1748 + defined to make the transition easier. */
1749 + static int __devinit ali1535_setup(struct pci_dev *dev)
1750 + {
1751 +- int retval = -ENODEV;
1752 ++ int retval;
1753 + unsigned char temp;
1754 +
1755 + /* Check the following things:
1756 +@@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1757 + if (ali1535_smba == 0) {
1758 + dev_warn(&dev->dev,
1759 + "ALI1535_smb region uninitialized - upgrade BIOS?\n");
1760 ++ retval = -ENODEV;
1761 + goto exit;
1762 + }
1763 +
1764 +@@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1765 + ali1535_driver.name)) {
1766 + dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
1767 + ali1535_smba);
1768 ++ retval = -EBUSY;
1769 + goto exit;
1770 + }
1771 +
1772 +@@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1773 + pci_read_config_byte(dev, SMBCFG, &temp);
1774 + if ((temp & ALI1535_SMBIO_EN) == 0) {
1775 + dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
1776 ++ retval = -ENODEV;
1777 + goto exit_free;
1778 + }
1779 +
1780 +@@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1781 + pci_read_config_byte(dev, SMBHSTCFG, &temp);
1782 + if ((temp & 1) == 0) {
1783 + dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
1784 ++ retval = -ENODEV;
1785 + goto exit_free;
1786 + }
1787 +
1788 +@@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
1789 + dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
1790 + dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
1791 +
1792 +- retval = 0;
1793 +-exit:
1794 +- return retval;
1795 ++ return 0;
1796 +
1797 + exit_free:
1798 + release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
1799 ++exit:
1800 + return retval;
1801 + }
1802 +
1803 +diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
1804 +index 18936ac..730215e 100644
1805 +--- a/drivers/i2c/busses/i2c-eg20t.c
1806 ++++ b/drivers/i2c/busses/i2c-eg20t.c
1807 +@@ -243,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
1808 + if (pch_clk > PCH_MAX_CLK)
1809 + pch_clk = 62500;
1810 +
1811 +- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
1812 ++ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
1813 + /* Set transfer speed in I2CBC */
1814 + iowrite32(pch_i2cbc, p + PCH_I2CBC);
1815 +
1816 +diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
1817 +index ff1e127..4853b52 100644
1818 +--- a/drivers/i2c/busses/i2c-nforce2.c
1819 ++++ b/drivers/i2c/busses/i2c-nforce2.c
1820 +@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
1821 + error = acpi_check_region(smbus->base, smbus->size,
1822 + nforce2_driver.name);
1823 + if (error)
1824 +- return -1;
1825 ++ return error;
1826 +
1827 + if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
1828 + dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
1829 +diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1830 +index fa23faa..257c1a5 100644
1831 +--- a/drivers/i2c/busses/i2c-omap.c
1832 ++++ b/drivers/i2c/busses/i2c-omap.c
1833 +@@ -235,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
1834 + [OMAP_I2C_BUF_REG] = 0x94,
1835 + [OMAP_I2C_CNT_REG] = 0x98,
1836 + [OMAP_I2C_DATA_REG] = 0x9c,
1837 +- [OMAP_I2C_SYSC_REG] = 0x20,
1838 ++ [OMAP_I2C_SYSC_REG] = 0x10,
1839 + [OMAP_I2C_CON_REG] = 0xa4,
1840 + [OMAP_I2C_OA_REG] = 0xa8,
1841 + [OMAP_I2C_SA_REG] = 0xac,
1842 +diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
1843 +index 4375866..6d60284 100644
1844 +--- a/drivers/i2c/busses/i2c-sis5595.c
1845 ++++ b/drivers/i2c/busses/i2c-sis5595.c
1846 +@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1847 + u16 a;
1848 + u8 val;
1849 + int *i;
1850 +- int retval = -ENODEV;
1851 ++ int retval;
1852 +
1853 + /* Look for imposters */
1854 + for (i = blacklist; *i != 0; i++) {
1855 +@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
1856 +
1857 + error:
1858 + release_region(sis5595_base + SMB_INDEX, 2);
1859 +- return retval;
1860 ++ return -ENODEV;
1861 + }
1862 +
1863 + static int sis5595_transaction(struct i2c_adapter *adap)
1864 +diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
1865 +index e6f539e..b617fd0 100644
1866 +--- a/drivers/i2c/busses/i2c-sis630.c
1867 ++++ b/drivers/i2c/busses/i2c-sis630.c
1868 +@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1869 + {
1870 + unsigned char b;
1871 + struct pci_dev *dummy = NULL;
1872 +- int retval = -ENODEV, i;
1873 ++ int retval, i;
1874 +
1875 + /* check for supported SiS devices */
1876 + for (i=0; supported[i] > 0 ; i++) {
1877 +@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1878 + */
1879 + if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
1880 + dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
1881 ++ retval = -ENODEV;
1882 + goto exit;
1883 + }
1884 + /* if ACPI already enabled , do nothing */
1885 + if (!(b & 0x80) &&
1886 + pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
1887 + dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
1888 ++ retval = -ENODEV;
1889 + goto exit;
1890 + }
1891 +
1892 + /* Determine the ACPI base address */
1893 + if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
1894 + dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
1895 ++ retval = -ENODEV;
1896 + goto exit;
1897 + }
1898 +
1899 +@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
1900 + sis630_driver.name)) {
1901 + dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
1902 + "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
1903 ++ retval = -EBUSY;
1904 + goto exit;
1905 + }
1906 +
1907 +diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
1908 +index 0b012f1..58261d4 100644
1909 +--- a/drivers/i2c/busses/i2c-viapro.c
1910 ++++ b/drivers/i2c/busses/i2c-viapro.c
1911 +@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
1912 + const struct pci_device_id *id)
1913 + {
1914 + unsigned char temp;
1915 +- int error = -ENODEV;
1916 ++ int error;
1917 +
1918 + /* Determine the address of the SMBus areas */
1919 + if (force_addr) {
1920 +@@ -390,6 +390,7 @@ found:
1921 + dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
1922 + "controller not enabled! - upgrade BIOS or "
1923 + "use force=1\n");
1924 ++ error = -ENODEV;
1925 + goto release_region;
1926 + }
1927 + }
1928 +@@ -422,9 +423,11 @@ found:
1929 + "SMBus Via Pro adapter at %04x", vt596_smba);
1930 +
1931 + vt596_pdev = pci_dev_get(pdev);
1932 +- if (i2c_add_adapter(&vt596_adapter)) {
1933 ++ error = i2c_add_adapter(&vt596_adapter);
1934 ++ if (error) {
1935 + pci_dev_put(vt596_pdev);
1936 + vt596_pdev = NULL;
1937 ++ goto release_region;
1938 + }
1939 +
1940 + /* Always return failure here. This is to allow other drivers to bind
1941 +diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
1942 +index d267b7a..a22ca84 100644
1943 +--- a/drivers/ide/ide-floppy_ioctl.c
1944 ++++ b/drivers/ide/ide-floppy_ioctl.c
1945 +@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
1946 + * and CDROM_SEND_PACKET (legacy) ioctls
1947 + */
1948 + if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
1949 +- err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
1950 +- mode, cmd, argp);
1951 ++ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
1952 +
1953 + if (err == -ENOTTY)
1954 + err = generic_ide_ioctl(drive, bdev, cmd, arg);
1955 +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
1956 +index 5d2f8e1..5b39216 100644
1957 +--- a/drivers/idle/intel_idle.c
1958 ++++ b/drivers/idle/intel_idle.c
1959 +@@ -348,7 +348,8 @@ static int intel_idle_probe(void)
1960 + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
1961 +
1962 + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
1963 +- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
1964 ++ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
1965 ++ !mwait_substates)
1966 + return -ENODEV;
1967 +
1968 + pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
1969 +@@ -394,7 +395,7 @@ static int intel_idle_probe(void)
1970 + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
1971 + lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
1972 + else {
1973 +- smp_call_function(__setup_broadcast_timer, (void *)true, 1);
1974 ++ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
1975 + register_cpu_notifier(&setup_broadcast_notifier);
1976 + }
1977 +
1978 +@@ -471,7 +472,7 @@ static int intel_idle_cpuidle_driver_init(void)
1979 + }
1980 +
1981 + if (auto_demotion_disable_flags)
1982 +- smp_call_function(auto_demotion_disable, NULL, 1);
1983 ++ on_each_cpu(auto_demotion_disable, NULL, 1);
1984 +
1985 + return 0;
1986 + }
1987 +@@ -568,7 +569,7 @@ static void __exit intel_idle_exit(void)
1988 + cpuidle_unregister_driver(&intel_idle_driver);
1989 +
1990 + if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
1991 +- smp_call_function(__setup_broadcast_timer, (void *)false, 1);
1992 ++ on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
1993 + unregister_cpu_notifier(&setup_broadcast_notifier);
1994 + }
1995 +
1996 +diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
1997 +index f84c080..9fb18c1 100644
1998 +--- a/drivers/md/dm-flakey.c
1999 ++++ b/drivers/md/dm-flakey.c
2000 +@@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
2001 + static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
2002 + {
2003 + struct flakey_c *fc = ti->private;
2004 ++ struct dm_dev *dev = fc->dev;
2005 ++ int r = 0;
2006 +
2007 +- return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
2008 ++ /*
2009 ++ * Only pass ioctls through if the device sizes match exactly.
2010 ++ */
2011 ++ if (fc->start ||
2012 ++ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
2013 ++ r = scsi_verify_blk_ioctl(NULL, cmd);
2014 ++
2015 ++ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
2016 + }
2017 +
2018 + static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2019 +diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
2020 +index 3921e3b..9728839 100644
2021 +--- a/drivers/md/dm-linear.c
2022 ++++ b/drivers/md/dm-linear.c
2023 +@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
2024 + unsigned long arg)
2025 + {
2026 + struct linear_c *lc = (struct linear_c *) ti->private;
2027 +- return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
2028 ++ struct dm_dev *dev = lc->dev;
2029 ++ int r = 0;
2030 ++
2031 ++ /*
2032 ++ * Only pass ioctls through if the device sizes match exactly.
2033 ++ */
2034 ++ if (lc->start ||
2035 ++ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
2036 ++ r = scsi_verify_blk_ioctl(NULL, cmd);
2037 ++
2038 ++ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
2039 + }
2040 +
2041 + static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2042 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2043 +index 5e0090e..801d92d 100644
2044 +--- a/drivers/md/dm-mpath.c
2045 ++++ b/drivers/md/dm-mpath.c
2046 +@@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
2047 +
2048 + spin_unlock_irqrestore(&m->lock, flags);
2049 +
2050 ++ /*
2051 ++ * Only pass ioctls through if the device sizes match exactly.
2052 ++ */
2053 ++ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
2054 ++ r = scsi_verify_blk_ioctl(NULL, cmd);
2055 ++
2056 + return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
2057 + }
2058 +
2059 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2060 +index ede2461..7d9e071 100644
2061 +--- a/drivers/md/raid1.c
2062 ++++ b/drivers/md/raid1.c
2063 +@@ -525,8 +525,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
2064 + if (test_bit(WriteMostly, &rdev->flags)) {
2065 + /* Don't balance among write-mostly, just
2066 + * use the first as a last resort */
2067 +- if (best_disk < 0)
2068 ++ if (best_disk < 0) {
2069 ++ if (is_badblock(rdev, this_sector, sectors,
2070 ++ &first_bad, &bad_sectors)) {
2071 ++ if (first_bad < this_sector)
2072 ++ /* Cannot use this */
2073 ++ continue;
2074 ++ best_good_sectors = first_bad - this_sector;
2075 ++ } else
2076 ++ best_good_sectors = sectors;
2077 + best_disk = disk;
2078 ++ }
2079 + continue;
2080 + }
2081 + /* This is a reasonable device to use. It might
2082 +diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
2083 +index bcb45be..f0482b2 100644
2084 +--- a/drivers/media/video/cx23885/cx23885-dvb.c
2085 ++++ b/drivers/media/video/cx23885/cx23885-dvb.c
2086 +@@ -940,6 +940,11 @@ static int dvb_register(struct cx23885_tsport *port)
2087 +
2088 + fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
2089 + &dev->i2c_bus[1].i2c_adap, &cfg);
2090 ++ if (!fe) {
2091 ++ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
2092 ++ dev->name);
2093 ++ goto frontend_detach;
2094 ++ }
2095 + }
2096 + break;
2097 + case CX23885_BOARD_TBS_6920:
2098 +diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
2099 +index 0d719fa..3929d93 100644
2100 +--- a/drivers/media/video/cx88/cx88-cards.c
2101 ++++ b/drivers/media/video/cx88/cx88-cards.c
2102 +@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
2103 + .name = "Pinnacle Hybrid PCTV",
2104 + .tuner_type = TUNER_XC2028,
2105 + .tuner_addr = 0x61,
2106 +- .radio_type = TUNER_XC2028,
2107 +- .radio_addr = 0x61,
2108 ++ .radio_type = UNSET,
2109 ++ .radio_addr = ADDR_UNSET,
2110 + .input = { {
2111 + .type = CX88_VMUX_TELEVISION,
2112 + .vmux = 0,
2113 +@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
2114 + .name = "Leadtek TV2000 XP Global",
2115 + .tuner_type = TUNER_XC2028,
2116 + .tuner_addr = 0x61,
2117 +- .radio_type = TUNER_XC2028,
2118 +- .radio_addr = 0x61,
2119 ++ .radio_type = UNSET,
2120 ++ .radio_addr = ADDR_UNSET,
2121 + .input = { {
2122 + .type = CX88_VMUX_TELEVISION,
2123 + .vmux = 0,
2124 +@@ -2043,8 +2043,8 @@ static const struct cx88_board cx88_boards[] = {
2125 + .name = "Terratec Cinergy HT PCI MKII",
2126 + .tuner_type = TUNER_XC2028,
2127 + .tuner_addr = 0x61,
2128 +- .radio_type = TUNER_XC2028,
2129 +- .radio_addr = 0x61,
2130 ++ .radio_type = UNSET,
2131 ++ .radio_addr = ADDR_UNSET,
2132 + .input = { {
2133 + .type = CX88_VMUX_TELEVISION,
2134 + .vmux = 0,
2135 +@@ -2082,9 +2082,9 @@ static const struct cx88_board cx88_boards[] = {
2136 + [CX88_BOARD_WINFAST_DTV1800H] = {
2137 + .name = "Leadtek WinFast DTV1800 Hybrid",
2138 + .tuner_type = TUNER_XC2028,
2139 +- .radio_type = TUNER_XC2028,
2140 ++ .radio_type = UNSET,
2141 + .tuner_addr = 0x61,
2142 +- .radio_addr = 0x61,
2143 ++ .radio_addr = ADDR_UNSET,
2144 + /*
2145 + * GPIO setting
2146 + *
2147 +@@ -2123,9 +2123,9 @@ static const struct cx88_board cx88_boards[] = {
2148 + [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
2149 + .name = "Leadtek WinFast DTV1800 H (XC4000)",
2150 + .tuner_type = TUNER_XC4000,
2151 +- .radio_type = TUNER_XC4000,
2152 ++ .radio_type = UNSET,
2153 + .tuner_addr = 0x61,
2154 +- .radio_addr = 0x61,
2155 ++ .radio_addr = ADDR_UNSET,
2156 + /*
2157 + * GPIO setting
2158 + *
2159 +@@ -2164,9 +2164,9 @@ static const struct cx88_board cx88_boards[] = {
2160 + [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
2161 + .name = "Leadtek WinFast DTV2000 H PLUS",
2162 + .tuner_type = TUNER_XC4000,
2163 +- .radio_type = TUNER_XC4000,
2164 ++ .radio_type = UNSET,
2165 + .tuner_addr = 0x61,
2166 +- .radio_addr = 0x61,
2167 ++ .radio_addr = ADDR_UNSET,
2168 + /*
2169 + * GPIO
2170 + * 2: 1: mute audio
2171 +diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
2172 +index dadf11f..cf7788f 100644
2173 +--- a/drivers/media/video/uvc/uvc_v4l2.c
2174 ++++ b/drivers/media/video/uvc/uvc_v4l2.c
2175 +@@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
2176 + break;
2177 +
2178 + case V4L2_CTRL_TYPE_MENU:
2179 ++ /* Prevent excessive memory consumption, as well as integer
2180 ++ * overflows.
2181 ++ */
2182 ++ if (xmap->menu_count == 0 ||
2183 ++ xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
2184 ++ ret = -EINVAL;
2185 ++ goto done;
2186 ++ }
2187 ++
2188 + size = xmap->menu_count * sizeof(*map->menu_info);
2189 + map->menu_info = kmalloc(size, GFP_KERNEL);
2190 + if (map->menu_info == NULL) {
2191 +diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
2192 +index 4c1392e..bc446ba 100644
2193 +--- a/drivers/media/video/uvc/uvcvideo.h
2194 ++++ b/drivers/media/video/uvc/uvcvideo.h
2195 +@@ -113,6 +113,7 @@
2196 +
2197 + /* Maximum allowed number of control mappings per device */
2198 + #define UVC_MAX_CONTROL_MAPPINGS 1024
2199 ++#define UVC_MAX_CONTROL_MENU_ENTRIES 32
2200 +
2201 + /* Devices quirks */
2202 + #define UVC_QUIRK_STATUS_INTERVAL 0x00000001
2203 +diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
2204 +index e1da8fc..639abee 100644
2205 +--- a/drivers/media/video/v4l2-ioctl.c
2206 ++++ b/drivers/media/video/v4l2-ioctl.c
2207 +@@ -2226,6 +2226,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
2208 + struct v4l2_ext_controls *ctrls = parg;
2209 +
2210 + if (ctrls->count != 0) {
2211 ++ if (ctrls->count > V4L2_CID_MAX_CTRLS) {
2212 ++ ret = -EINVAL;
2213 ++ break;
2214 ++ }
2215 + *user_ptr = (void __user *)ctrls->controls;
2216 + *kernel_ptr = (void *)&ctrls->controls;
2217 + *array_size = sizeof(struct v4l2_ext_control)
2218 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
2219 +index d240427..fb7c27f 100644
2220 +--- a/drivers/mmc/core/mmc.c
2221 ++++ b/drivers/mmc/core/mmc.c
2222 +@@ -1048,7 +1048,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
2223 + *
2224 + * WARNING: eMMC rules are NOT the same as SD DDR
2225 + */
2226 +- if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
2227 ++ if (ddr == MMC_1_2V_DDR_MODE) {
2228 + err = mmc_set_signal_voltage(host,
2229 + MMC_SIGNAL_VOLTAGE_120, 0);
2230 + if (err)
2231 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2232 +index 19ed580..6ce32a7 100644
2233 +--- a/drivers/mmc/host/sdhci.c
2234 ++++ b/drivers/mmc/host/sdhci.c
2235 +@@ -1364,8 +1364,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
2236 + if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
2237 + (ios->timing == MMC_TIMING_UHS_SDR104) ||
2238 + (ios->timing == MMC_TIMING_UHS_DDR50) ||
2239 +- (ios->timing == MMC_TIMING_UHS_SDR25) ||
2240 +- (ios->timing == MMC_TIMING_UHS_SDR12))
2241 ++ (ios->timing == MMC_TIMING_UHS_SDR25))
2242 + ctrl |= SDHCI_CTRL_HISPD;
2243 +
2244 + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2245 +@@ -2336,9 +2335,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
2246 + /* Disable tuning since we are suspending */
2247 + if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
2248 + host->tuning_mode == SDHCI_TUNING_MODE_1) {
2249 ++ del_timer_sync(&host->tuning_timer);
2250 + host->flags &= ~SDHCI_NEEDS_RETUNING;
2251 +- mod_timer(&host->tuning_timer, jiffies +
2252 +- host->tuning_count * HZ);
2253 + }
2254 +
2255 + ret = mmc_suspend_host(host->mmc);
2256 +diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
2257 +index ed8b5e7..424ca5f 100644
2258 +--- a/drivers/mtd/mtd_blkdevs.c
2259 ++++ b/drivers/mtd/mtd_blkdevs.c
2260 +@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2261 +
2262 + mutex_lock(&dev->lock);
2263 +
2264 +- if (dev->open++)
2265 ++ if (dev->open)
2266 + goto unlock;
2267 +
2268 + kref_get(&dev->ref);
2269 +@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
2270 + goto error_release;
2271 +
2272 + unlock:
2273 ++ dev->open++;
2274 + mutex_unlock(&dev->lock);
2275 + blktrans_dev_put(dev);
2276 + return ret;
2277 +diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
2278 +index 1e2fa62..f3cdce9 100644
2279 +--- a/drivers/mtd/mtdoops.c
2280 ++++ b/drivers/mtd/mtdoops.c
2281 +@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
2282 + size_t retlen;
2283 +
2284 + for (page = 0; page < cxt->oops_pages; page++) {
2285 ++ if (mtd->block_isbad &&
2286 ++ mtd->block_isbad(mtd, page * record_size))
2287 ++ continue;
2288 + /* Assume the page is used */
2289 + mark_page_used(cxt, page);
2290 + ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
2291 +@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
2292 +
2293 + /* oops_page_used is a bit field */
2294 + cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
2295 +- BITS_PER_LONG));
2296 ++ BITS_PER_LONG) * sizeof(unsigned long));
2297 + if (!cxt->oops_page_used) {
2298 + printk(KERN_ERR "mtdoops: could not allocate page array\n");
2299 + return;
2300 +diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
2301 +index 52ffd91..811642f 100644
2302 +--- a/drivers/mtd/tests/mtd_stresstest.c
2303 ++++ b/drivers/mtd/tests/mtd_stresstest.c
2304 +@@ -284,6 +284,12 @@ static int __init mtd_stresstest_init(void)
2305 + (unsigned long long)mtd->size, mtd->erasesize,
2306 + pgsize, ebcnt, pgcnt, mtd->oobsize);
2307 +
2308 ++ if (ebcnt < 2) {
2309 ++ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
2310 ++ err = -ENOSPC;
2311 ++ goto out_put_mtd;
2312 ++ }
2313 ++
2314 + /* Read or write up 2 eraseblocks at a time */
2315 + bufsize = mtd->erasesize * 2;
2316 +
2317 +@@ -322,6 +328,7 @@ out:
2318 + kfree(bbt);
2319 + vfree(writebuf);
2320 + vfree(readbuf);
2321 ++out_put_mtd:
2322 + put_mtd_device(mtd);
2323 + if (err)
2324 + printk(PRINT_PREF "error %d occurred\n", err);
2325 +diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
2326 +index 3320a50..ad76592 100644
2327 +--- a/drivers/mtd/ubi/cdev.c
2328 ++++ b/drivers/mtd/ubi/cdev.c
2329 +@@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
2330 + if (req->alignment != 1 && n)
2331 + goto bad;
2332 +
2333 ++ if (!req->name[0] || !req->name_len)
2334 ++ goto bad;
2335 ++
2336 + if (req->name_len > UBI_VOL_NAME_MAX) {
2337 + err = -ENAMETOOLONG;
2338 + goto bad;
2339 +diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
2340 +index 64fbb00..ead2cd1 100644
2341 +--- a/drivers/mtd/ubi/debug.h
2342 ++++ b/drivers/mtd/ubi/debug.h
2343 +@@ -43,7 +43,10 @@
2344 + pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
2345 +
2346 + /* Just a debugging messages not related to any specific UBI subsystem */
2347 +-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
2348 ++#define dbg_msg(fmt, ...) \
2349 ++ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
2350 ++ current->pid, __func__, ##__VA_ARGS__)
2351 ++
2352 + /* General debugging messages */
2353 + #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
2354 + /* Messages from the eraseblock association sub-system */
2355 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2356 +index fb7f19b..cd26da8 100644
2357 +--- a/drivers/mtd/ubi/eba.c
2358 ++++ b/drivers/mtd/ubi/eba.c
2359 +@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
2360 + * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
2361 + * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
2362 + * LEB is already locked, we just do not move it and return
2363 +- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
2364 ++ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
2365 ++ * we do not know the reasons of the contention - it may be just a
2366 ++ * normal I/O on this LEB, so we want to re-try.
2367 + */
2368 + err = leb_write_trylock(ubi, vol_id, lnum);
2369 + if (err) {
2370 + dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
2371 +- return MOVE_CANCEL_RACE;
2372 ++ return MOVE_RETRY;
2373 + }
2374 +
2375 + /*
2376 +diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
2377 +index dc64c76..d51d75d 100644
2378 +--- a/drivers/mtd/ubi/ubi.h
2379 ++++ b/drivers/mtd/ubi/ubi.h
2380 +@@ -120,6 +120,7 @@ enum {
2381 + * PEB
2382 + * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
2383 + * target PEB
2384 ++ * MOVE_RETRY: retry scrubbing the PEB
2385 + */
2386 + enum {
2387 + MOVE_CANCEL_RACE = 1,
2388 +@@ -127,6 +128,7 @@ enum {
2389 + MOVE_TARGET_RD_ERR,
2390 + MOVE_TARGET_WR_ERR,
2391 + MOVE_CANCEL_BITFLIPS,
2392 ++ MOVE_RETRY,
2393 + };
2394 +
2395 + /**
2396 +diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
2397 +index 9ad18da..890754c 100644
2398 +--- a/drivers/mtd/ubi/vtbl.c
2399 ++++ b/drivers/mtd/ubi/vtbl.c
2400 +@@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
2401 + int copy, void *vtbl)
2402 + {
2403 + int err, tries = 0;
2404 +- static struct ubi_vid_hdr *vid_hdr;
2405 ++ struct ubi_vid_hdr *vid_hdr;
2406 + struct ubi_scan_leb *new_seb;
2407 +
2408 + ubi_msg("create volume table (copy #%d)", copy + 1);
2409 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2410 +index 42c684c..0696e36 100644
2411 +--- a/drivers/mtd/ubi/wl.c
2412 ++++ b/drivers/mtd/ubi/wl.c
2413 +@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2414 + protect = 1;
2415 + goto out_not_moved;
2416 + }
2417 +-
2418 ++ if (err == MOVE_RETRY) {
2419 ++ scrubbing = 1;
2420 ++ goto out_not_moved;
2421 ++ }
2422 + if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
2423 + err == MOVE_TARGET_RD_ERR) {
2424 + /*
2425 +@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2426 +
2427 + ubi_err("failed to erase PEB %d, error %d", pnum, err);
2428 + kfree(wl_wrk);
2429 +- kmem_cache_free(ubi_wl_entry_slab, e);
2430 +
2431 + if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
2432 + err == -EBUSY) {
2433 +@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
2434 + goto out_ro;
2435 + }
2436 + return err;
2437 +- } else if (err != -EIO) {
2438 ++ }
2439 ++
2440 ++ kmem_cache_free(ubi_wl_entry_slab, e);
2441 ++ if (err != -EIO)
2442 + /*
2443 + * If this is not %-EIO, we have no idea what to do. Scheduling
2444 + * this physical eraseblock for erasure again would cause
2445 + * errors again and again. Well, lets switch to R/O mode.
2446 + */
2447 + goto out_ro;
2448 +- }
2449 +
2450 + /* It is %-EIO, the PEB went bad */
2451 +
2452 +diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
2453 +index dd2625a..f5e063a 100644
2454 +--- a/drivers/net/usb/asix.c
2455 ++++ b/drivers/net/usb/asix.c
2456 +@@ -974,6 +974,7 @@ static int ax88772_link_reset(struct usbnet *dev)
2457 +
2458 + static int ax88772_reset(struct usbnet *dev)
2459 + {
2460 ++ struct asix_data *data = (struct asix_data *)&dev->data;
2461 + int ret, embd_phy;
2462 + u16 rx_ctl;
2463 +
2464 +@@ -1051,6 +1052,13 @@ static int ax88772_reset(struct usbnet *dev)
2465 + goto out;
2466 + }
2467 +
2468 ++ /* Rewrite MAC address */
2469 ++ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2470 ++ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2471 ++ data->mac_addr);
2472 ++ if (ret < 0)
2473 ++ goto out;
2474 ++
2475 + /* Set RX_CTL to default values with 2k buffer, and enable cactus */
2476 + ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2477 + if (ret < 0)
2478 +@@ -1316,6 +1324,13 @@ static int ax88178_reset(struct usbnet *dev)
2479 + if (ret < 0)
2480 + return ret;
2481 +
2482 ++ /* Rewrite MAC address */
2483 ++ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
2484 ++ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
2485 ++ data->mac_addr);
2486 ++ if (ret < 0)
2487 ++ return ret;
2488 ++
2489 + ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
2490 + if (ret < 0)
2491 + return ret;
2492 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2493 +index ccde784..f5ae3c6 100644
2494 +--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2495 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
2496 +@@ -526,10 +526,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
2497 + rxs->rs_status |= ATH9K_RXERR_DECRYPT;
2498 + else if (rxsp->status11 & AR_MichaelErr)
2499 + rxs->rs_status |= ATH9K_RXERR_MIC;
2500 +- if (rxsp->status11 & AR_KeyMiss)
2501 +- rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2502 + }
2503 +
2504 ++ if (rxsp->status11 & AR_KeyMiss)
2505 ++ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
2506 ++
2507 + return 0;
2508 + }
2509 + EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
2510 +diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
2511 +index 9953881..8ddef3e 100644
2512 +--- a/drivers/net/wireless/ath/ath9k/calib.c
2513 ++++ b/drivers/net/wireless/ath/ath9k/calib.c
2514 +@@ -402,6 +402,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
2515 + ah->noise = ath9k_hw_getchan_noise(ah, chan);
2516 + return true;
2517 + }
2518 ++EXPORT_SYMBOL(ath9k_hw_getnf);
2519 +
2520 + void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
2521 + struct ath9k_channel *chan)
2522 +diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2523 +index ecdb6fd..bbcb777 100644
2524 +--- a/drivers/net/wireless/ath/ath9k/mac.c
2525 ++++ b/drivers/net/wireless/ath/ath9k/mac.c
2526 +@@ -621,10 +621,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
2527 + rs->rs_status |= ATH9K_RXERR_DECRYPT;
2528 + else if (ads.ds_rxstatus8 & AR_MichaelErr)
2529 + rs->rs_status |= ATH9K_RXERR_MIC;
2530 +- if (ads.ds_rxstatus8 & AR_KeyMiss)
2531 +- rs->rs_status |= ATH9K_RXERR_KEYMISS;
2532 + }
2533 +
2534 ++ if (ads.ds_rxstatus8 & AR_KeyMiss)
2535 ++ rs->rs_status |= ATH9K_RXERR_KEYMISS;
2536 ++
2537 + return 0;
2538 + }
2539 + EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
2540 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2541 +index a9c5ae7..f76a814 100644
2542 +--- a/drivers/net/wireless/ath/ath9k/main.c
2543 ++++ b/drivers/net/wireless/ath/ath9k/main.c
2544 +@@ -1667,7 +1667,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2545 +
2546 + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2547 + struct ieee80211_channel *curchan = hw->conf.channel;
2548 +- struct ath9k_channel old_chan;
2549 + int pos = curchan->hw_value;
2550 + int old_pos = -1;
2551 + unsigned long flags;
2552 +@@ -1693,11 +1692,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2553 + * Preserve the current channel values, before updating
2554 + * the same channel
2555 + */
2556 +- if (old_pos == pos) {
2557 +- memcpy(&old_chan, &sc->sc_ah->channels[pos],
2558 +- sizeof(struct ath9k_channel));
2559 +- ah->curchan = &old_chan;
2560 +- }
2561 ++ if (ah->curchan && (old_pos == pos))
2562 ++ ath9k_hw_getnf(ah, ah->curchan);
2563 +
2564 + ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
2565 + curchan, conf->channel_type);
2566 +diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2567 +index b282d86..05f2ad1 100644
2568 +--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
2569 ++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
2570 +@@ -2656,14 +2656,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2571 + IWL_WARN(priv, "Invalid scan band\n");
2572 + return -EIO;
2573 + }
2574 +-
2575 + /*
2576 +- * If active scaning is requested but a certain channel
2577 +- * is marked passive, we can do active scanning if we
2578 +- * detect transmissions.
2579 ++ * If active scaning is requested but a certain channel is marked
2580 ++ * passive, we can do active scanning if we detect transmissions. For
2581 ++ * passive only scanning disable switching to active on any channel.
2582 + */
2583 + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2584 +- IWL_GOOD_CRC_TH_DISABLED;
2585 ++ IWL_GOOD_CRC_TH_NEVER;
2586 +
2587 + len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
2588 + vif->addr, priv->scan_request->ie,
2589 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2590 +index 1a52ed2..6465983 100644
2591 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2592 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
2593 +@@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
2594 + case IEEE80211_SMPS_STATIC:
2595 + case IEEE80211_SMPS_DYNAMIC:
2596 + return IWL_NUM_IDLE_CHAINS_SINGLE;
2597 ++ case IEEE80211_SMPS_AUTOMATIC:
2598 + case IEEE80211_SMPS_OFF:
2599 + return active_cnt;
2600 + default:
2601 +diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2602 +index 5c7c17c..d552fa3 100644
2603 +--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2604 ++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
2605 +@@ -559,6 +559,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
2606 +
2607 + mutex_lock(&priv->shrd->mutex);
2608 +
2609 ++ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2610 ++ goto out;
2611 ++
2612 + if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
2613 + IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2614 + goto out;
2615 +diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
2616 +index da48c8a..837b460 100644
2617 +--- a/drivers/net/wireless/rt2x00/rt2800pci.c
2618 ++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
2619 +@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
2620 + static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2621 + enum dev_state state)
2622 + {
2623 +- int mask = (state == STATE_RADIO_IRQ_ON);
2624 + u32 reg;
2625 + unsigned long flags;
2626 +
2627 +@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
2628 + }
2629 +
2630 + spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
2631 +- rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
2632 +- rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
2633 +- rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
2634 +- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
2635 +- rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
2636 +- rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
2637 +- rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
2638 +- rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
2639 +- rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
2640 +- rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
2641 +- rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
2642 +- rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
2643 +- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
2644 +- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
2645 +- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
2646 +- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
2647 +- rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
2648 +- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
2649 +- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
2650 ++ reg = 0;
2651 ++ if (state == STATE_RADIO_IRQ_ON) {
2652 ++ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
2653 ++ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
2654 ++ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
2655 ++ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
2656 ++ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
2657 ++ }
2658 + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
2659 + spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
2660 +
2661 +diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2662 +index 6f91a14..3fda6b1 100644
2663 +--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2664 ++++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
2665 +@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
2666 + /* Allocate skb buffer to contain firmware */
2667 + /* info and tx descriptor info. */
2668 + skb = dev_alloc_skb(frag_length);
2669 ++ if (!skb)
2670 ++ return false;
2671 + skb_reserve(skb, extra_descoffset);
2672 + seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
2673 + extra_descoffset));
2674 +@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
2675 +
2676 + len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
2677 + skb = dev_alloc_skb(len);
2678 ++ if (!skb)
2679 ++ return false;
2680 + cb_desc = (struct rtl_tcb_desc *)(skb->cb);
2681 + cb_desc->queue_index = TXCMD_QUEUE;
2682 + cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
2683 +diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
2684 +index 0e6d04d..e3efb43 100644
2685 +--- a/drivers/pci/msi.c
2686 ++++ b/drivers/pci/msi.c
2687 +@@ -870,5 +870,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
2688 +
2689 + void pci_msi_init_pci_dev(struct pci_dev *dev)
2690 + {
2691 ++ int pos;
2692 + INIT_LIST_HEAD(&dev->msi_list);
2693 ++
2694 ++ /* Disable the msi hardware to avoid screaming interrupts
2695 ++ * during boot. This is the power on reset default so
2696 ++ * usually this should be a noop.
2697 ++ */
2698 ++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2699 ++ if (pos)
2700 ++ msi_set_enable(dev, pos, 0);
2701 ++ msix_set_enable(dev, 0);
2702 + }
2703 +diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
2704 +index dfbd5a6..258fef2 100644
2705 +--- a/drivers/pnp/quirks.c
2706 ++++ b/drivers/pnp/quirks.c
2707 +@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
2708 + }
2709 + }
2710 +
2711 ++#ifdef CONFIG_AMD_NB
2712 ++
2713 ++#include <asm/amd_nb.h>
2714 ++
2715 ++static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
2716 ++{
2717 ++ resource_size_t start, end;
2718 ++ struct pnp_resource *pnp_res;
2719 ++ struct resource *res;
2720 ++ struct resource mmconfig_res, *mmconfig;
2721 ++
2722 ++ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
2723 ++ if (!mmconfig)
2724 ++ return;
2725 ++
2726 ++ list_for_each_entry(pnp_res, &dev->resources, list) {
2727 ++ res = &pnp_res->res;
2728 ++ if (res->end < mmconfig->start || res->start > mmconfig->end ||
2729 ++ (res->start == mmconfig->start && res->end == mmconfig->end))
2730 ++ continue;
2731 ++
2732 ++ dev_info(&dev->dev, FW_BUG
2733 ++ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
2734 ++ res, mmconfig);
2735 ++ if (mmconfig->start < res->start) {
2736 ++ start = mmconfig->start;
2737 ++ end = res->start - 1;
2738 ++ pnp_add_mem_resource(dev, start, end, 0);
2739 ++ }
2740 ++ if (mmconfig->end > res->end) {
2741 ++ start = res->end + 1;
2742 ++ end = mmconfig->end;
2743 ++ pnp_add_mem_resource(dev, start, end, 0);
2744 ++ }
2745 ++ break;
2746 ++ }
2747 ++}
2748 ++#endif
2749 ++
2750 + /*
2751 + * PnP Quirks
2752 + * Cards or devices that need some tweaking due to incomplete resource info
2753 +@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
2754 + /* PnP resources that might overlap PCI BARs */
2755 + {"PNP0c01", quirk_system_pci_resources},
2756 + {"PNP0c02", quirk_system_pci_resources},
2757 ++#ifdef CONFIG_AMD_NB
2758 ++ {"PNP0c01", quirk_amd_mmconfig_area},
2759 ++#endif
2760 + {""}
2761 + };
2762 +
2763 +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
2764 +index 8e28625..8a1c031 100644
2765 +--- a/drivers/rtc/interface.c
2766 ++++ b/drivers/rtc/interface.c
2767 +@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
2768 + alarm->time.tm_hour = now.tm_hour;
2769 +
2770 + /* For simplicity, only support date rollover for now */
2771 +- if (alarm->time.tm_mday == -1) {
2772 ++ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
2773 + alarm->time.tm_mday = now.tm_mday;
2774 + missing = day;
2775 + }
2776 +- if (alarm->time.tm_mon == -1) {
2777 ++ if ((unsigned)alarm->time.tm_mon >= 12) {
2778 + alarm->time.tm_mon = now.tm_mon;
2779 + if (missing == none)
2780 + missing = month;
2781 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
2782 +index beda04a..0794c72 100644
2783 +--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
2784 ++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
2785 +@@ -65,6 +65,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
2786 +
2787 + #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
2788 +
2789 ++#define MAX_HBA_QUEUE_DEPTH 30000
2790 ++#define MAX_CHAIN_DEPTH 100000
2791 + static int max_queue_depth = -1;
2792 + module_param(max_queue_depth, int, 0);
2793 + MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
2794 +@@ -2311,8 +2313,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2795 + }
2796 + if (ioc->chain_dma_pool)
2797 + pci_pool_destroy(ioc->chain_dma_pool);
2798 +- }
2799 +- if (ioc->chain_lookup) {
2800 + free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2801 + ioc->chain_lookup = NULL;
2802 + }
2803 +@@ -2330,9 +2330,7 @@ static int
2804 + _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2805 + {
2806 + struct mpt2sas_facts *facts;
2807 +- u32 queue_size, queue_diff;
2808 + u16 max_sge_elements;
2809 +- u16 num_of_reply_frames;
2810 + u16 chains_needed_per_io;
2811 + u32 sz, total_sz, reply_post_free_sz;
2812 + u32 retry_sz;
2813 +@@ -2359,7 +2357,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2814 + max_request_credit = (max_queue_depth < facts->RequestCredit)
2815 + ? max_queue_depth : facts->RequestCredit;
2816 + else
2817 +- max_request_credit = facts->RequestCredit;
2818 ++ max_request_credit = min_t(u16, facts->RequestCredit,
2819 ++ MAX_HBA_QUEUE_DEPTH);
2820 +
2821 + ioc->hba_queue_depth = max_request_credit;
2822 + ioc->hi_priority_depth = facts->HighPriorityCredit;
2823 +@@ -2400,50 +2399,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2824 + }
2825 + ioc->chains_needed_per_io = chains_needed_per_io;
2826 +
2827 +- /* reply free queue sizing - taking into account for events */
2828 +- num_of_reply_frames = ioc->hba_queue_depth + 32;
2829 +-
2830 +- /* number of replies frames can't be a multiple of 16 */
2831 +- /* decrease number of reply frames by 1 */
2832 +- if (!(num_of_reply_frames % 16))
2833 +- num_of_reply_frames--;
2834 +-
2835 +- /* calculate number of reply free queue entries
2836 +- * (must be multiple of 16)
2837 +- */
2838 +-
2839 +- /* (we know reply_free_queue_depth is not a multiple of 16) */
2840 +- queue_size = num_of_reply_frames;
2841 +- queue_size += 16 - (queue_size % 16);
2842 +- ioc->reply_free_queue_depth = queue_size;
2843 +-
2844 +- /* reply descriptor post queue sizing */
2845 +- /* this size should be the number of request frames + number of reply
2846 +- * frames
2847 +- */
2848 +-
2849 +- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
2850 +- /* round up to 16 byte boundary */
2851 +- if (queue_size % 16)
2852 +- queue_size += 16 - (queue_size % 16);
2853 +-
2854 +- /* check against IOC maximum reply post queue depth */
2855 +- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
2856 +- queue_diff = queue_size -
2857 +- facts->MaxReplyDescriptorPostQueueDepth;
2858 ++ /* reply free queue sizing - taking into account for 64 FW events */
2859 ++ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2860 +
2861 +- /* round queue_diff up to multiple of 16 */
2862 +- if (queue_diff % 16)
2863 +- queue_diff += 16 - (queue_diff % 16);
2864 +-
2865 +- /* adjust hba_queue_depth, reply_free_queue_depth,
2866 +- * and queue_size
2867 +- */
2868 +- ioc->hba_queue_depth -= (queue_diff / 2);
2869 +- ioc->reply_free_queue_depth -= (queue_diff / 2);
2870 +- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
2871 ++ /* align the reply post queue on the next 16 count boundary */
2872 ++ if (!ioc->reply_free_queue_depth % 16)
2873 ++ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
2874 ++ else
2875 ++ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
2876 ++ 32 - (ioc->reply_free_queue_depth % 16);
2877 ++ if (ioc->reply_post_queue_depth >
2878 ++ facts->MaxReplyDescriptorPostQueueDepth) {
2879 ++ ioc->reply_post_queue_depth = min_t(u16,
2880 ++ (facts->MaxReplyDescriptorPostQueueDepth -
2881 ++ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
2882 ++ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
2883 ++ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
2884 ++ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
2885 + }
2886 +- ioc->reply_post_queue_depth = queue_size;
2887 ++
2888 +
2889 + dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2890 + "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2891 +@@ -2529,15 +2503,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2892 + "depth(%d)\n", ioc->name, ioc->request,
2893 + ioc->scsiio_depth));
2894 +
2895 +- /* loop till the allocation succeeds */
2896 +- do {
2897 +- sz = ioc->chain_depth * sizeof(struct chain_tracker);
2898 +- ioc->chain_pages = get_order(sz);
2899 +- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2900 +- GFP_KERNEL, ioc->chain_pages);
2901 +- if (ioc->chain_lookup == NULL)
2902 +- ioc->chain_depth -= 100;
2903 +- } while (ioc->chain_lookup == NULL);
2904 ++ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2905 ++ sz = ioc->chain_depth * sizeof(struct chain_tracker);
2906 ++ ioc->chain_pages = get_order(sz);
2907 ++
2908 ++ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2909 ++ GFP_KERNEL, ioc->chain_pages);
2910 + ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2911 + ioc->request_sz, 16, 0);
2912 + if (!ioc->chain_dma_pool) {
2913 +diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2914 +index d570573..9bc6fb2 100644
2915 +--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2916 ++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
2917 +@@ -1007,8 +1007,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
2918 + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2919 + if (list_empty(&ioc->free_chain_list)) {
2920 + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2921 +- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
2922 +- ioc->name);
2923 ++ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
2924 ++ "available\n", ioc->name));
2925 + return NULL;
2926 + }
2927 + chain_req = list_entry(ioc->free_chain_list.next,
2928 +@@ -6714,6 +6714,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2929 + } else
2930 + sas_target_priv_data = NULL;
2931 + raid_device->responding = 1;
2932 ++ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2933 + starget_printk(KERN_INFO, raid_device->starget,
2934 + "handle(0x%04x), wwid(0x%016llx)\n", handle,
2935 + (unsigned long long)raid_device->wwid);
2936 +@@ -6724,16 +6725,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
2937 + */
2938 + _scsih_init_warpdrive_properties(ioc, raid_device);
2939 + if (raid_device->handle == handle)
2940 +- goto out;
2941 ++ return;
2942 + printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
2943 + raid_device->handle);
2944 + raid_device->handle = handle;
2945 + if (sas_target_priv_data)
2946 + sas_target_priv_data->handle = handle;
2947 +- goto out;
2948 ++ return;
2949 + }
2950 + }
2951 +- out:
2952 ++
2953 + spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2954 + }
2955 +
2956 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2957 +index fa3a591..4b63c73 100644
2958 +--- a/drivers/scsi/sd.c
2959 ++++ b/drivers/scsi/sd.c
2960 +@@ -1074,6 +1074,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2961 + SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
2962 + "cmd=0x%x\n", disk->disk_name, cmd));
2963 +
2964 ++ error = scsi_verify_blk_ioctl(bdev, cmd);
2965 ++ if (error < 0)
2966 ++ return error;
2967 ++
2968 + /*
2969 + * If we are in the middle of error recovery, don't let anyone
2970 + * else try and use this device. Also, if error recovery fails, it
2971 +@@ -1096,7 +1100,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
2972 + error = scsi_ioctl(sdp, cmd, p);
2973 + break;
2974 + default:
2975 +- error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
2976 ++ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
2977 + if (error != -ENOTTY)
2978 + break;
2979 + error = scsi_ioctl(sdp, cmd, p);
2980 +@@ -1266,6 +1270,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2981 + unsigned int cmd, unsigned long arg)
2982 + {
2983 + struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
2984 ++ int ret;
2985 ++
2986 ++ ret = scsi_verify_blk_ioctl(bdev, cmd);
2987 ++ if (ret < 0)
2988 ++ return -ENOIOCTLCMD;
2989 +
2990 + /*
2991 + * If we are in the middle of error recovery, don't let anyone
2992 +@@ -1277,8 +1286,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
2993 + return -ENODEV;
2994 +
2995 + if (sdev->host->hostt->compat_ioctl) {
2996 +- int ret;
2997 +-
2998 + ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
2999 +
3000 + return ret;
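Both ioctl entry points above now run scsi_verify_blk_ioctl() (declared in the
blkdev.h hunk further down) before dispatching, so SCSI pass-through commands
are refused on partitions. A hedged userspace illustration -- the device name
and privileges are assumptions; on a patched kernel the SG_IO below fails with
ENOTTY on /dev/sda1 while still working on the whole disk:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>

    int main(void)
    {
        unsigned char cdb[6] = { 0 };          /* TEST UNIT READY */
        struct sg_io_hdr hdr;
        int fd = open("/dev/sda1", O_RDONLY);  /* a partition, not the disk */

        if (fd < 0) { perror("open"); return 1; }
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_NONE;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.timeout = 1000;                    /* milliseconds */
        if (ioctl(fd, SG_IO, &hdr) < 0)
            printf("SG_IO on partition: %s\n", strerror(errno));
        return 0;
    }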
3001 +diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
3002 +index b4543f5..36d1ed7 100644
3003 +--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
3004 ++++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
3005 +@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
3006 + struct sym_lcb *lp = sym_lp(tp, sdev->lun);
3007 + unsigned long flags;
3008 +
3009 ++ /* if slave_alloc returned before allocating a sym_lcb, return */
3010 ++ if (!lp)
3011 ++ return;
3012 ++
3013 + spin_lock_irqsave(np->s.host->host_lock, flags);
3014 +
3015 + if (lp->busy_itlq || lp->busy_itl) {
3016 +diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
3017 +index 831468b..2e8c1be 100644
3018 +--- a/drivers/target/target_core_cdb.c
3019 ++++ b/drivers/target/target_core_cdb.c
3020 +@@ -94,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
3021 + buf[2] = dev->transport->get_device_rev(dev);
3022 +
3023 + /*
3024 ++ * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
3025 ++ *
3026 ++ * SPC4 says:
3027 ++ * A RESPONSE DATA FORMAT field set to 2h indicates that the
3028 ++ * standard INQUIRY data is in the format defined in this
3029 ++ * standard. Response data format values less than 2h are
3030 ++ * obsolete. Response data format values greater than 2h are
3031 ++ * reserved.
3032 ++ */
3033 ++ buf[3] = 2;
3034 ++
3035 ++ /*
3036 + * Enable SCCS and TPGS fields for Emulated ALUA
3037 + */
3038 + if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
3039 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3040 +index 0257658..e87d0eb 100644
3041 +--- a/drivers/target/target_core_transport.c
3042 ++++ b/drivers/target/target_core_transport.c
3043 +@@ -4353,6 +4353,7 @@ int transport_send_check_condition_and_sense(
3044 + case TCM_NON_EXISTENT_LUN:
3045 + /* CURRENT ERROR */
3046 + buffer[offset] = 0x70;
3047 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3048 + /* ILLEGAL REQUEST */
3049 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3050 + /* LOGICAL UNIT NOT SUPPORTED */
3051 +@@ -4362,6 +4363,7 @@ int transport_send_check_condition_and_sense(
3052 + case TCM_SECTOR_COUNT_TOO_MANY:
3053 + /* CURRENT ERROR */
3054 + buffer[offset] = 0x70;
3055 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3056 + /* ILLEGAL REQUEST */
3057 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3058 + /* INVALID COMMAND OPERATION CODE */
3059 +@@ -4370,6 +4372,7 @@ int transport_send_check_condition_and_sense(
3060 + case TCM_UNKNOWN_MODE_PAGE:
3061 + /* CURRENT ERROR */
3062 + buffer[offset] = 0x70;
3063 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3064 + /* ILLEGAL REQUEST */
3065 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3066 + /* INVALID FIELD IN CDB */
3067 +@@ -4378,6 +4381,7 @@ int transport_send_check_condition_and_sense(
3068 + case TCM_CHECK_CONDITION_ABORT_CMD:
3069 + /* CURRENT ERROR */
3070 + buffer[offset] = 0x70;
3071 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3072 + /* ABORTED COMMAND */
3073 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3074 + /* BUS DEVICE RESET FUNCTION OCCURRED */
3075 +@@ -4387,6 +4391,7 @@ int transport_send_check_condition_and_sense(
3076 + case TCM_INCORRECT_AMOUNT_OF_DATA:
3077 + /* CURRENT ERROR */
3078 + buffer[offset] = 0x70;
3079 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3080 + /* ABORTED COMMAND */
3081 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3082 + /* WRITE ERROR */
3083 +@@ -4397,6 +4402,7 @@ int transport_send_check_condition_and_sense(
3084 + case TCM_INVALID_CDB_FIELD:
3085 + /* CURRENT ERROR */
3086 + buffer[offset] = 0x70;
3087 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3088 + /* ABORTED COMMAND */
3089 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3090 + /* INVALID FIELD IN CDB */
3091 +@@ -4405,6 +4411,7 @@ int transport_send_check_condition_and_sense(
3092 + case TCM_INVALID_PARAMETER_LIST:
3093 + /* CURRENT ERROR */
3094 + buffer[offset] = 0x70;
3095 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3096 + /* ABORTED COMMAND */
3097 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3098 + /* INVALID FIELD IN PARAMETER LIST */
3099 +@@ -4413,6 +4420,7 @@ int transport_send_check_condition_and_sense(
3100 + case TCM_UNEXPECTED_UNSOLICITED_DATA:
3101 + /* CURRENT ERROR */
3102 + buffer[offset] = 0x70;
3103 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3104 + /* ABORTED COMMAND */
3105 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3106 + /* WRITE ERROR */
3107 +@@ -4423,6 +4431,7 @@ int transport_send_check_condition_and_sense(
3108 + case TCM_SERVICE_CRC_ERROR:
3109 + /* CURRENT ERROR */
3110 + buffer[offset] = 0x70;
3111 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3112 + /* ABORTED COMMAND */
3113 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3114 + /* PROTOCOL SERVICE CRC ERROR */
3115 +@@ -4433,6 +4442,7 @@ int transport_send_check_condition_and_sense(
3116 + case TCM_SNACK_REJECTED:
3117 + /* CURRENT ERROR */
3118 + buffer[offset] = 0x70;
3119 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3120 + /* ABORTED COMMAND */
3121 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
3122 + /* READ ERROR */
3123 +@@ -4443,6 +4453,7 @@ int transport_send_check_condition_and_sense(
3124 + case TCM_WRITE_PROTECTED:
3125 + /* CURRENT ERROR */
3126 + buffer[offset] = 0x70;
3127 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3128 + /* DATA PROTECT */
3129 + buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
3130 + /* WRITE PROTECTED */
3131 +@@ -4451,6 +4462,7 @@ int transport_send_check_condition_and_sense(
3132 + case TCM_CHECK_CONDITION_UNIT_ATTENTION:
3133 + /* CURRENT ERROR */
3134 + buffer[offset] = 0x70;
3135 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3136 + /* UNIT ATTENTION */
3137 + buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
3138 + core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
3139 +@@ -4460,6 +4472,7 @@ int transport_send_check_condition_and_sense(
3140 + case TCM_CHECK_CONDITION_NOT_READY:
3141 + /* CURRENT ERROR */
3142 + buffer[offset] = 0x70;
3143 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3144 + /* Not Ready */
3145 + buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
3146 + transport_get_sense_codes(cmd, &asc, &ascq);
3147 +@@ -4470,6 +4483,7 @@ int transport_send_check_condition_and_sense(
3148 + default:
3149 + /* CURRENT ERROR */
3150 + buffer[offset] = 0x70;
3151 ++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
3152 + /* ILLEGAL REQUEST */
3153 + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
3154 + /* LOGICAL UNIT COMMUNICATION FAILURE */
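Every CHECK CONDITION path above now sets the ADDITIONAL SENSE LENGTH byte
(offset 7 of fixed-format sense data) to 10, making the valid sense length
8 + 10 = 18 bytes so the ASC/ASCQ bytes at offsets 12 and 13 are covered.
A small sketch of the resulting layout (offsets per the SPC fixed format;
macro names mirror the hunk):

    #include <stdio.h>

    #define SPC_SENSE_KEY_OFFSET        2
    #define SPC_ADD_SENSE_LEN_OFFSET    7
    #define SPC_ASC_KEY_OFFSET          12
    #define SPC_ASCQ_KEY_OFFSET         13
    #define ILLEGAL_REQUEST             0x05

    int main(void)
    {
        unsigned char sense[18] = { 0 };

        sense[0] = 0x70;                        /* current error, fixed format */
        sense[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
        sense[SPC_ADD_SENSE_LEN_OFFSET] = 10;   /* bytes that follow byte 7 */
        sense[SPC_ASC_KEY_OFFSET] = 0x25;       /* LOGICAL UNIT NOT SUPPORTED */
        sense[SPC_ASCQ_KEY_OFFSET] = 0x00;

        printf("valid sense length = %d bytes\n",
               8 + sense[SPC_ADD_SENSE_LEN_OFFSET]);
        return 0;
    }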
3155 +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
3156 +index ede860f..a580b17 100644
3157 +--- a/drivers/xen/xenbus/xenbus_xs.c
3158 ++++ b/drivers/xen/xenbus/xenbus_xs.c
3159 +@@ -801,6 +801,12 @@ static int process_msg(void)
3160 + goto out;
3161 + }
3162 +
3163 ++ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
3164 ++ kfree(msg);
3165 ++ err = -EINVAL;
3166 ++ goto out;
3167 ++ }
3168 ++
3169 + body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
3170 + if (body == NULL) {
3171 + kfree(msg);
3172 +diff --git a/fs/aio.c b/fs/aio.c
3173 +index 78c514c..969beb0 100644
3174 +--- a/fs/aio.c
3175 ++++ b/fs/aio.c
3176 +@@ -476,14 +476,21 @@ static void kiocb_batch_init(struct kiocb_batch *batch, long total)
3177 + batch->count = total;
3178 + }
3179 +
3180 +-static void kiocb_batch_free(struct kiocb_batch *batch)
3181 ++static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
3182 + {
3183 + struct kiocb *req, *n;
3184 +
3185 ++ if (list_empty(&batch->head))
3186 ++ return;
3187 ++
3188 ++ spin_lock_irq(&ctx->ctx_lock);
3189 + list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
3190 + list_del(&req->ki_batch);
3191 ++ list_del(&req->ki_list);
3192 + kmem_cache_free(kiocb_cachep, req);
3193 ++ ctx->reqs_active--;
3194 + }
3195 ++ spin_unlock_irq(&ctx->ctx_lock);
3196 + }
3197 +
3198 + /*
3199 +@@ -1742,7 +1749,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
3200 + }
3201 + blk_finish_plug(&plug);
3202 +
3203 +- kiocb_batch_free(&batch);
3204 ++ kiocb_batch_free(ctx, &batch);
3205 + put_ioctx(ctx);
3206 + return i ? i : ret;
3207 + }
3208 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3209 +index f3670cf..63e4be4 100644
3210 +--- a/fs/cifs/connect.c
3211 ++++ b/fs/cifs/connect.c
3212 +@@ -2914,18 +2914,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3213 + #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
3214 +
3215 + /*
3216 +- * Windows only supports a max of 60k reads. Default to that when posix
3217 +- * extensions aren't in force.
3218 ++ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
3219 ++ * those values when posix extensions aren't in force. In actuality here, we
3220 ++ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
3221 ++ * to be ok with the extra byte even though Windows doesn't send writes that
3222 ++ * are that large.
3223 ++ *
3224 ++ * Citation:
3225 ++ *
3226 ++ * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
3227 + */
3228 + #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
3229 ++#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
3230 +
3231 + static unsigned int
3232 + cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
3233 + {
3234 + __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
3235 + struct TCP_Server_Info *server = tcon->ses->server;
3236 +- unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
3237 +- CIFS_DEFAULT_IOSIZE;
3238 ++ unsigned int wsize;
3239 ++
3240 ++ /* start with specified wsize, or default */
3241 ++ if (pvolume_info->wsize)
3242 ++ wsize = pvolume_info->wsize;
3243 ++ else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
3244 ++ wsize = CIFS_DEFAULT_IOSIZE;
3245 ++ else
3246 ++ wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
3247 +
3248 + /* can server support 24-bit write sizes? (via UNIX extensions) */
3249 + if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
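A simplified model of the new wsize default selection above -- the subsequent
capping against the server's actual limits is left out, and the 1000 in main()
is just a sample mount option:

    #include <stdio.h>

    #define CIFS_DEFAULT_IOSIZE             (1024 * 1024)
    #define CIFS_DEFAULT_NON_POSIX_WSIZE    65536   /* 4k-aligned stand-in
                                                       for Windows' 65535 */

    static unsigned int pick_wsize(unsigned int user_wsize, int large_write_cap)
    {
        if (user_wsize)                 /* explicit wsize= mount option wins */
            return user_wsize;
        return large_write_cap ? CIFS_DEFAULT_IOSIZE
                               : CIFS_DEFAULT_NON_POSIX_WSIZE;
    }

    int main(void)
    {
        printf("posix: %u, non-posix: %u, user: %u\n",
               pick_wsize(0, 1), pick_wsize(0, 0), pick_wsize(1000, 0));
        return 0;
    }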
3250 +diff --git a/fs/dcache.c b/fs/dcache.c
3251 +index 89509b5..f7908ae 100644
3252 +--- a/fs/dcache.c
3253 ++++ b/fs/dcache.c
3254 +@@ -242,6 +242,7 @@ static void dentry_lru_add(struct dentry *dentry)
3255 + static void __dentry_lru_del(struct dentry *dentry)
3256 + {
3257 + list_del_init(&dentry->d_lru);
3258 ++ dentry->d_flags &= ~DCACHE_SHRINK_LIST;
3259 + dentry->d_sb->s_nr_dentry_unused--;
3260 + dentry_stat.nr_unused--;
3261 + }
3262 +@@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
3263 + }
3264 + }
3265 +
3266 +-static void dentry_lru_move_tail(struct dentry *dentry)
3267 ++static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
3268 + {
3269 + spin_lock(&dcache_lru_lock);
3270 + if (list_empty(&dentry->d_lru)) {
3271 +- list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3272 ++ list_add_tail(&dentry->d_lru, list);
3273 + dentry->d_sb->s_nr_dentry_unused++;
3274 + dentry_stat.nr_unused++;
3275 + } else {
3276 +- list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
3277 ++ list_move_tail(&dentry->d_lru, list);
3278 + }
3279 + spin_unlock(&dcache_lru_lock);
3280 + }
3281 +@@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
3282 + }
3283 +
3284 + /**
3285 +- * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
3286 +- * @sb: superblock to shrink dentry LRU.
3287 +- * @count: number of entries to prune
3288 +- * @flags: flags to control the dentry processing
3289 ++ * prune_dcache_sb - shrink the dcache
3290 ++ * @sb: superblock
3291 ++ * @count: number of entries to try to free
3292 ++ *
3293 ++ * Attempt to shrink the superblock dcache LRU by @count entries. This is
3294 ++ * done when we need more memory and called from the superblock shrinker
3295 ++ * function.
3296 + *
3297 +- * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
3298 ++ * This function may fail to free any resources if all the dentries are in
3299 ++ * use.
3300 + */
3301 +-static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
3302 ++void prune_dcache_sb(struct super_block *sb, int count)
3303 + {
3304 + struct dentry *dentry;
3305 + LIST_HEAD(referenced);
3306 +@@ -795,18 +800,13 @@ relock:
3307 + goto relock;
3308 + }
3309 +
3310 +- /*
3311 +- * If we are honouring the DCACHE_REFERENCED flag and the
3312 +- * dentry has this flag set, don't free it. Clear the flag
3313 +- * and put it back on the LRU.
3314 +- */
3315 +- if (flags & DCACHE_REFERENCED &&
3316 +- dentry->d_flags & DCACHE_REFERENCED) {
3317 ++ if (dentry->d_flags & DCACHE_REFERENCED) {
3318 + dentry->d_flags &= ~DCACHE_REFERENCED;
3319 + list_move(&dentry->d_lru, &referenced);
3320 + spin_unlock(&dentry->d_lock);
3321 + } else {
3322 + list_move_tail(&dentry->d_lru, &tmp);
3323 ++ dentry->d_flags |= DCACHE_SHRINK_LIST;
3324 + spin_unlock(&dentry->d_lock);
3325 + if (!--count)
3326 + break;
3327 +@@ -821,23 +821,6 @@ relock:
3328 + }
3329 +
3330 + /**
3331 +- * prune_dcache_sb - shrink the dcache
3332 +- * @sb: superblock
3333 +- * @nr_to_scan: number of entries to try to free
3334 +- *
3335 +- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
3336 +- * done when we need more memory an called from the superblock shrinker
3337 +- * function.
3338 +- *
3339 +- * This function may fail to free any resources if all the dentries are in
3340 +- * use.
3341 +- */
3342 +-void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
3343 +-{
3344 +- __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
3345 +-}
3346 +-
3347 +-/**
3348 + * shrink_dcache_sb - shrink dcache for a superblock
3349 + * @sb: superblock
3350 + *
3351 +@@ -1091,7 +1074,7 @@ EXPORT_SYMBOL(have_submounts);
3352 + * drop the lock and return early due to latency
3353 + * constraints.
3354 + */
3355 +-static int select_parent(struct dentry * parent)
3356 ++static int select_parent(struct dentry *parent, struct list_head *dispose)
3357 + {
3358 + struct dentry *this_parent;
3359 + struct list_head *next;
3360 +@@ -1113,17 +1096,21 @@ resume:
3361 +
3362 + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3363 +
3364 +- /*
3365 +- * move only zero ref count dentries to the end
3366 +- * of the unused list for prune_dcache
3367 ++ /*
3368 ++ * move only zero ref count dentries to the dispose list.
3369 ++ *
3370 ++ * Those which are presently on the shrink list, being processed
3371 ++ * by shrink_dentry_list(), shouldn't be moved. Otherwise the
3372 ++ * loop in shrink_dcache_parent() might not make any progress
3373 ++ * and loop forever.
3374 + */
3375 +- if (!dentry->d_count) {
3376 +- dentry_lru_move_tail(dentry);
3377 +- found++;
3378 +- } else {
3379 ++ if (dentry->d_count) {
3380 + dentry_lru_del(dentry);
3381 ++ } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
3382 ++ dentry_lru_move_list(dentry, dispose);
3383 ++ dentry->d_flags |= DCACHE_SHRINK_LIST;
3384 ++ found++;
3385 + }
3386 +-
3387 + /*
3388 + * We can return to the caller if we have found some (this
3389 + * ensures forward progress). We'll be coming back to find
3390 +@@ -1180,14 +1167,13 @@ rename_retry:
3391 + *
3392 + * Prune the dcache to remove unused children of the parent dentry.
3393 + */
3394 +-
3395 + void shrink_dcache_parent(struct dentry * parent)
3396 + {
3397 +- struct super_block *sb = parent->d_sb;
3398 ++ LIST_HEAD(dispose);
3399 + int found;
3400 +
3401 +- while ((found = select_parent(parent)) != 0)
3402 +- __shrink_dcache_sb(sb, found, 0);
3403 ++ while ((found = select_parent(parent, &dispose)) != 0)
3404 ++ shrink_dentry_list(&dispose);
3405 + }
3406 + EXPORT_SYMBOL(shrink_dcache_parent);
3407 +
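select_parent() now collects zero-refcount dentries onto a caller-private
dispose list and tags them DCACHE_SHRINK_LIST so concurrent scanners skip them,
which is what stops shrink_dcache_parent() from spinning on entries another CPU
is already freeing. A toy model of the pattern -- all names here are
illustrative, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct entry {
        struct entry *next;
        int refcount;
        int on_shrink_list;
    };

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *lru;

    /* Move prunable entries to a private list under the lock, marking
     * them so other scanners leave them alone; free with the lock dropped. */
    static struct entry *select_disposable(void)
    {
        struct entry **p, *e, *dispose = NULL;

        pthread_mutex_lock(&lru_lock);
        for (p = &lru; (e = *p); ) {
            if (e->refcount == 0 && !e->on_shrink_list) {
                *p = e->next;           /* unlink from the LRU */
                e->on_shrink_list = 1;  /* skip me elsewhere */
                e->next = dispose;
                dispose = e;
            } else {
                p = &e->next;
            }
        }
        pthread_mutex_unlock(&lru_lock);
        return dispose;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct entry *e = calloc(1, sizeof(*e));
            e->refcount = i & 1;        /* every other entry is busy */
            e->next = lru;
            lru = e;
        }
        for (struct entry *e = select_disposable(), *n; e; e = n) {
            n = e->next;
            free(e);                    /* shrink_dentry_list() analogue */
            puts("disposed one entry");
        }
        return 0;
    }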
3408 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
3409 +index a567968..ab25f57 100644
3410 +--- a/fs/ext4/ioctl.c
3411 ++++ b/fs/ext4/ioctl.c
3412 +@@ -182,19 +182,22 @@ setversion_out:
3413 + if (err)
3414 + return err;
3415 +
3416 +- if (get_user(n_blocks_count, (__u32 __user *)arg))
3417 +- return -EFAULT;
3418 ++ if (get_user(n_blocks_count, (__u32 __user *)arg)) {
3419 ++ err = -EFAULT;
3420 ++ goto group_extend_out;
3421 ++ }
3422 +
3423 + if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3424 + EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3425 + ext4_msg(sb, KERN_ERR,
3426 + "Online resizing not supported with bigalloc");
3427 +- return -EOPNOTSUPP;
3428 ++ err = -EOPNOTSUPP;
3429 ++ goto group_extend_out;
3430 + }
3431 +
3432 + err = mnt_want_write(filp->f_path.mnt);
3433 + if (err)
3434 +- return err;
3435 ++ goto group_extend_out;
3436 +
3437 + err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
3438 + if (EXT4_SB(sb)->s_journal) {
3439 +@@ -204,9 +207,10 @@ setversion_out:
3440 + }
3441 + if (err == 0)
3442 + err = err2;
3443 ++
3444 + mnt_drop_write(filp->f_path.mnt);
3445 ++group_extend_out:
3446 + ext4_resize_end(sb);
3447 +-
3448 + return err;
3449 + }
3450 +
3451 +@@ -267,19 +271,22 @@ mext_out:
3452 + return err;
3453 +
3454 + if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
3455 +- sizeof(input)))
3456 +- return -EFAULT;
3457 ++ sizeof(input))) {
3458 ++ err = -EFAULT;
3459 ++ goto group_add_out;
3460 ++ }
3461 +
3462 + if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3463 + EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
3464 + ext4_msg(sb, KERN_ERR,
3465 + "Online resizing not supported with bigalloc");
3466 +- return -EOPNOTSUPP;
3467 ++ err = -EOPNOTSUPP;
3468 ++ goto group_add_out;
3469 + }
3470 +
3471 + err = mnt_want_write(filp->f_path.mnt);
3472 + if (err)
3473 +- return err;
3474 ++ goto group_add_out;
3475 +
3476 + err = ext4_group_add(sb, &input);
3477 + if (EXT4_SB(sb)->s_journal) {
3478 +@@ -289,9 +296,10 @@ mext_out:
3479 + }
3480 + if (err == 0)
3481 + err = err2;
3482 ++
3483 + mnt_drop_write(filp->f_path.mnt);
3484 ++group_add_out:
3485 + ext4_resize_end(sb);
3486 +-
3487 + return err;
3488 + }
3489 +
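The point of the gotos above: once ext4_resize_begin() has succeeded, every
exit must pass through ext4_resize_end(), or the resize-in-progress state is
leaked and later resizes block forever. The same shape in isolation, with a
mutex standing in for ext4's resize flag:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t resize_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_resize(int fail_early)
    {
        int err = 0;

        pthread_mutex_lock(&resize_lock);      /* ext4_resize_begin() */
        if (fail_early) {
            err = -1;                          /* was: return -1 (state leaked) */
            goto out;
        }
        /* ... the actual resize work ... */
    out:
        pthread_mutex_unlock(&resize_lock);    /* ext4_resize_end() */
        return err;
    }

    int main(void)
    {
        printf("%d %d\n", do_resize(1), do_resize(0));
        return 0;
    }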
3490 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3491 +index 3e1329e..9281dbe 100644
3492 +--- a/fs/ext4/super.c
3493 ++++ b/fs/ext4/super.c
3494 +@@ -2006,17 +2006,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
3495 + struct ext4_group_desc *gdp = NULL;
3496 + ext4_group_t flex_group_count;
3497 + ext4_group_t flex_group;
3498 +- int groups_per_flex = 0;
3499 ++ unsigned int groups_per_flex = 0;
3500 + size_t size;
3501 + int i;
3502 +
3503 + sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
3504 +- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3505 +-
3506 +- if (groups_per_flex < 2) {
3507 ++ if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
3508 + sbi->s_log_groups_per_flex = 0;
3509 + return 1;
3510 + }
3511 ++ groups_per_flex = 1 << sbi->s_log_groups_per_flex;
3512 +
3513 + /* We allocate both existing and potentially added groups */
3514 + flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
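The old check computed 1 << s_log_groups_per_flex before validating it, which
is undefined for a crafted superblock value of 31 or more (and a test of
groups_per_flex < 2 cannot catch, say, 36, where the signed shift has already
overflowed). The fixed ordering, in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int log_gpf = 36;      /* crafted on-disk value */

        /* Range-check the exponent before shifting: 1u << 36 on a
         * 32-bit type is undefined behaviour. */
        if (log_gpf < 1 || log_gpf > 31)
            printf("rejecting s_log_groups_per_flex=%u\n", log_gpf);
        else
            printf("groups_per_flex=%u\n", 1u << log_gpf);
        return 0;
    }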
3515 +diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
3516 +index 281ae95..3db6b82 100644
3517 +--- a/fs/nfs/blocklayout/blocklayout.c
3518 ++++ b/fs/nfs/blocklayout/blocklayout.c
3519 +@@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
3520 + {
3521 + struct bio *bio;
3522 +
3523 ++ npg = min(npg, BIO_MAX_PAGES);
3524 + bio = bio_alloc(GFP_NOIO, npg);
3525 +- if (!bio)
3526 +- return NULL;
3527 ++ if (!bio && (current->flags & PF_MEMALLOC)) {
3528 ++ while (!bio && (npg /= 2))
3529 ++ bio = bio_alloc(GFP_NOIO, npg);
3530 ++ }
3531 +
3532 +- bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3533 +- bio->bi_bdev = be->be_mdev;
3534 +- bio->bi_end_io = end_io;
3535 +- bio->bi_private = par;
3536 ++ if (bio) {
3537 ++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
3538 ++ bio->bi_bdev = be->be_mdev;
3539 ++ bio->bi_end_io = end_io;
3540 ++ bio->bi_private = par;
3541 ++ }
3542 + return bio;
3543 + }
3544 +
3545 +@@ -779,16 +784,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
3546 + static void free_blk_mountid(struct block_mount_id *mid)
3547 + {
3548 + if (mid) {
3549 +- struct pnfs_block_dev *dev;
3550 +- spin_lock(&mid->bm_lock);
3551 +- while (!list_empty(&mid->bm_devlist)) {
3552 +- dev = list_first_entry(&mid->bm_devlist,
3553 +- struct pnfs_block_dev,
3554 +- bm_node);
3555 ++ struct pnfs_block_dev *dev, *tmp;
3556 ++
3557 ++ /* No need to take bm_lock as we are the last user freeing bm_devlist */
3558 ++ list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
3559 + list_del(&dev->bm_node);
3560 + bl_free_block_dev(dev);
3561 + }
3562 +- spin_unlock(&mid->bm_lock);
3563 + kfree(mid);
3564 + }
3565 + }
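bl_alloc_init_bio() now clamps the request to BIO_MAX_PAGES and, under memory
pressure, retries with halved page counts instead of dereferencing the NULL it
used to ignore. A simplified model of the fallback (the PF_MEMALLOC gating is
omitted, and the page size is assumed):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PAGES 256

    /* Clamp, then halve the request on failure rather than give up. */
    static void *alloc_scaled(size_t *npg)
    {
        void *p = NULL;

        if (*npg > MAX_PAGES)
            *npg = MAX_PAGES;
        while (*npg > 0 && (p = malloc(*npg * 4096)) == NULL)
            *npg /= 2;
        return p;
    }

    int main(void)
    {
        size_t npg = 1024;
        void *p = alloc_scaled(&npg);

        printf("got %zu pages at %p\n", p ? npg : (size_t)0, p);
        free(p);
        return 0;
    }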
3566 +diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
3567 +index 19fa7b0..c69682a 100644
3568 +--- a/fs/nfs/blocklayout/extents.c
3569 ++++ b/fs/nfs/blocklayout/extents.c
3570 +@@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
3571 + }
3572 +
3573 + /* Ensure that future operations on given range of tree will not malloc */
3574 +-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3575 ++static int _preload_range(struct pnfs_inval_markings *marks,
3576 ++ u64 offset, u64 length)
3577 + {
3578 + u64 start, end, s;
3579 + int count, i, used = 0, status = -ENOMEM;
3580 + struct pnfs_inval_tracking **storage;
3581 ++ struct my_tree *tree = &marks->im_tree;
3582 +
3583 + dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
3584 + start = normalize(offset, tree->mtt_step_size);
3585 +@@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
3586 + goto out_cleanup;
3587 + }
3588 +
3589 +- /* Now need lock - HOW??? */
3590 +-
3591 ++ spin_lock(&marks->im_lock);
3592 + for (s = start; s < end; s += tree->mtt_step_size)
3593 + used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
3594 ++ spin_unlock(&marks->im_lock);
3595 +
3596 +- /* Unlock - HOW??? */
3597 + status = 0;
3598 +
3599 + out_cleanup:
3600 +@@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
3601 +
3602 + start = normalize(offset, marks->im_block_size);
3603 + end = normalize_up(offset + length, marks->im_block_size);
3604 +- if (_preload_range(&marks->im_tree, start, end - start))
3605 ++ if (_preload_range(marks, start, end - start))
3606 + goto outerr;
3607 +
3608 + spin_lock(&marks->im_lock);
3609 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
3610 +index 43926ad..54cea8a 100644
3611 +--- a/fs/nfs/callback_proc.c
3612 ++++ b/fs/nfs/callback_proc.c
3613 +@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
3614 + dprintk("%s enter. slotid %d seqid %d\n",
3615 + __func__, args->csa_slotid, args->csa_sequenceid);
3616 +
3617 +- if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
3618 ++ if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
3619 + return htonl(NFS4ERR_BADSLOT);
3620 +
3621 + slot = tbl->slots + args->csa_slotid;
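A one-character bounds fix: the slot table holds NFS41_BC_MAX_CALLBACKS
entries, so a slotid equal to that count is already out of range. In isolation:

    #include <stdio.h>

    #define NFS41_BC_MAX_CALLBACKS 1    /* table holds indices 0..0 */

    int main(void)
    {
        unsigned int slotid = NFS41_BC_MAX_CALLBACKS;

        /* ">" let slotid == MAX through, indexing one slot past the
         * end of the table; ">=" is the correct guard. */
        printf("old check rejects: %d\n", slotid > NFS41_BC_MAX_CALLBACKS);
        printf("new check rejects: %d\n", slotid >= NFS41_BC_MAX_CALLBACKS);
        return 0;
    }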
3622 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
3623 +index 606ef0f..c43a452 100644
3624 +--- a/fs/nfs/file.c
3625 ++++ b/fs/nfs/file.c
3626 +@@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3627 + datasync);
3628 +
3629 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
3630 +- if (ret)
3631 +- return ret;
3632 + mutex_lock(&inode->i_mutex);
3633 +
3634 + nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
3635 + have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3636 + status = nfs_commit_inode(inode, FLUSH_SYNC);
3637 ++ if (status >= 0 && ret < 0)
3638 ++ status = ret;
3639 + have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
3640 + if (have_error)
3641 + ret = xchg(&ctx->error, 0);
3642 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3643 +index d9f4d78..055d702 100644
3644 +--- a/fs/nfs/nfs4proc.c
3645 ++++ b/fs/nfs/nfs4proc.c
3646 +@@ -3430,19 +3430,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
3647 + */
3648 + #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3649 +
3650 +-static void buf_to_pages(const void *buf, size_t buflen,
3651 +- struct page **pages, unsigned int *pgbase)
3652 +-{
3653 +- const void *p = buf;
3654 +-
3655 +- *pgbase = offset_in_page(buf);
3656 +- p -= *pgbase;
3657 +- while (p < buf + buflen) {
3658 +- *(pages++) = virt_to_page(p);
3659 +- p += PAGE_CACHE_SIZE;
3660 +- }
3661 +-}
3662 +-
3663 + static int buf_to_pages_noslab(const void *buf, size_t buflen,
3664 + struct page **pages, unsigned int *pgbase)
3665 + {
3666 +@@ -3539,9 +3526,19 @@ out:
3667 + nfs4_set_cached_acl(inode, acl);
3668 + }
3669 +
3670 ++/*
3671 ++ * The getxattr API returns the required buffer length when called with a
3672 ++ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3673 ++ * the required buf. On a NULL buf, we send a page of data to the server
3674 ++ * guessing that the ACL request can be serviced by a page. If so, we cache
3675 ++ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3676 ++ * the cache. If not so, we throw away the page, and cache the required
3677 ++ * length. The next getxattr call will then produce another round trip to
3678 ++ * the server, this time with the input buf of the required size.
3679 ++ */
3680 + static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3681 + {
3682 +- struct page *pages[NFS4ACL_MAXPAGES];
3683 ++ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3684 + struct nfs_getaclargs args = {
3685 + .fh = NFS_FH(inode),
3686 + .acl_pages = pages,
3687 +@@ -3556,41 +3553,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
3688 + .rpc_argp = &args,
3689 + .rpc_resp = &res,
3690 + };
3691 +- struct page *localpage = NULL;
3692 +- int ret;
3693 ++ int ret = -ENOMEM, npages, i, acl_len = 0;
3694 +
3695 +- if (buflen < PAGE_SIZE) {
3696 +- /* As long as we're doing a round trip to the server anyway,
3697 +- * let's be prepared for a page of acl data. */
3698 +- localpage = alloc_page(GFP_KERNEL);
3699 +- resp_buf = page_address(localpage);
3700 +- if (localpage == NULL)
3701 +- return -ENOMEM;
3702 +- args.acl_pages[0] = localpage;
3703 +- args.acl_pgbase = 0;
3704 +- args.acl_len = PAGE_SIZE;
3705 +- } else {
3706 +- resp_buf = buf;
3707 +- buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3708 ++ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3709 ++ /* As long as we're doing a round trip to the server anyway,
3710 ++ * let's be prepared for a page of acl data. */
3711 ++ if (npages == 0)
3712 ++ npages = 1;
3713 ++
3714 ++ for (i = 0; i < npages; i++) {
3715 ++ pages[i] = alloc_page(GFP_KERNEL);
3716 ++ if (!pages[i])
3717 ++ goto out_free;
3718 ++ }
3719 ++ if (npages > 1) {
3720 ++ /* for decoding across pages */
3721 ++ args.acl_scratch = alloc_page(GFP_KERNEL);
3722 ++ if (!args.acl_scratch)
3723 ++ goto out_free;
3724 + }
3725 +- ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3726 ++ args.acl_len = npages * PAGE_SIZE;
3727 ++ args.acl_pgbase = 0;
3728 ++ /* Let decode_getfacl know not to fail if the ACL data is larger than
3729 ++ * the page we send as a guess */
3730 ++ if (buf == NULL)
3731 ++ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3732 ++ resp_buf = page_address(pages[0]);
3733 ++
3734 ++ dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
3735 ++ __func__, buf, buflen, npages, args.acl_len);
3736 ++ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3737 ++ &msg, &args.seq_args, &res.seq_res, 0);
3738 + if (ret)
3739 + goto out_free;
3740 +- if (res.acl_len > args.acl_len)
3741 +- nfs4_write_cached_acl(inode, NULL, res.acl_len);
3742 ++
3743 ++ acl_len = res.acl_len - res.acl_data_offset;
3744 ++ if (acl_len > args.acl_len)
3745 ++ nfs4_write_cached_acl(inode, NULL, acl_len);
3746 + else
3747 +- nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3748 ++ nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
3749 ++ acl_len);
3750 + if (buf) {
3751 + ret = -ERANGE;
3752 +- if (res.acl_len > buflen)
3753 ++ if (acl_len > buflen)
3754 + goto out_free;
3755 +- if (localpage)
3756 +- memcpy(buf, resp_buf, res.acl_len);
3757 ++ _copy_from_pages(buf, pages, res.acl_data_offset,
3758 ++ res.acl_len);
3759 + }
3760 +- ret = res.acl_len;
3761 ++ ret = acl_len;
3762 + out_free:
3763 +- if (localpage)
3764 +- __free_page(localpage);
3765 ++ for (i = 0; i < npages; i++)
3766 ++ if (pages[i])
3767 ++ __free_page(pages[i]);
3768 ++ if (args.acl_scratch)
3769 ++ __free_page(args.acl_scratch);
3770 + return ret;
3771 + }
3772 +
3773 +@@ -3621,6 +3637,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3774 + nfs_zap_acl_cache(inode);
3775 + ret = nfs4_read_cached_acl(inode, buf, buflen);
3776 + if (ret != -ENOENT)
3777 ++ /* -ENOENT is returned if there is no ACL or if there is an ACL
3778 ++ * but no cached acl data, just the acl length */
3779 + return ret;
3780 + return nfs4_get_acl_uncached(inode, buf, buflen);
3781 + }
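The comment above describes the standard two-call getxattr dance, seen from
userspace -- the path is an assumption and an NFSv4 mount exporting ACLs is
required:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    int main(void)
    {
        const char *path = "/mnt/nfs4/file";   /* assumed NFSv4 mount */
        ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
        char *buf;

        if (len < 0) { perror("getxattr(size)"); return 1; }
        buf = malloc(len);                     /* size learned from call 1 */
        if (!buf)
            return 1;
        len = getxattr(path, "system.nfs4_acl", buf, len);
        if (len < 0) { perror("getxattr(data)"); return 1; }
        printf("ACL is %zd bytes\n", len);
        free(buf);
        return 0;
    }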
3782 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
3783 +index e6161b2..dcaf693 100644
3784 +--- a/fs/nfs/nfs4xdr.c
3785 ++++ b/fs/nfs/nfs4xdr.c
3786 +@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
3787 + encode_compound_hdr(xdr, req, &hdr);
3788 + encode_sequence(xdr, &args->seq_args, &hdr);
3789 + encode_putfh(xdr, args->fh, &hdr);
3790 +- replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
3791 ++ replen = hdr.replen + op_decode_hdr_maxsz + 1;
3792 + encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
3793 +
3794 + xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
3795 + args->acl_pages, args->acl_pgbase, args->acl_len);
3796 ++ xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
3797 ++
3798 + encode_nops(&hdr);
3799 + }
3800 +
3801 +@@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
3802 + }
3803 +
3804 + static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3805 +- size_t *acl_len)
3806 ++ struct nfs_getaclres *res)
3807 + {
3808 +- __be32 *savep;
3809 ++ __be32 *savep, *bm_p;
3810 + uint32_t attrlen,
3811 + bitmap[3] = {0};
3812 + struct kvec *iov = req->rq_rcv_buf.head;
3813 + int status;
3814 +
3815 +- *acl_len = 0;
3816 ++ res->acl_len = 0;
3817 + if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
3818 + goto out;
3819 ++ bm_p = xdr->p;
3820 + if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
3821 + goto out;
3822 + if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
3823 +@@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
3824 + size_t hdrlen;
3825 + u32 recvd;
3826 +
3827 ++ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
3828 ++ * are stored with the acl data to handle the problem of
3829 ++ * variable length bitmaps. */
3830 ++ xdr->p = bm_p;
3831 ++ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
3832 ++ res->acl_data_offset <<= 2;
3833 ++
3834 + /* We ignore &savep and don't do consistency checks on
3835 + * the attr length. Let userspace figure it out.... */
3836 + hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
3837 ++ attrlen += res->acl_data_offset;
3838 + recvd = req->rq_rcv_buf.len - hdrlen;
3839 + if (attrlen > recvd) {
3840 +- dprintk("NFS: server cheating in getattr"
3841 +- " acl reply: attrlen %u > recvd %u\n",
3842 ++ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
3843 ++ /* getxattr interface called with a NULL buf */
3844 ++ res->acl_len = attrlen;
3845 ++ goto out;
3846 ++ }
3847 ++ dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
3848 + attrlen, recvd);
3849 + return -EINVAL;
3850 + }
3851 + xdr_read_pages(xdr, attrlen);
3852 +- *acl_len = attrlen;
3853 ++ res->acl_len = attrlen;
3854 + } else
3855 + status = -EOPNOTSUPP;
3856 +
3857 +@@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
3858 + status = decode_putfh(xdr);
3859 + if (status)
3860 + goto out;
3861 +- status = decode_getacl(xdr, rqstp, &res->acl_len);
3862 ++ status = decode_getacl(xdr, rqstp, res);
3863 +
3864 + out:
3865 + return status;
3866 +diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
3867 +index c807ab9..55d0128 100644
3868 +--- a/fs/nfs/objlayout/objio_osd.c
3869 ++++ b/fs/nfs/objlayout/objio_osd.c
3870 +@@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
3871 + static struct pnfs_layoutdriver_type objlayout_type = {
3872 + .id = LAYOUT_OSD2_OBJECTS,
3873 + .name = "LAYOUT_OSD2_OBJECTS",
3874 +- .flags = PNFS_LAYOUTRET_ON_SETATTR,
3875 ++ .flags = PNFS_LAYOUTRET_ON_SETATTR |
3876 ++ PNFS_LAYOUTRET_ON_ERROR,
3877 +
3878 + .alloc_layout_hdr = objlayout_alloc_layout_hdr,
3879 + .free_layout_hdr = objlayout_free_layout_hdr,
3880 +diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
3881 +index 72074e3..b3c2903 100644
3882 +--- a/fs/nfs/objlayout/objlayout.c
3883 ++++ b/fs/nfs/objlayout/objlayout.c
3884 +@@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3885 + oir->status = rdata->task.tk_status = status;
3886 + if (status >= 0)
3887 + rdata->res.count = status;
3888 ++ else
3889 ++ rdata->pnfs_error = status;
3890 + objlayout_iodone(oir);
3891 + /* must not use oir after this point */
3892 +
3893 +@@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
3894 + if (status >= 0) {
3895 + wdata->res.count = status;
3896 + wdata->verf.committed = oir->committed;
3897 ++ } else {
3898 ++ wdata->pnfs_error = status;
3899 + }
3900 + objlayout_iodone(oir);
3901 + /* must not use oir after this point */
3902 +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
3903 +index 8e672a2..f881a63 100644
3904 +--- a/fs/nfs/pnfs.c
3905 ++++ b/fs/nfs/pnfs.c
3906 +@@ -1178,6 +1178,15 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
3907 + put_lseg(data->lseg);
3908 + data->lseg = NULL;
3909 + dprintk("pnfs write error = %d\n", data->pnfs_error);
3910 ++ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3911 ++ PNFS_LAYOUTRET_ON_ERROR) {
3912 ++ /* Don't lo_commit on error, the server will need to
3913 ++ * perform a file recovery.
3914 ++ */
3915 ++ clear_bit(NFS_INO_LAYOUTCOMMIT,
3916 ++ &NFS_I(data->inode)->flags);
3917 ++ pnfs_return_layout(data->inode);
3918 ++ }
3919 + }
3920 + data->mds_ops->rpc_release(data);
3921 + }
3922 +@@ -1267,6 +1276,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
3923 + put_lseg(data->lseg);
3924 + data->lseg = NULL;
3925 + dprintk("pnfs read error = %d\n", data->pnfs_error);
3926 ++ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
3927 ++ PNFS_LAYOUTRET_ON_ERROR)
3928 ++ pnfs_return_layout(data->inode);
3929 +
3930 + nfs_pageio_init_read_mds(&pgio, data->inode);
3931 +
3932 +diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
3933 +index 1509530..53d593a 100644
3934 +--- a/fs/nfs/pnfs.h
3935 ++++ b/fs/nfs/pnfs.h
3936 +@@ -68,6 +68,7 @@ enum {
3937 + enum layoutdriver_policy_flags {
3938 + /* Should the pNFS client commit and return the layout upon a setattr */
3939 + PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
3940 ++ PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
3941 + };
3942 +
3943 + struct nfs4_deviceid_node;
3944 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3945 +index 1347774..3ada13c 100644
3946 +--- a/fs/nfs/super.c
3947 ++++ b/fs/nfs/super.c
3948 +@@ -909,10 +909,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
3949 + data->auth_flavor_len = 1;
3950 + data->version = version;
3951 + data->minorversion = 0;
3952 ++ security_init_mnt_opts(&data->lsm_opts);
3953 + }
3954 + return data;
3955 + }
3956 +
3957 ++static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
3958 ++{
3959 ++ if (data) {
3960 ++ kfree(data->client_address);
3961 ++ kfree(data->mount_server.hostname);
3962 ++ kfree(data->nfs_server.export_path);
3963 ++ kfree(data->nfs_server.hostname);
3964 ++ kfree(data->fscache_uniq);
3965 ++ security_free_mnt_opts(&data->lsm_opts);
3966 ++ kfree(data);
3967 ++ }
3968 ++}
3969 ++
3970 + /*
3971 + * Sanity-check a server address provided by the mount command.
3972 + *
3973 +@@ -2220,9 +2234,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3974 + data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
3975 + mntfh = nfs_alloc_fhandle();
3976 + if (data == NULL || mntfh == NULL)
3977 +- goto out_free_fh;
3978 +-
3979 +- security_init_mnt_opts(&data->lsm_opts);
3980 ++ goto out;
3981 +
3982 + /* Validate the mount data */
3983 + error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
3984 +@@ -2234,8 +2246,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3985 + #ifdef CONFIG_NFS_V4
3986 + if (data->version == 4) {
3987 + mntroot = nfs4_try_mount(flags, dev_name, data);
3988 +- kfree(data->client_address);
3989 +- kfree(data->nfs_server.export_path);
3990 + goto out;
3991 + }
3992 + #endif /* CONFIG_NFS_V4 */
3993 +@@ -2290,13 +2300,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
3994 + s->s_flags |= MS_ACTIVE;
3995 +
3996 + out:
3997 +- kfree(data->nfs_server.hostname);
3998 +- kfree(data->mount_server.hostname);
3999 +- kfree(data->fscache_uniq);
4000 +- security_free_mnt_opts(&data->lsm_opts);
4001 +-out_free_fh:
4002 ++ nfs_free_parsed_mount_data(data);
4003 + nfs_free_fhandle(mntfh);
4004 +- kfree(data);
4005 + return mntroot;
4006 +
4007 + out_err_nosb:
4008 +@@ -2623,9 +2628,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
4009 +
4010 + mntfh = nfs_alloc_fhandle();
4011 + if (data == NULL || mntfh == NULL)
4012 +- goto out_free_fh;
4013 +-
4014 +- security_init_mnt_opts(&data->lsm_opts);
4015 ++ goto out;
4016 +
4017 + /* Get a volume representation */
4018 + server = nfs4_create_server(data, mntfh);
4019 +@@ -2677,13 +2680,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
4020 +
4021 + s->s_flags |= MS_ACTIVE;
4022 +
4023 +- security_free_mnt_opts(&data->lsm_opts);
4024 + nfs_free_fhandle(mntfh);
4025 + return mntroot;
4026 +
4027 + out:
4028 +- security_free_mnt_opts(&data->lsm_opts);
4029 +-out_free_fh:
4030 + nfs_free_fhandle(mntfh);
4031 + return ERR_PTR(error);
4032 +
4033 +@@ -2838,7 +2838,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4034 +
4035 + data = nfs_alloc_parsed_mount_data(4);
4036 + if (data == NULL)
4037 +- goto out_free_data;
4038 ++ goto out;
4039 +
4040 + /* Validate the mount data */
4041 + error = nfs4_validate_mount_data(raw_data, data, dev_name);
4042 +@@ -2852,12 +2852,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
4043 + error = PTR_ERR(res);
4044 +
4045 + out:
4046 +- kfree(data->client_address);
4047 +- kfree(data->nfs_server.export_path);
4048 +- kfree(data->nfs_server.hostname);
4049 +- kfree(data->fscache_uniq);
4050 +-out_free_data:
4051 +- kfree(data);
4052 ++ nfs_free_parsed_mount_data(data);
4053 + dprintk("<-- nfs4_mount() = %d%s\n", error,
4054 + error != 0 ? " [error]" : "");
4055 + return res;
4056 +diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
4057 +index 62f3b90..5f312ab 100644
4058 +--- a/fs/nfsd/export.c
4059 ++++ b/fs/nfsd/export.c
4060 +@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
4061 + struct svc_expkey key;
4062 + struct svc_expkey *ek = NULL;
4063 +
4064 +- if (mesg[mlen-1] != '\n')
4065 ++ if (mlen < 1 || mesg[mlen-1] != '\n')
4066 + return -EINVAL;
4067 + mesg[mlen-1] = 0;
4068 +
4069 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4070 +index 47e94e3..5abced7 100644
4071 +--- a/fs/nfsd/nfs4state.c
4072 ++++ b/fs/nfsd/nfs4state.c
4073 +@@ -3809,16 +3809,29 @@ nevermind:
4074 + deny->ld_type = NFS4_WRITE_LT;
4075 + }
4076 +
4077 ++static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
4078 ++{
4079 ++ struct nfs4_ol_stateid *lst;
4080 ++
4081 ++ if (!same_owner_str(&lo->lo_owner, owner, clid))
4082 ++ return false;
4083 ++ lst = list_first_entry(&lo->lo_owner.so_stateids,
4084 ++ struct nfs4_ol_stateid, st_perstateowner);
4085 ++ return lst->st_file->fi_inode == inode;
4086 ++}
4087 ++
4088 + static struct nfs4_lockowner *
4089 + find_lockowner_str(struct inode *inode, clientid_t *clid,
4090 + struct xdr_netobj *owner)
4091 + {
4092 + unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
4093 ++ struct nfs4_lockowner *lo;
4094 + struct nfs4_stateowner *op;
4095 +
4096 + list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
4097 +- if (same_owner_str(op, owner, clid))
4098 +- return lockowner(op);
4099 ++ lo = lockowner(op);
4100 ++ if (same_lockowner_ino(lo, inode, clid, owner))
4101 ++ return lo;
4102 + }
4103 + return NULL;
4104 + }
4105 +diff --git a/fs/notify/mark.c b/fs/notify/mark.c
4106 +index e14587d..f104d56 100644
4107 +--- a/fs/notify/mark.c
4108 ++++ b/fs/notify/mark.c
4109 +@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4110 +
4111 + mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
4112 +
4113 +- /* 1 from caller and 1 for being on i_list/g_list */
4114 +- BUG_ON(atomic_read(&mark->refcnt) < 2);
4115 +-
4116 + spin_lock(&group->mark_lock);
4117 +
4118 + if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
4119 +@@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
4120 + iput(inode);
4121 +
4122 + /*
4123 ++ * We don't necessarily have a ref on mark from caller so the above iput
4124 ++ * may have already destroyed it. Don't touch from now on.
4125 ++ */
4126 ++
4127 ++ /*
4128 + * it's possible that this group tried to destroy itself, but this
4129 + * mark was simultaneously being freed by inode. If that's the
4130 + * case, we finish freeing the group here.
4131 +diff --git a/fs/proc/base.c b/fs/proc/base.c
4132 +index 851ba3d..1fc1dca 100644
4133 +--- a/fs/proc/base.c
4134 ++++ b/fs/proc/base.c
4135 +@@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
4136 + return result;
4137 + }
4138 +
4139 +-static struct mm_struct *__check_mem_permission(struct task_struct *task)
4140 +-{
4141 +- struct mm_struct *mm;
4142 +-
4143 +- mm = get_task_mm(task);
4144 +- if (!mm)
4145 +- return ERR_PTR(-EINVAL);
4146 +-
4147 +- /*
4148 +- * A task can always look at itself, in case it chooses
4149 +- * to use system calls instead of load instructions.
4150 +- */
4151 +- if (task == current)
4152 +- return mm;
4153 +-
4154 +- /*
4155 +- * If current is actively ptrace'ing, and would also be
4156 +- * permitted to freshly attach with ptrace now, permit it.
4157 +- */
4158 +- if (task_is_stopped_or_traced(task)) {
4159 +- int match;
4160 +- rcu_read_lock();
4161 +- match = (ptrace_parent(task) == current);
4162 +- rcu_read_unlock();
4163 +- if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
4164 +- return mm;
4165 +- }
4166 +-
4167 +- /*
4168 +- * No one else is allowed.
4169 +- */
4170 +- mmput(mm);
4171 +- return ERR_PTR(-EPERM);
4172 +-}
4173 +-
4174 +-/*
4175 +- * If current may access user memory in @task return a reference to the
4176 +- * corresponding mm, otherwise ERR_PTR.
4177 +- */
4178 +-static struct mm_struct *check_mem_permission(struct task_struct *task)
4179 +-{
4180 +- struct mm_struct *mm;
4181 +- int err;
4182 +-
4183 +- /*
4184 +- * Avoid racing if task exec's as we might get a new mm but validate
4185 +- * against old credentials.
4186 +- */
4187 +- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
4188 +- if (err)
4189 +- return ERR_PTR(err);
4190 +-
4191 +- mm = __check_mem_permission(task);
4192 +- mutex_unlock(&task->signal->cred_guard_mutex);
4193 +-
4194 +- return mm;
4195 +-}
4196 +-
4197 +-struct mm_struct *mm_for_maps(struct task_struct *task)
4198 ++static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
4199 + {
4200 + struct mm_struct *mm;
4201 + int err;
4202 +@@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4203 +
4204 + mm = get_task_mm(task);
4205 + if (mm && mm != current->mm &&
4206 +- !ptrace_may_access(task, PTRACE_MODE_READ)) {
4207 ++ !ptrace_may_access(task, mode)) {
4208 + mmput(mm);
4209 + mm = ERR_PTR(-EACCES);
4210 + }
4211 +@@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
4212 + return mm;
4213 + }
4214 +
4215 ++struct mm_struct *mm_for_maps(struct task_struct *task)
4216 ++{
4217 ++ return mm_access(task, PTRACE_MODE_READ);
4218 ++}
4219 ++
4220 + static int proc_pid_cmdline(struct task_struct *task, char * buffer)
4221 + {
4222 + int res = 0;
4223 +@@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
4224 +
4225 + static int mem_open(struct inode* inode, struct file* file)
4226 + {
4227 +- file->private_data = (void*)((long)current->self_exec_id);
4228 ++ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4229 ++ struct mm_struct *mm;
4230 ++
4231 ++ if (!task)
4232 ++ return -ESRCH;
4233 ++
4234 ++ mm = mm_access(task, PTRACE_MODE_ATTACH);
4235 ++ put_task_struct(task);
4236 ++
4237 ++ if (IS_ERR(mm))
4238 ++ return PTR_ERR(mm);
4239 ++
4240 + /* OK to pass negative loff_t, we can catch out-of-range */
4241 + file->f_mode |= FMODE_UNSIGNED_OFFSET;
4242 ++ file->private_data = mm;
4243 ++
4244 + return 0;
4245 + }
4246 +
4247 + static ssize_t mem_read(struct file * file, char __user * buf,
4248 + size_t count, loff_t *ppos)
4249 + {
4250 +- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4251 ++ int ret;
4252 + char *page;
4253 + unsigned long src = *ppos;
4254 +- int ret = -ESRCH;
4255 +- struct mm_struct *mm;
4256 ++ struct mm_struct *mm = file->private_data;
4257 +
4258 +- if (!task)
4259 +- goto out_no_task;
4260 ++ if (!mm)
4261 ++ return 0;
4262 +
4263 +- ret = -ENOMEM;
4264 + page = (char *)__get_free_page(GFP_TEMPORARY);
4265 + if (!page)
4266 +- goto out;
4267 +-
4268 +- mm = check_mem_permission(task);
4269 +- ret = PTR_ERR(mm);
4270 +- if (IS_ERR(mm))
4271 +- goto out_free;
4272 +-
4273 +- ret = -EIO;
4274 +-
4275 +- if (file->private_data != (void*)((long)current->self_exec_id))
4276 +- goto out_put;
4277 ++ return -ENOMEM;
4278 +
4279 + ret = 0;
4280 +
4281 +@@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
4282 + }
4283 + *ppos = src;
4284 +
4285 +-out_put:
4286 +- mmput(mm);
4287 +-out_free:
4288 + free_page((unsigned long) page);
4289 +-out:
4290 +- put_task_struct(task);
4291 +-out_no_task:
4292 + return ret;
4293 + }
4294 +
4295 +@@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4296 + {
4297 + int copied;
4298 + char *page;
4299 +- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
4300 + unsigned long dst = *ppos;
4301 +- struct mm_struct *mm;
4302 ++ struct mm_struct *mm = file->private_data;
4303 +
4304 +- copied = -ESRCH;
4305 +- if (!task)
4306 +- goto out_no_task;
4307 ++ if (!mm)
4308 ++ return 0;
4309 +
4310 +- copied = -ENOMEM;
4311 + page = (char *)__get_free_page(GFP_TEMPORARY);
4312 + if (!page)
4313 +- goto out_task;
4314 +-
4315 +- mm = check_mem_permission(task);
4316 +- copied = PTR_ERR(mm);
4317 +- if (IS_ERR(mm))
4318 +- goto out_free;
4319 +-
4320 +- copied = -EIO;
4321 +- if (file->private_data != (void *)((long)current->self_exec_id))
4322 +- goto out_mm;
4323 ++ return -ENOMEM;
4324 +
4325 + copied = 0;
4326 + while (count > 0) {
4327 +@@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
4328 + }
4329 + *ppos = dst;
4330 +
4331 +-out_mm:
4332 +- mmput(mm);
4333 +-out_free:
4334 + free_page((unsigned long) page);
4335 +-out_task:
4336 +- put_task_struct(task);
4337 +-out_no_task:
4338 + return copied;
4339 + }
4340 +
4341 +@@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
4342 + return file->f_pos;
4343 + }
4344 +
4345 ++static int mem_release(struct inode *inode, struct file *file)
4346 ++{
4347 ++ struct mm_struct *mm = file->private_data;
4348 ++
4349 ++ mmput(mm);
4350 ++ return 0;
4351 ++}
4352 ++
4353 + static const struct file_operations proc_mem_operations = {
4354 + .llseek = mem_lseek,
4355 + .read = mem_read,
4356 + .write = mem_write,
4357 + .open = mem_open,
4358 ++ .release = mem_release,
4359 + };
4360 +
4361 + static ssize_t environ_read(struct file *file, char __user *buf,
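With this rework, access to /proc/<pid>/mem is checked once at open() under
ptrace-attach rules and the target mm is pinned in file->private_data until the
new mem_release() drops it, replacing the racy per-read self_exec_id
comparison. From the user side -- reading our own memory, which is always
permitted:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        char src = 'x', out = 0;
        int fd = open("/proc/self/mem", O_RDONLY);

        if (fd < 0) { perror("open"); return 1; }
        /* Seekable reads address the target's virtual memory directly. */
        if (pread(fd, &out, 1, (off_t)(uintptr_t)&src) == 1)
            printf("read '%c' via /proc/self/mem\n", out);
        close(fd);
        return 0;
    }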
4362 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4363 +index e418c5a..7dcd2a2 100644
4364 +--- a/fs/proc/task_mmu.c
4365 ++++ b/fs/proc/task_mmu.c
4366 +@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
4367 + if (!page)
4368 + continue;
4369 +
4370 ++ if (PageReserved(page))
4371 ++ continue;
4372 ++
4373 + /* Clear accessed and referenced bits. */
4374 + ptep_test_and_clear_young(vma, addr, pte);
4375 + ClearPageReferenced(page);
4376 +diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
4377 +index 766b1d4..29166ec 100644
4378 +--- a/fs/proc/uptime.c
4379 ++++ b/fs/proc/uptime.c
4380 +@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
4381 + {
4382 + struct timespec uptime;
4383 + struct timespec idle;
4384 ++ cputime64_t idletime;
4385 ++ u64 nsec;
4386 ++ u32 rem;
4387 + int i;
4388 +- cputime_t idletime = cputime_zero;
4389 +
4390 ++ idletime = 0;
4391 + for_each_possible_cpu(i)
4392 + idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
4393 +
4394 + do_posix_clock_monotonic_gettime(&uptime);
4395 + monotonic_to_bootbased(&uptime);
4396 +- cputime_to_timespec(idletime, &idle);
4397 ++ nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
4398 ++ idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
4399 ++ idle.tv_nsec = rem;
4400 + seq_printf(m, "%lu.%02lu %lu.%02lu\n",
4401 + (unsigned long) uptime.tv_sec,
4402 + (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
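The idle time is now converted from cputime64 jiffies to nanoseconds with
TICK_NSEC and split into seconds and a remainder with div_u64_rem(). The split
itself, modelled with plain 64-bit arithmetic and a sample value:

    #include <stdio.h>
    #include <inttypes.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t nsec = 12345678901234ULL;      /* example idle time */
        uint64_t sec = nsec / NSEC_PER_SEC;     /* div_u64_rem() quotient */
        uint32_t rem = (uint32_t)(nsec % NSEC_PER_SEC);

        printf("%" PRIu64 ".%09u\n", sec, rem); /* -> timespec-style split */
        return 0;
    }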
4403 +diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
4404 +index 8d9c468..c9d2941 100644
4405 +--- a/fs/ubifs/debug.h
4406 ++++ b/fs/ubifs/debug.h
4407 +@@ -175,22 +175,23 @@ const char *dbg_key_str1(const struct ubifs_info *c,
4408 + const union ubifs_key *key);
4409 +
4410 + /*
4411 +- * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
4412 +- * macros.
4413 ++ * TODO: these macros are now broken because there is no locking around them
4414 ++ * and we use a global buffer for the key string. This means that in case of
4415 ++ * concurrent execution we will end up with incorrect and messy key strings.
4416 + */
4417 + #define DBGKEY(key) dbg_key_str0(c, (key))
4418 + #define DBGKEY1(key) dbg_key_str1(c, (key))
4419 +
4420 + extern spinlock_t dbg_lock;
4421 +
4422 +-#define ubifs_dbg_msg(type, fmt, ...) do { \
4423 +- spin_lock(&dbg_lock); \
4424 +- pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
4425 +- spin_unlock(&dbg_lock); \
4426 +-} while (0)
4427 ++#define ubifs_dbg_msg(type, fmt, ...) \
4428 ++ pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
4429 +
4430 + /* Just a debugging messages not related to any specific UBIFS subsystem */
4431 +-#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
4432 ++#define dbg_msg(fmt, ...) \
4433 ++ printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
4434 ++ __func__, ##__VA_ARGS__)
4435 ++
4436 + /* General messages */
4437 + #define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
4438 + /* Additional journal messages */
4439 +diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
4440 +index 8a24f0c..286a051 100644
4441 +--- a/fs/xfs/xfs_discard.c
4442 ++++ b/fs/xfs/xfs_discard.c
4443 +@@ -68,7 +68,7 @@ xfs_trim_extents(
4444 + * Look up the longest btree in the AGF and start with it.
4445 + */
4446 + error = xfs_alloc_lookup_le(cur, 0,
4447 +- XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
4448 ++ be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
4449 + if (error)
4450 + goto out_del_cursor;
4451 +
4452 +@@ -84,7 +84,7 @@ xfs_trim_extents(
4453 + if (error)
4454 + goto out_del_cursor;
4455 + XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
4456 +- ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
4457 ++ ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
4458 +
4459 + /*
4460 + * Too small? Give up.
4461 +diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
4462 +index 1739726..451823c 100644
4463 +--- a/include/acpi/acpi_numa.h
4464 ++++ b/include/acpi/acpi_numa.h
4465 +@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
4466 + extern int node_to_pxm(int);
4467 + extern void __acpi_map_pxm_to_node(int, int);
4468 + extern int acpi_map_pxm_to_node(int);
4469 ++extern unsigned char acpi_srat_revision;
4470 +
4471 + #endif /* CONFIG_ACPI_NUMA */
4472 + #endif /* __ACP_NUMA_H */
4473 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
4474 +index 94acd81..0ed1eb0 100644
4475 +--- a/include/linux/blkdev.h
4476 ++++ b/include/linux/blkdev.h
4477 +@@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
4478 + struct request *rq);
4479 + extern void blk_delay_queue(struct request_queue *, unsigned long);
4480 + extern void blk_recount_segments(struct request_queue *, struct bio *);
4481 ++extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
4482 ++extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
4483 ++ unsigned int, void __user *);
4484 + extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4485 + unsigned int, void __user *);
4486 + extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
4487 +diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
4488 +index 5c4abce..b936763 100644
4489 +--- a/include/linux/crash_dump.h
4490 ++++ b/include/linux/crash_dump.h
4491 +@@ -5,6 +5,7 @@
4492 + #include <linux/kexec.h>
4493 + #include <linux/device.h>
4494 + #include <linux/proc_fs.h>
4495 ++#include <linux/elf.h>
4496 +
4497 + #define ELFCORE_ADDR_MAX (-1ULL)
4498 + #define ELFCORE_ADDR_ERR (-2ULL)
4499 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4500 +index ed9f74f..4eb8c80 100644
4501 +--- a/include/linux/dcache.h
4502 ++++ b/include/linux/dcache.h
4503 +@@ -203,6 +203,7 @@ struct dentry_operations {
4504 +
4505 + #define DCACHE_CANT_MOUNT 0x0100
4506 + #define DCACHE_GENOCIDE 0x0200
4507 ++#define DCACHE_SHRINK_LIST 0x0400
4508 +
4509 + #define DCACHE_NFSFS_RENAMED 0x1000
4510 + /* this dentry has been "silly renamed" and has to be deleted on the last
4511 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
4512 +index b87068a..81572af 100644
4513 +--- a/include/linux/memcontrol.h
4514 ++++ b/include/linux/memcontrol.h
4515 +@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
4516 + mem_cgroup_get_reclaim_stat_from_page(struct page *page);
4517 + extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
4518 + struct task_struct *p);
4519 ++extern void mem_cgroup_replace_page_cache(struct page *oldpage,
4520 ++ struct page *newpage);
4521 +
4522 + #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4523 + extern int do_swap_account;
4524 +@@ -366,6 +368,10 @@ static inline
4525 + void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
4526 + {
4527 + }
4528 ++static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
4529 ++ struct page *newpage)
4530 ++{
4531 ++}
4532 + #endif /* CONFIG_CGROUP_MEM_CONT */
4533 +
4534 + #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
4535 +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
4536 +index 2a7c533..6c898af 100644
4537 +--- a/include/linux/nfs_xdr.h
4538 ++++ b/include/linux/nfs_xdr.h
4539 +@@ -602,11 +602,16 @@ struct nfs_getaclargs {
4540 + size_t acl_len;
4541 + unsigned int acl_pgbase;
4542 + struct page ** acl_pages;
4543 ++ struct page * acl_scratch;
4544 + struct nfs4_sequence_args seq_args;
4545 + };
4546 +
4547 ++/* getxattr ACL interface flags */
4548 ++#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
4549 + struct nfs_getaclres {
4550 + size_t acl_len;
4551 ++ size_t acl_data_offset;
4552 ++ int acl_flags;
4553 + struct nfs4_sequence_res seq_res;
4554 + };
4555 +
4556 +diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
4557 +index b5d9657..411c412 100644
4558 +--- a/include/linux/pci_regs.h
4559 ++++ b/include/linux/pci_regs.h
4560 +@@ -392,7 +392,7 @@
4561 + #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
4562 + #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
4563 + #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
4564 +-#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
4565 ++#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
4566 + #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
4567 + #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
4568 + #define PCI_EXP_DEVCAP 4 /* Device capabilities */
4569 +diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
4570 +index 9291ac3..6f10c9c 100644
4571 +--- a/include/linux/shmem_fs.h
4572 ++++ b/include/linux/shmem_fs.h
4573 +@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
4574 + loff_t size, unsigned long flags);
4575 + extern int shmem_zero_setup(struct vm_area_struct *);
4576 + extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
4577 ++extern void shmem_unlock_mapping(struct address_space *mapping);
4578 + extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4579 + pgoff_t index, gfp_t gfp_mask);
4580 + extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
4581 +diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
4582 +index 85c50b4..c84e974 100644
4583 +--- a/include/linux/sunrpc/svcsock.h
4584 ++++ b/include/linux/sunrpc/svcsock.h
4585 +@@ -34,7 +34,7 @@ struct svc_sock {
4586 + /*
4587 + * Function prototypes.
4588 + */
4589 +-void svc_close_all(struct list_head *);
4590 ++void svc_close_all(struct svc_serv *);
4591 + int svc_recv(struct svc_rqst *, long);
4592 + int svc_send(struct svc_rqst *);
4593 + void svc_drop(struct svc_rqst *);
4594 +diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
4595 +index a20970e..af70af3 100644
4596 +--- a/include/linux/sunrpc/xdr.h
4597 ++++ b/include/linux/sunrpc/xdr.h
4598 +@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
4599 + struct xdr_array2_desc *desc);
4600 + extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
4601 + struct xdr_array2_desc *desc);
4602 ++extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
4603 ++ size_t len);
4604 +
4605 + /*
4606 + * Provide some simple tools for XDR buffer overflow-checking etc.
4607 +diff --git a/include/linux/swap.h b/include/linux/swap.h
4608 +index 1e22e12..67b3fa3 100644
4609 +--- a/include/linux/swap.h
4610 ++++ b/include/linux/swap.h
4611 +@@ -272,7 +272,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
4612 + #endif
4613 +
4614 + extern int page_evictable(struct page *page, struct vm_area_struct *vma);
4615 +-extern void scan_mapping_unevictable_pages(struct address_space *);
4616 ++extern void check_move_unevictable_pages(struct page **, int nr_pages);
4617 +
4618 + extern unsigned long scan_unevictable_pages;
4619 + extern int scan_unevictable_handler(struct ctl_table *, int,
4620 +diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
4621 +index 4b752d5..45a7698 100644
4622 +--- a/include/linux/videodev2.h
4623 ++++ b/include/linux/videodev2.h
4624 +@@ -1131,6 +1131,7 @@ struct v4l2_querymenu {
4625 + #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
4626 +
4627 + /* User-class control IDs defined by V4L2 */
4628 ++#define V4L2_CID_MAX_CTRLS 1024
4629 + #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
4630 + #define V4L2_CID_USER_BASE V4L2_CID_BASE
4631 + /* IDs reserved for driver specific controls */
4632 +diff --git a/include/media/tuner.h b/include/media/tuner.h
4633 +index 89c290b..29e1920 100644
4634 +--- a/include/media/tuner.h
4635 ++++ b/include/media/tuner.h
4636 +@@ -127,7 +127,6 @@
4637 + #define TUNER_PHILIPS_FMD1216MEX_MK3 78
4638 + #define TUNER_PHILIPS_FM1216MK5 79
4639 + #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */
4640 +-#define TUNER_XC4000 81 /* Xceive Silicon Tuner */
4641 +
4642 + #define TUNER_PARTSNIC_PTI_5NF05 81
4643 + #define TUNER_PHILIPS_CU1216L 82
4644 +@@ -136,6 +135,8 @@
4645 + #define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
4646 + #define TUNER_TENA_TNF_5337 86
4647 +
4648 ++#define TUNER_XC4000 87 /* Xceive Silicon Tuner */
4649 ++
4650 + /* tv card specific */
4651 + #define TDA9887_PRESENT (1<<0)
4652 + #define TDA9887_PORT1_INACTIVE (1<<1)
4653 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4654 +index 6873c7d..a79886c 100644
4655 +--- a/include/target/target_core_base.h
4656 ++++ b/include/target/target_core_base.h
4657 +@@ -34,6 +34,7 @@
4658 + #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
4659 + /* Used by transport_send_check_condition_and_sense() */
4660 + #define SPC_SENSE_KEY_OFFSET 2
4661 ++#define SPC_ADD_SENSE_LEN_OFFSET 7
4662 + #define SPC_ASC_KEY_OFFSET 12
4663 + #define SPC_ASCQ_KEY_OFFSET 13
4664 + #define TRANSPORT_IQN_LEN 224
4665 +diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
4666 +index f6f07aa..7cdfca2 100644
4667 +--- a/include/xen/interface/io/xs_wire.h
4668 ++++ b/include/xen/interface/io/xs_wire.h
4669 +@@ -87,4 +87,7 @@ struct xenstore_domain_interface {
4670 + XENSTORE_RING_IDX rsp_cons, rsp_prod;
4671 + };
4672 +
4673 ++/* Violating this is very bad. See docs/misc/xenstore.txt. */
4674 ++#define XENSTORE_PAYLOAD_MAX 4096
4675 ++
4676 + #endif /* _XS_WIRE_H */
4677 +diff --git a/init/do_mounts.c b/init/do_mounts.c
4678 +index 0f6e1d9..db6e5ee 100644
4679 +--- a/init/do_mounts.c
4680 ++++ b/init/do_mounts.c
4681 +@@ -398,15 +398,42 @@ out:
4682 + }
4683 +
4684 + #ifdef CONFIG_ROOT_NFS
4685 ++
4686 ++#define NFSROOT_TIMEOUT_MIN 5
4687 ++#define NFSROOT_TIMEOUT_MAX 30
4688 ++#define NFSROOT_RETRY_MAX 5
4689 ++
4690 + static int __init mount_nfs_root(void)
4691 + {
4692 + char *root_dev, *root_data;
4693 ++ unsigned int timeout;
4694 ++ int try, err;
4695 +
4696 +- if (nfs_root_data(&root_dev, &root_data) != 0)
4697 +- return 0;
4698 +- if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
4699 ++ err = nfs_root_data(&root_dev, &root_data);
4700 ++ if (err != 0)
4701 + return 0;
4702 +- return 1;
4703 ++
4704 ++ /*
4705 ++ * The server or network may not be ready, so try several
4706 ++ * times. Stop after a few tries in case the client wants
4707 ++ * to fall back to other boot methods.
4708 ++ */
4709 ++ timeout = NFSROOT_TIMEOUT_MIN;
4710 ++ for (try = 1; ; try++) {
4711 ++ err = do_mount_root(root_dev, "nfs",
4712 ++ root_mountflags, root_data);
4713 ++ if (err == 0)
4714 ++ return 1;
4715 ++ if (try > NFSROOT_RETRY_MAX)
4716 ++ break;
4717 ++
4718 ++ /* Wait, in case the server refused us immediately */
4719 ++ ssleep(timeout);
4720 ++ timeout <<= 1;
4721 ++ if (timeout > NFSROOT_TIMEOUT_MAX)
4722 ++ timeout = NFSROOT_TIMEOUT_MAX;
4723 ++ }
4724 ++ return 0;
4725 + }
4726 + #endif
4727 +
4728 +diff --git a/ipc/shm.c b/ipc/shm.c
4729 +index 02ecf2c..b76be5b 100644
4730 +--- a/ipc/shm.c
4731 ++++ b/ipc/shm.c
4732 +@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4733 + case SHM_LOCK:
4734 + case SHM_UNLOCK:
4735 + {
4736 +- struct file *uninitialized_var(shm_file);
4737 +-
4738 +- lru_add_drain_all(); /* drain pagevecs to lru lists */
4739 ++ struct file *shm_file;
4740 +
4741 + shp = shm_lock_check(ns, shmid);
4742 + if (IS_ERR(shp)) {
4743 +@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
4744 + err = security_shm_shmctl(shp, cmd);
4745 + if (err)
4746 + goto out_unlock;
4747 +-
4748 +- if(cmd==SHM_LOCK) {
4749 ++
4750 ++ shm_file = shp->shm_file;
4751 ++ if (is_file_hugepages(shm_file))
4752 ++ goto out_unlock;
4753 ++
4754 ++ if (cmd == SHM_LOCK) {
4755 + struct user_struct *user = current_user();
4756 +- if (!is_file_hugepages(shp->shm_file)) {
4757 +- err = shmem_lock(shp->shm_file, 1, user);
4758 +- if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
4759 +- shp->shm_perm.mode |= SHM_LOCKED;
4760 +- shp->mlock_user = user;
4761 +- }
4762 ++ err = shmem_lock(shm_file, 1, user);
4763 ++ if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
4764 ++ shp->shm_perm.mode |= SHM_LOCKED;
4765 ++ shp->mlock_user = user;
4766 + }
4767 +- } else if (!is_file_hugepages(shp->shm_file)) {
4768 +- shmem_lock(shp->shm_file, 0, shp->mlock_user);
4769 +- shp->shm_perm.mode &= ~SHM_LOCKED;
4770 +- shp->mlock_user = NULL;
4771 ++ goto out_unlock;
4772 + }
4773 ++
4774 ++ /* SHM_UNLOCK */
4775 ++ if (!(shp->shm_perm.mode & SHM_LOCKED))
4776 ++ goto out_unlock;
4777 ++ shmem_lock(shm_file, 0, shp->mlock_user);
4778 ++ shp->shm_perm.mode &= ~SHM_LOCKED;
4779 ++ shp->mlock_user = NULL;
4780 ++ get_file(shm_file);
4781 + shm_unlock(shp);
4782 ++ shmem_unlock_mapping(shm_file->f_mapping);
4783 ++ fput(shm_file);
4784 + goto out;
4785 + }
4786 + case IPC_RMID:
4787 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
4788 +index e5d8464..52fd049 100644
4789 +--- a/kernel/kprobes.c
4790 ++++ b/kernel/kprobes.c
4791 +@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4792 + /* Early boot. kretprobe_table_locks not yet initialized. */
4793 + return;
4794 +
4795 ++ INIT_HLIST_HEAD(&empty_rp);
4796 + hash = hash_ptr(tk, KPROBE_HASH_BITS);
4797 + head = &kretprobe_inst_table[hash];
4798 + kretprobe_table_lock(hash, &flags);
4799 +@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
4800 + recycle_rp_inst(ri, &empty_rp);
4801 + }
4802 + kretprobe_table_unlock(hash, &flags);
4803 +- INIT_HLIST_HEAD(&empty_rp);
4804 + hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
4805 + hlist_del(&ri->hlist);
4806 + kfree(ri);
4807 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4808 +index b1e8943..25b4f4d 100644
4809 +--- a/kernel/trace/ftrace.c
4810 ++++ b/kernel/trace/ftrace.c
4811 +@@ -948,7 +948,7 @@ struct ftrace_func_probe {
4812 + };
4813 +
4814 + enum {
4815 +- FTRACE_ENABLE_CALLS = (1 << 0),
4816 ++ FTRACE_UPDATE_CALLS = (1 << 0),
4817 + FTRACE_DISABLE_CALLS = (1 << 1),
4818 + FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
4819 + FTRACE_START_FUNC_RET = (1 << 3),
4820 +@@ -1519,7 +1519,7 @@ int ftrace_text_reserved(void *start, void *end)
4821 +
4822 +
4823 + static int
4824 +-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4825 ++__ftrace_replace_code(struct dyn_ftrace *rec, int update)
4826 + {
4827 + unsigned long ftrace_addr;
4828 + unsigned long flag = 0UL;
4829 +@@ -1527,17 +1527,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4830 + ftrace_addr = (unsigned long)FTRACE_ADDR;
4831 +
4832 + /*
4833 +- * If we are enabling tracing:
4834 ++ * If we are updating calls:
4835 + *
4836 + * If the record has a ref count, then we need to enable it
4837 + * because someone is using it.
4838 + *
4839 + * Otherwise we make sure its disabled.
4840 + *
4841 +- * If we are disabling tracing, then disable all records that
4842 ++ * If we are disabling calls, then disable all records that
4843 + * are enabled.
4844 + */
4845 +- if (enable && (rec->flags & ~FTRACE_FL_MASK))
4846 ++ if (update && (rec->flags & ~FTRACE_FL_MASK))
4847 + flag = FTRACE_FL_ENABLED;
4848 +
4849 + /* If the state of this record hasn't changed, then do nothing */
4850 +@@ -1553,7 +1553,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
4851 + return ftrace_make_nop(NULL, rec, ftrace_addr);
4852 + }
4853 +
4854 +-static void ftrace_replace_code(int enable)
4855 ++static void ftrace_replace_code(int update)
4856 + {
4857 + struct dyn_ftrace *rec;
4858 + struct ftrace_page *pg;
4859 +@@ -1567,7 +1567,7 @@ static void ftrace_replace_code(int enable)
4860 + if (rec->flags & FTRACE_FL_FREE)
4861 + continue;
4862 +
4863 +- failed = __ftrace_replace_code(rec, enable);
4864 ++ failed = __ftrace_replace_code(rec, update);
4865 + if (failed) {
4866 + ftrace_bug(failed, rec->ip);
4867 + /* Stop processing */
4868 +@@ -1623,7 +1623,7 @@ static int __ftrace_modify_code(void *data)
4869 + */
4870 + function_trace_stop++;
4871 +
4872 +- if (*command & FTRACE_ENABLE_CALLS)
4873 ++ if (*command & FTRACE_UPDATE_CALLS)
4874 + ftrace_replace_code(1);
4875 + else if (*command & FTRACE_DISABLE_CALLS)
4876 + ftrace_replace_code(0);
4877 +@@ -1691,7 +1691,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
4878 + return -ENODEV;
4879 +
4880 + ftrace_start_up++;
4881 +- command |= FTRACE_ENABLE_CALLS;
4882 ++ command |= FTRACE_UPDATE_CALLS;
4883 +
4884 + /* ops marked global share the filter hashes */
4885 + if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
4886 +@@ -1743,8 +1743,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
4887 + if (ops != &global_ops || !global_start_up)
4888 + ops->flags &= ~FTRACE_OPS_FL_ENABLED;
4889 +
4890 +- if (!ftrace_start_up)
4891 +- command |= FTRACE_DISABLE_CALLS;
4892 ++ command |= FTRACE_UPDATE_CALLS;
4893 +
4894 + if (saved_ftrace_func != ftrace_trace_function) {
4895 + saved_ftrace_func = ftrace_trace_function;
4896 +@@ -1766,7 +1765,7 @@ static void ftrace_startup_sysctl(void)
4897 + saved_ftrace_func = NULL;
4898 + /* ftrace_start_up is true if we want ftrace running */
4899 + if (ftrace_start_up)
4900 +- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4901 ++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4902 + }
4903 +
4904 + static void ftrace_shutdown_sysctl(void)
4905 +@@ -2919,7 +2918,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4906 + ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4907 + if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
4908 + && ftrace_enabled)
4909 +- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4910 ++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4911 +
4912 + mutex_unlock(&ftrace_lock);
4913 +
4914 +@@ -3107,7 +3106,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
4915 + orig_hash, iter->hash);
4916 + if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
4917 + && ftrace_enabled)
4918 +- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
4919 ++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
4920 +
4921 + mutex_unlock(&ftrace_lock);
4922 + }
4923 +diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
4924 +index db110b8..f1539de 100644
4925 +--- a/kernel/tracepoint.c
4926 ++++ b/kernel/tracepoint.c
4927 +@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
4928 + int ret = 0;
4929 +
4930 + /*
4931 +- * We skip modules that tain the kernel, especially those with different
4932 +- * module header (for forced load), to make sure we don't cause a crash.
4933 ++ * We skip modules that taint the kernel, especially those with different
4934 ++ * module headers (for forced load), to make sure we don't cause a crash.
4935 ++ * Staging and out-of-tree GPL modules are fine.
4936 + */
4937 +- if (mod->taints)
4938 ++ if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
4939 + return 0;
4940 + mutex_lock(&tracepoints_mutex);
4941 + tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
4942 +diff --git a/mm/filemap.c b/mm/filemap.c
4943 +index 5f0a3c9..90286a4 100644
4944 +--- a/mm/filemap.c
4945 ++++ b/mm/filemap.c
4946 +@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
4947 + int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4948 + {
4949 + int error;
4950 +- struct mem_cgroup *memcg = NULL;
4951 +
4952 + VM_BUG_ON(!PageLocked(old));
4953 + VM_BUG_ON(!PageLocked(new));
4954 + VM_BUG_ON(new->mapping);
4955 +
4956 +- /*
4957 +- * This is not page migration, but prepare_migration and
4958 +- * end_migration does enough work for charge replacement.
4959 +- *
4960 +- * In the longer term we probably want a specialized function
4961 +- * for moving the charge from old to new in a more efficient
4962 +- * manner.
4963 +- */
4964 +- error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
4965 +- if (error)
4966 +- return error;
4967 +-
4968 + error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
4969 + if (!error) {
4970 + struct address_space *mapping = old->mapping;
4971 +@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4972 + if (PageSwapBacked(new))
4973 + __inc_zone_page_state(new, NR_SHMEM);
4974 + spin_unlock_irq(&mapping->tree_lock);
4975 ++ /* mem_cgroup codes must not be called under tree_lock */
4976 ++ mem_cgroup_replace_page_cache(old, new);
4977 + radix_tree_preload_end();
4978 + if (freepage)
4979 + freepage(old);
4980 + page_cache_release(old);
4981 +- mem_cgroup_end_migration(memcg, old, new, true);
4982 +- } else {
4983 +- mem_cgroup_end_migration(memcg, old, new, false);
4984 + }
4985 +
4986 + return error;
4987 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4988 +index b63f5f7..f538e9b 100644
4989 +--- a/mm/memcontrol.c
4990 ++++ b/mm/memcontrol.c
4991 +@@ -3366,6 +3366,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
4992 + cgroup_release_and_wakeup_rmdir(&memcg->css);
4993 + }
4994 +
4995 ++/*
4996 ++ * At replace page cache, newpage is not under any memcg but it's on
4997 ++ * LRU. So, this function doesn't touch res_counter but handles LRU
4998 ++ * in correct way. Both pages are locked so we cannot race with uncharge.
4999 ++ */
5000 ++void mem_cgroup_replace_page_cache(struct page *oldpage,
5001 ++ struct page *newpage)
5002 ++{
5003 ++ struct mem_cgroup *memcg;
5004 ++ struct page_cgroup *pc;
5005 ++ struct zone *zone;
5006 ++ enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
5007 ++ unsigned long flags;
5008 ++
5009 ++ if (mem_cgroup_disabled())
5010 ++ return;
5011 ++
5012 ++ pc = lookup_page_cgroup(oldpage);
5013 ++ /* fix accounting on old pages */
5014 ++ lock_page_cgroup(pc);
5015 ++ memcg = pc->mem_cgroup;
5016 ++ mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
5017 ++ ClearPageCgroupUsed(pc);
5018 ++ unlock_page_cgroup(pc);
5019 ++
5020 ++ if (PageSwapBacked(oldpage))
5021 ++ type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
5022 ++
5023 ++ zone = page_zone(newpage);
5024 ++ pc = lookup_page_cgroup(newpage);
5025 ++ /*
5026 ++ * Even if newpage->mapping was NULL before starting replacement,
5027 ++ * the newpage may be on LRU(or pagevec for LRU) already. We lock
5028 ++ * LRU while we overwrite pc->mem_cgroup.
5029 ++ */
5030 ++ spin_lock_irqsave(&zone->lru_lock, flags);
5031 ++ if (PageLRU(newpage))
5032 ++ del_page_from_lru_list(zone, newpage, page_lru(newpage));
5033 ++ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
5034 ++ if (PageLRU(newpage))
5035 ++ add_page_to_lru_list(zone, newpage, page_lru(newpage));
5036 ++ spin_unlock_irqrestore(&zone->lru_lock, flags);
5037 ++}
5038 ++
5039 + #ifdef CONFIG_DEBUG_VM
5040 + static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
5041 + {
5042 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5043 +index 2b8ba3a..485be89 100644
5044 +--- a/mm/page_alloc.c
5045 ++++ b/mm/page_alloc.c
5046 +@@ -5608,6 +5608,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
5047 + bool is_pageblock_removable_nolock(struct page *page)
5048 + {
5049 + struct zone *zone = page_zone(page);
5050 ++ unsigned long pfn = page_to_pfn(page);
5051 ++
5052 ++ /*
5053 ++ * We have to be careful here because we are iterating over memory
5054 ++ * sections which are not zone aware so we might end up outside of
5055 ++ * the zone but still within the section.
5056 ++ */
5057 ++ if (!zone || zone->zone_start_pfn > pfn ||
5058 ++ zone->zone_start_pfn + zone->spanned_pages <= pfn)
5059 ++ return false;
5060 ++
5061 + return __count_immobile_pages(zone, page, 0);
5062 + }
5063 +
5064 +diff --git a/mm/shmem.c b/mm/shmem.c
5065 +index d672250..6c253f7 100644
5066 +--- a/mm/shmem.c
5067 ++++ b/mm/shmem.c
5068 +@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
5069 + /*
5070 + * Pagevec may contain swap entries, so shuffle up pages before releasing.
5071 + */
5072 +-static void shmem_pagevec_release(struct pagevec *pvec)
5073 ++static void shmem_deswap_pagevec(struct pagevec *pvec)
5074 + {
5075 + int i, j;
5076 +
5077 +@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
5078 + pvec->pages[j++] = page;
5079 + }
5080 + pvec->nr = j;
5081 +- pagevec_release(pvec);
5082 ++}
5083 ++
5084 ++/*
5085 ++ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
5086 ++ */
5087 ++void shmem_unlock_mapping(struct address_space *mapping)
5088 ++{
5089 ++ struct pagevec pvec;
5090 ++ pgoff_t indices[PAGEVEC_SIZE];
5091 ++ pgoff_t index = 0;
5092 ++
5093 ++ pagevec_init(&pvec, 0);
5094 ++ /*
5095 ++ * Minor point, but we might as well stop if someone else SHM_LOCKs it.
5096 ++ */
5097 ++ while (!mapping_unevictable(mapping)) {
5098 ++ /*
5099 ++ * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
5100 ++ * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
5101 ++ */
5102 ++ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
5103 ++ PAGEVEC_SIZE, pvec.pages, indices);
5104 ++ if (!pvec.nr)
5105 ++ break;
5106 ++ index = indices[pvec.nr - 1] + 1;
5107 ++ shmem_deswap_pagevec(&pvec);
5108 ++ check_move_unevictable_pages(pvec.pages, pvec.nr);
5109 ++ pagevec_release(&pvec);
5110 ++ cond_resched();
5111 ++ }
5112 + }
5113 +
5114 + /*
5115 +@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5116 + }
5117 + unlock_page(page);
5118 + }
5119 +- shmem_pagevec_release(&pvec);
5120 ++ shmem_deswap_pagevec(&pvec);
5121 ++ pagevec_release(&pvec);
5122 + mem_cgroup_uncharge_end();
5123 + cond_resched();
5124 + index++;
5125 +@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5126 + continue;
5127 + }
5128 + if (index == start && indices[0] > end) {
5129 +- shmem_pagevec_release(&pvec);
5130 ++ shmem_deswap_pagevec(&pvec);
5131 ++ pagevec_release(&pvec);
5132 + break;
5133 + }
5134 + mem_cgroup_uncharge_start();
5135 +@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5136 + }
5137 + unlock_page(page);
5138 + }
5139 +- shmem_pagevec_release(&pvec);
5140 ++ shmem_deswap_pagevec(&pvec);
5141 ++ pagevec_release(&pvec);
5142 + mem_cgroup_uncharge_end();
5143 + index++;
5144 + }
5145 +@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5146 + user_shm_unlock(inode->i_size, user);
5147 + info->flags &= ~VM_LOCKED;
5148 + mapping_clear_unevictable(file->f_mapping);
5149 +- /*
5150 +- * Ensure that a racing putback_lru_page() can see
5151 +- * the pages of this mapping are evictable when we
5152 +- * skip them due to !PageLRU during the scan.
5153 +- */
5154 +- smp_mb__after_clear_bit();
5155 +- scan_mapping_unevictable_pages(file->f_mapping);
5156 + }
5157 + retval = 0;
5158 +
5159 +@@ -2446,6 +2471,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
5160 + return 0;
5161 + }
5162 +
5163 ++void shmem_unlock_mapping(struct address_space *mapping)
5164 ++{
5165 ++}
5166 ++
5167 + void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
5168 + {
5169 + truncate_inode_pages_range(inode->i_mapping, lstart, lend);
5170 +diff --git a/mm/slub.c b/mm/slub.c
5171 +index ed3334d..1a919f0 100644
5172 +--- a/mm/slub.c
5173 ++++ b/mm/slub.c
5174 +@@ -2166,6 +2166,11 @@ redo:
5175 + goto new_slab;
5176 + }
5177 +
5178 ++ /* must check again c->freelist in case of cpu migration or IRQ */
5179 ++ object = c->freelist;
5180 ++ if (object)
5181 ++ goto load_freelist;
5182 ++
5183 + stat(s, ALLOC_SLOWPATH);
5184 +
5185 + do {
5186 +diff --git a/mm/vmscan.c b/mm/vmscan.c
5187 +index f54a05b..cb33d9c 100644
5188 +--- a/mm/vmscan.c
5189 ++++ b/mm/vmscan.c
5190 +@@ -636,7 +636,7 @@ redo:
5191 + * When racing with an mlock or AS_UNEVICTABLE clearing
5192 + * (page is unlocked) make sure that if the other thread
5193 + * does not observe our setting of PG_lru and fails
5194 +- * isolation/check_move_unevictable_page,
5195 ++ * isolation/check_move_unevictable_pages,
5196 + * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
5197 + * the page back to the evictable list.
5198 + *
5199 +@@ -3353,97 +3353,59 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
5200 + return 1;
5201 + }
5202 +
5203 ++#ifdef CONFIG_SHMEM
5204 + /**
5205 +- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
5206 +- * @page: page to check evictability and move to appropriate lru list
5207 +- * @zone: zone page is in
5208 ++ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
5209 ++ * @pages: array of pages to check
5210 ++ * @nr_pages: number of pages to check
5211 + *
5212 +- * Checks a page for evictability and moves the page to the appropriate
5213 +- * zone lru list.
5214 ++ * Checks pages for evictability and moves them to the appropriate lru list.
5215 + *
5216 +- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
5217 +- * have PageUnevictable set.
5218 ++ * This function is only used for SysV IPC SHM_UNLOCK.
5219 + */
5220 +-static void check_move_unevictable_page(struct page *page, struct zone *zone)
5221 ++void check_move_unevictable_pages(struct page **pages, int nr_pages)
5222 + {
5223 +- VM_BUG_ON(PageActive(page));
5224 +-
5225 +-retry:
5226 +- ClearPageUnevictable(page);
5227 +- if (page_evictable(page, NULL)) {
5228 +- enum lru_list l = page_lru_base_type(page);
5229 ++ struct zone *zone = NULL;
5230 ++ int pgscanned = 0;
5231 ++ int pgrescued = 0;
5232 ++ int i;
5233 +
5234 +- __dec_zone_state(zone, NR_UNEVICTABLE);
5235 +- list_move(&page->lru, &zone->lru[l].list);
5236 +- mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
5237 +- __inc_zone_state(zone, NR_INACTIVE_ANON + l);
5238 +- __count_vm_event(UNEVICTABLE_PGRESCUED);
5239 +- } else {
5240 +- /*
5241 +- * rotate unevictable list
5242 +- */
5243 +- SetPageUnevictable(page);
5244 +- list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
5245 +- mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
5246 +- if (page_evictable(page, NULL))
5247 +- goto retry;
5248 +- }
5249 +-}
5250 ++ for (i = 0; i < nr_pages; i++) {
5251 ++ struct page *page = pages[i];
5252 ++ struct zone *pagezone;
5253 +
5254 +-/**
5255 +- * scan_mapping_unevictable_pages - scan an address space for evictable pages
5256 +- * @mapping: struct address_space to scan for evictable pages
5257 +- *
5258 +- * Scan all pages in mapping. Check unevictable pages for
5259 +- * evictability and move them to the appropriate zone lru list.
5260 +- */
5261 +-void scan_mapping_unevictable_pages(struct address_space *mapping)
5262 +-{
5263 +- pgoff_t next = 0;
5264 +- pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
5265 +- PAGE_CACHE_SHIFT;
5266 +- struct zone *zone;
5267 +- struct pagevec pvec;
5268 ++ pgscanned++;
5269 ++ pagezone = page_zone(page);
5270 ++ if (pagezone != zone) {
5271 ++ if (zone)
5272 ++ spin_unlock_irq(&zone->lru_lock);
5273 ++ zone = pagezone;
5274 ++ spin_lock_irq(&zone->lru_lock);
5275 ++ }
5276 +
5277 +- if (mapping->nrpages == 0)
5278 +- return;
5279 ++ if (!PageLRU(page) || !PageUnevictable(page))
5280 ++ continue;
5281 +
5282 +- pagevec_init(&pvec, 0);
5283 +- while (next < end &&
5284 +- pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
5285 +- int i;
5286 +- int pg_scanned = 0;
5287 +-
5288 +- zone = NULL;
5289 +-
5290 +- for (i = 0; i < pagevec_count(&pvec); i++) {
5291 +- struct page *page = pvec.pages[i];
5292 +- pgoff_t page_index = page->index;
5293 +- struct zone *pagezone = page_zone(page);
5294 +-
5295 +- pg_scanned++;
5296 +- if (page_index > next)
5297 +- next = page_index;
5298 +- next++;
5299 +-
5300 +- if (pagezone != zone) {
5301 +- if (zone)
5302 +- spin_unlock_irq(&zone->lru_lock);
5303 +- zone = pagezone;
5304 +- spin_lock_irq(&zone->lru_lock);
5305 +- }
5306 ++ if (page_evictable(page, NULL)) {
5307 ++ enum lru_list lru = page_lru_base_type(page);
5308 +
5309 +- if (PageLRU(page) && PageUnevictable(page))
5310 +- check_move_unevictable_page(page, zone);
5311 ++ VM_BUG_ON(PageActive(page));
5312 ++ ClearPageUnevictable(page);
5313 ++ __dec_zone_state(zone, NR_UNEVICTABLE);
5314 ++ list_move(&page->lru, &zone->lru[lru].list);
5315 ++ mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
5316 ++ __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
5317 ++ pgrescued++;
5318 + }
5319 +- if (zone)
5320 +- spin_unlock_irq(&zone->lru_lock);
5321 +- pagevec_release(&pvec);
5322 +-
5323 +- count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
5324 + }
5325 +
5326 ++ if (zone) {
5327 ++ __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
5328 ++ __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
5329 ++ spin_unlock_irq(&zone->lru_lock);
5330 ++ }
5331 + }
5332 ++#endif /* CONFIG_SHMEM */
5333 +
5334 + static void warn_scan_unevictable_pages(void)
5335 + {
5336 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
5337 +index ea10a51..73495f1 100644
5338 +--- a/net/mac80211/ieee80211_i.h
5339 ++++ b/net/mac80211/ieee80211_i.h
5340 +@@ -702,6 +702,8 @@ struct tpt_led_trigger {
5341 + * well be on the operating channel
5342 + * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
5343 + * determine if we are on the operating channel or not
5344 ++ * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
5345 ++ * gets only set in conjunction with SCAN_SW_SCANNING
5346 + * @SCAN_COMPLETED: Set for our scan work function when the driver reported
5347 + * that the scan completed.
5348 + * @SCAN_ABORTED: Set for our scan work function when the driver reported
5349 +@@ -710,6 +712,7 @@ struct tpt_led_trigger {
5350 + enum {
5351 + SCAN_SW_SCANNING,
5352 + SCAN_HW_SCANNING,
5353 ++ SCAN_OFF_CHANNEL,
5354 + SCAN_COMPLETED,
5355 + SCAN_ABORTED,
5356 + };
5357 +@@ -1140,14 +1143,10 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
5358 + void ieee80211_sched_scan_stopped_work(struct work_struct *work);
5359 +
5360 + /* off-channel helpers */
5361 +-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
5362 +-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5363 +- bool tell_ap);
5364 +-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5365 +- bool offchannel_ps_enable);
5366 ++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
5367 ++void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
5368 + void ieee80211_offchannel_return(struct ieee80211_local *local,
5369 +- bool enable_beaconing,
5370 +- bool offchannel_ps_disable);
5371 ++ bool enable_beaconing);
5372 + void ieee80211_hw_roc_setup(struct ieee80211_local *local);
5373 +
5374 + /* interface handling */
5375 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5376 +index cae4435..a7536fd 100644
5377 +--- a/net/mac80211/main.c
5378 ++++ b/net/mac80211/main.c
5379 +@@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
5380 + ieee80211_configure_filter(local);
5381 + }
5382 +
5383 +-/*
5384 +- * Returns true if we are logically configured to be on
5385 +- * the operating channel AND the hardware-conf is currently
5386 +- * configured on the operating channel. Compares channel-type
5387 +- * as well.
5388 +- */
5389 +-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
5390 +-{
5391 +- struct ieee80211_channel *chan, *scan_chan;
5392 +- enum nl80211_channel_type channel_type;
5393 +-
5394 +- /* This logic needs to match logic in ieee80211_hw_config */
5395 +- if (local->scan_channel) {
5396 +- chan = local->scan_channel;
5397 +- /* If scanning on oper channel, use whatever channel-type
5398 +- * is currently in use.
5399 +- */
5400 +- if (chan == local->oper_channel)
5401 +- channel_type = local->_oper_channel_type;
5402 +- else
5403 +- channel_type = NL80211_CHAN_NO_HT;
5404 +- } else if (local->tmp_channel) {
5405 +- chan = scan_chan = local->tmp_channel;
5406 +- channel_type = local->tmp_channel_type;
5407 +- } else {
5408 +- chan = local->oper_channel;
5409 +- channel_type = local->_oper_channel_type;
5410 +- }
5411 +-
5412 +- if (chan != local->oper_channel ||
5413 +- channel_type != local->_oper_channel_type)
5414 +- return false;
5415 +-
5416 +- /* Check current hardware-config against oper_channel. */
5417 +- if ((local->oper_channel != local->hw.conf.channel) ||
5418 +- (local->_oper_channel_type != local->hw.conf.channel_type))
5419 +- return false;
5420 +-
5421 +- return true;
5422 +-}
5423 +-
5424 + int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5425 + {
5426 + struct ieee80211_channel *chan, *scan_chan;
5427 +@@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5428 +
5429 + scan_chan = local->scan_channel;
5430 +
5431 +- /* If this off-channel logic ever changes, ieee80211_on_oper_channel
5432 +- * may need to change as well.
5433 +- */
5434 + offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5435 + if (scan_chan) {
5436 + chan = scan_chan;
5437 +@@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
5438 + channel_type = local->_oper_channel_type;
5439 + else
5440 + channel_type = NL80211_CHAN_NO_HT;
5441 +- } else if (local->tmp_channel) {
5442 ++ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5443 ++ } else if (local->tmp_channel &&
5444 ++ local->oper_channel != local->tmp_channel) {
5445 + chan = scan_chan = local->tmp_channel;
5446 + channel_type = local->tmp_channel_type;
5447 ++ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5448 + } else {
5449 + chan = local->oper_channel;
5450 + channel_type = local->_oper_channel_type;
5451 +- }
5452 +-
5453 +- if (chan != local->oper_channel ||
5454 +- channel_type != local->_oper_channel_type)
5455 +- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
5456 +- else
5457 + local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
5458 ++ }
5459 +
5460 + offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
5461 +
5462 +@@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
5463 +
5464 + if (changed & BSS_CHANGED_BEACON_ENABLED) {
5465 + if (local->quiescing || !ieee80211_sdata_running(sdata) ||
5466 +- test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
5467 ++ test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5468 + sdata->vif.bss_conf.enable_beacon = false;
5469 + } else {
5470 + /*
5471 +diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
5472 +index 3d41441..1b239be 100644
5473 +--- a/net/mac80211/offchannel.c
5474 ++++ b/net/mac80211/offchannel.c
5475 +@@ -18,14 +18,10 @@
5476 + #include "driver-trace.h"
5477 +
5478 + /*
5479 +- * Tell our hardware to disable PS.
5480 +- * Optionally inform AP that we will go to sleep so that it will buffer
5481 +- * the frames while we are doing off-channel work. This is optional
5482 +- * because we *may* be doing work on-operating channel, and want our
5483 +- * hardware unconditionally awake, but still let the AP send us normal frames.
5484 ++ * inform AP that we will go to sleep so that it will buffer the frames
5485 ++ * while we scan
5486 + */
5487 +-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5488 +- bool tell_ap)
5489 ++static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
5490 + {
5491 + struct ieee80211_local *local = sdata->local;
5492 + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5493 +@@ -46,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
5494 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5495 + }
5496 +
5497 +- if (tell_ap && (!local->offchannel_ps_enabled ||
5498 +- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
5499 ++ if (!(local->offchannel_ps_enabled) ||
5500 ++ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
5501 + /*
5502 + * If power save was enabled, no need to send a nullfunc
5503 + * frame because AP knows that we are sleeping. But if the
5504 +@@ -82,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5505 + * we are sleeping, let's just enable power save mode in
5506 + * hardware.
5507 + */
5508 +- /* TODO: Only set hardware if CONF_PS changed?
5509 +- * TODO: Should we set offchannel_ps_enabled to false?
5510 +- */
5511 + local->hw.conf.flags |= IEEE80211_CONF_PS;
5512 + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
5513 + } else if (local->hw.conf.dynamic_ps_timeout > 0) {
5514 +@@ -103,61 +96,63 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
5515 + ieee80211_sta_reset_conn_monitor(sdata);
5516 + }
5517 +
5518 +-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
5519 +- bool offchannel_ps_enable)
5520 ++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
5521 + {
5522 + struct ieee80211_sub_if_data *sdata;
5523 +
5524 +- /*
5525 +- * notify the AP about us leaving the channel and stop all
5526 +- * STA interfaces.
5527 +- */
5528 + mutex_lock(&local->iflist_mtx);
5529 + list_for_each_entry(sdata, &local->interfaces, list) {
5530 + if (!ieee80211_sdata_running(sdata))
5531 + continue;
5532 +
5533 +- if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
5534 +- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5535 +-
5536 +- /* Check to see if we should disable beaconing. */
5537 ++ /* disable beaconing */
5538 + if (sdata->vif.type == NL80211_IFTYPE_AP ||
5539 + sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5540 + sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
5541 + ieee80211_bss_info_change_notify(
5542 + sdata, BSS_CHANGED_BEACON_ENABLED);
5543 +
5544 +- if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5545 ++ /*
5546 ++ * only handle non-STA interfaces here, STA interfaces
5547 ++ * are handled in ieee80211_offchannel_stop_station(),
5548 ++ * e.g., from the background scan state machine.
5549 ++ *
5550 ++ * In addition, do not stop monitor interface to allow it to be
5551 ++ * used from user space controlled off-channel operations.
5552 ++ */
5553 ++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
5554 ++ sdata->vif.type != NL80211_IFTYPE_MONITOR) {
5555 ++ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5556 + netif_tx_stop_all_queues(sdata->dev);
5557 +- if (offchannel_ps_enable &&
5558 +- (sdata->vif.type == NL80211_IFTYPE_STATION) &&
5559 +- sdata->u.mgd.associated)
5560 +- ieee80211_offchannel_ps_enable(sdata, true);
5561 + }
5562 + }
5563 + mutex_unlock(&local->iflist_mtx);
5564 + }
5565 +
5566 +-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
5567 +- bool tell_ap)
5568 ++void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
5569 + {
5570 + struct ieee80211_sub_if_data *sdata;
5571 +
5572 ++ /*
5573 ++ * notify the AP about us leaving the channel and stop all STA interfaces
5574 ++ */
5575 + mutex_lock(&local->iflist_mtx);
5576 + list_for_each_entry(sdata, &local->interfaces, list) {
5577 + if (!ieee80211_sdata_running(sdata))
5578 + continue;
5579 +
5580 +- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
5581 +- sdata->u.mgd.associated)
5582 +- ieee80211_offchannel_ps_enable(sdata, tell_ap);
5583 ++ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5584 ++ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
5585 ++ netif_tx_stop_all_queues(sdata->dev);
5586 ++ if (sdata->u.mgd.associated)
5587 ++ ieee80211_offchannel_ps_enable(sdata);
5588 ++ }
5589 + }
5590 + mutex_unlock(&local->iflist_mtx);
5591 + }
5592 +
5593 + void ieee80211_offchannel_return(struct ieee80211_local *local,
5594 +- bool enable_beaconing,
5595 +- bool offchannel_ps_disable)
5596 ++ bool enable_beaconing)
5597 + {
5598 + struct ieee80211_sub_if_data *sdata;
5599 +
5600 +@@ -167,8 +162,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5601 + continue;
5602 +
5603 + /* Tell AP we're back */
5604 +- if (offchannel_ps_disable &&
5605 +- sdata->vif.type == NL80211_IFTYPE_STATION) {
5606 ++ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
5607 + if (sdata->u.mgd.associated)
5608 + ieee80211_offchannel_ps_disable(sdata);
5609 + }
5610 +@@ -188,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
5611 + netif_tx_wake_all_queues(sdata->dev);
5612 + }
5613 +
5614 +- /* Check to see if we should re-enable beaconing */
5615 ++ /* re-enable beaconing */
5616 + if (enable_beaconing &&
5617 + (sdata->vif.type == NL80211_IFTYPE_AP ||
5618 + sdata->vif.type == NL80211_IFTYPE_ADHOC ||
5619 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5620 +index fb123e2..5c51607 100644
5621 +--- a/net/mac80211/rx.c
5622 ++++ b/net/mac80211/rx.c
5623 +@@ -421,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
5624 + return RX_CONTINUE;
5625 +
5626 + if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5627 +- test_bit(SCAN_SW_SCANNING, &local->scanning) ||
5628 + local->sched_scanning)
5629 + return ieee80211_scan_rx(rx->sdata, skb);
5630 +
5631 ++ if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
5632 ++ /* drop all the other packets during a software scan anyway */
5633 ++ if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
5634 ++ dev_kfree_skb(skb);
5635 ++ return RX_QUEUED;
5636 ++ }
5637 ++
5638 + /* scanning finished during invoking of handlers */
5639 + I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
5640 + return RX_DROP_UNUSABLE;
5641 +@@ -2858,7 +2864,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
5642 + local->dot11ReceivedFragmentCount++;
5643 +
5644 + if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
5645 +- test_bit(SCAN_SW_SCANNING, &local->scanning)))
5646 ++ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
5647 + status->rx_flags |= IEEE80211_RX_IN_SCAN;
5648 +
5649 + if (ieee80211_is_mgmt(fc))
5650 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
5651 +index 105436d..5279300 100644
5652 +--- a/net/mac80211/scan.c
5653 ++++ b/net/mac80211/scan.c
5654 +@@ -213,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
5655 + if (bss)
5656 + ieee80211_rx_bss_put(sdata->local, bss);
5657 +
5658 +- /* If we are on-operating-channel, and this packet is for the
5659 +- * current channel, pass the pkt on up the stack so that
5660 +- * the rest of the stack can make use of it.
5661 +- */
5662 +- if (ieee80211_cfg_on_oper_channel(sdata->local)
5663 +- && (channel == sdata->local->oper_channel))
5664 +- return RX_CONTINUE;
5665 +-
5666 + dev_kfree_skb(skb);
5667 + return RX_QUEUED;
5668 + }
5669 +@@ -264,8 +256,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5670 + bool was_hw_scan)
5671 + {
5672 + struct ieee80211_local *local = hw_to_local(hw);
5673 +- bool on_oper_chan;
5674 +- bool enable_beacons = false;
5675 +
5676 + lockdep_assert_held(&local->mtx);
5677 +
5678 +@@ -298,25 +288,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
5679 + local->scanning = 0;
5680 + local->scan_channel = NULL;
5681 +
5682 +- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5683 +-
5684 +- if (was_hw_scan || !on_oper_chan)
5685 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5686 +- else
5687 +- /* Set power back to normal operating levels. */
5688 +- ieee80211_hw_config(local, 0);
5689 +-
5690 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5691 + if (!was_hw_scan) {
5692 +- bool on_oper_chan2;
5693 + ieee80211_configure_filter(local);
5694 + drv_sw_scan_complete(local);
5695 +- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5696 +- /* We should always be on-channel at this point. */
5697 +- WARN_ON(!on_oper_chan2);
5698 +- if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
5699 +- enable_beacons = true;
5700 +-
5701 +- ieee80211_offchannel_return(local, enable_beacons, true);
5702 ++ ieee80211_offchannel_return(local, true);
5703 + }
5704 +
5705 + ieee80211_recalc_idle(local);
5706 +@@ -357,15 +333,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
5707 + */
5708 + drv_sw_scan_start(local);
5709 +
5710 ++ ieee80211_offchannel_stop_beaconing(local);
5711 ++
5712 + local->leave_oper_channel_time = 0;
5713 + local->next_scan_state = SCAN_DECISION;
5714 + local->scan_channel_idx = 0;
5715 +
5716 +- /* We always want to use off-channel PS, even if we
5717 +- * are not really leaving oper-channel. Don't
5718 +- * tell the AP though, as long as we are on-channel.
5719 +- */
5720 +- ieee80211_offchannel_enable_all_ps(local, false);
5721 ++ drv_flush(local, false);
5722 +
5723 + ieee80211_configure_filter(local);
5724 +
5725 +@@ -508,20 +482,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5726 + }
5727 + mutex_unlock(&local->iflist_mtx);
5728 +
5729 +- next_chan = local->scan_req->channels[local->scan_channel_idx];
5730 +-
5731 +- if (ieee80211_cfg_on_oper_channel(local)) {
5732 +- /* We're currently on operating channel. */
5733 +- if (next_chan == local->oper_channel)
5734 +- /* We don't need to move off of operating channel. */
5735 +- local->next_scan_state = SCAN_SET_CHANNEL;
5736 +- else
5737 +- /*
5738 +- * We do need to leave operating channel, as next
5739 +- * scan is somewhere else.
5740 +- */
5741 +- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5742 +- } else {
5743 ++ if (local->scan_channel) {
5744 + /*
5745 + * we're currently scanning a different channel, let's
5746 + * see if we can scan another channel without interfering
5747 +@@ -537,6 +498,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5748 + *
5749 + * Otherwise switch back to the operating channel.
5750 + */
5751 ++ next_chan = local->scan_req->channels[local->scan_channel_idx];
5752 +
5753 + bad_latency = time_after(jiffies +
5754 + ieee80211_scan_get_channel_time(next_chan),
5755 +@@ -554,6 +516,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5756 + local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
5757 + else
5758 + local->next_scan_state = SCAN_SET_CHANNEL;
5759 ++ } else {
5760 ++ /*
5761 ++ * we're on the operating channel currently, let's
5762 ++ * leave that channel now to scan another one
5763 ++ */
5764 ++ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
5765 + }
5766 +
5767 + *next_delay = 0;
5768 +@@ -562,10 +530,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
5769 + static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
5770 + unsigned long *next_delay)
5771 + {
5772 +- /* PS will already be in off-channel mode,
5773 +- * we do that once at the beginning of scanning.
5774 +- */
5775 +- ieee80211_offchannel_stop_vifs(local, false);
5776 ++ ieee80211_offchannel_stop_station(local);
5777 ++
5778 ++ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
5779 +
5780 + /*
5781 + * What if the nullfunc frames didn't arrive?
5782 +@@ -588,15 +555,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
5783 + {
5784 + /* switch back to the operating channel */
5785 + local->scan_channel = NULL;
5786 +- if (!ieee80211_cfg_on_oper_channel(local))
5787 +- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5788 ++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
5789 +
5790 + /*
5791 +- * Re-enable vifs and beaconing. Leave PS
5792 +- * in off-channel state..will put that back
5793 +- * on-channel at the end of scanning.
5794 ++ * Only re-enable station mode interface now; beaconing will be
5795 ++ * re-enabled once the full scan has been completed.
5796 + */
5797 +- ieee80211_offchannel_return(local, true, false);
5798 ++ ieee80211_offchannel_return(local, false);
5799 ++
5800 ++ __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
5801 +
5802 + *next_delay = HZ / 5;
5803 + local->next_scan_state = SCAN_DECISION;
5804 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5805 +index 1f8b120..eff1f4e 100644
5806 +--- a/net/mac80211/tx.c
5807 ++++ b/net/mac80211/tx.c
5808 +@@ -259,8 +259,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
5809 + if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
5810 + return TX_CONTINUE;
5811 +
5812 +- if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
5813 +- test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
5814 ++ if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
5815 + !ieee80211_is_probe_req(hdr->frame_control) &&
5816 + !ieee80211_is_nullfunc(hdr->frame_control))
5817 + /*
5818 +diff --git a/net/mac80211/work.c b/net/mac80211/work.c
5819 +index 6c53b6d..99165ef 100644
5820 +--- a/net/mac80211/work.c
5821 ++++ b/net/mac80211/work.c
5822 +@@ -899,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
5823 + return false;
5824 + }
5825 +
5826 +-static enum nl80211_channel_type
5827 +-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
5828 +- enum nl80211_channel_type oper_ct)
5829 +-{
5830 +- switch (wk_ct) {
5831 +- case NL80211_CHAN_NO_HT:
5832 +- return oper_ct;
5833 +- case NL80211_CHAN_HT20:
5834 +- if (oper_ct != NL80211_CHAN_NO_HT)
5835 +- return oper_ct;
5836 +- return wk_ct;
5837 +- case NL80211_CHAN_HT40MINUS:
5838 +- case NL80211_CHAN_HT40PLUS:
5839 +- return wk_ct;
5840 +- }
5841 +- WARN_ON(1); /* shouldn't get here */
5842 +- return wk_ct;
5843 +-}
5844 +-
5845 +-
5846 + static void ieee80211_work_timer(unsigned long data)
5847 + {
5848 + struct ieee80211_local *local = (void *) data;
5849 +@@ -969,52 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
5850 + }
5851 +
5852 + if (!started && !local->tmp_channel) {
5853 +- bool on_oper_chan;
5854 +- bool tmp_chan_changed = false;
5855 +- bool on_oper_chan2;
5856 +- enum nl80211_channel_type wk_ct;
5857 +- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
5858 +-
5859 +- /* Work with existing channel type if possible. */
5860 +- wk_ct = wk->chan_type;
5861 +- if (wk->chan == local->hw.conf.channel)
5862 +- wk_ct = ieee80211_calc_ct(wk->chan_type,
5863 +- local->hw.conf.channel_type);
5864 +-
5865 +- if (local->tmp_channel)
5866 +- if ((local->tmp_channel != wk->chan) ||
5867 +- (local->tmp_channel_type != wk_ct))
5868 +- tmp_chan_changed = true;
5869 +-
5870 +- local->tmp_channel = wk->chan;
5871 +- local->tmp_channel_type = wk_ct;
5872 + /*
5873 +- * Leave the station vifs in awake mode if they
5874 +- * happen to be on the same channel as
5875 +- * the requested channel.
5876 ++ * TODO: could optimize this by leaving the
5877 ++ * station vifs in awake mode if they
5878 ++ * happen to be on the same channel as
5879 ++ * the requested channel
5880 + */
5881 +- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
5882 +- if (on_oper_chan != on_oper_chan2) {
5883 +- if (on_oper_chan2) {
5884 +- /* going off oper channel, PS too */
5885 +- ieee80211_offchannel_stop_vifs(local,
5886 +- true);
5887 +- ieee80211_hw_config(local, 0);
5888 +- } else {
5889 +- /* going on channel, but leave PS
5890 +- * off-channel. */
5891 +- ieee80211_hw_config(local, 0);
5892 +- ieee80211_offchannel_return(local,
5893 +- true,
5894 +- false);
5895 +- }
5896 +- } else if (tmp_chan_changed)
5897 +- /* Still off-channel, but on some other
5898 +- * channel, so update hardware.
5899 +- * PS should already be off-channel.
5900 +- */
5901 +- ieee80211_hw_config(local, 0);
5902 ++ ieee80211_offchannel_stop_beaconing(local);
5903 ++ ieee80211_offchannel_stop_station(local);
5904 +
5905 ++ local->tmp_channel = wk->chan;
5906 ++ local->tmp_channel_type = wk->chan_type;
5907 ++ ieee80211_hw_config(local, 0);
5908 + started = true;
5909 + wk->timeout = jiffies;
5910 + }
5911 +@@ -1100,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
5912 + * we still need to do a hardware config. Currently,
5913 + * we cannot be here while scanning, however.
5914 + */
5915 +- if (!ieee80211_cfg_on_oper_channel(local))
5916 +- ieee80211_hw_config(local, 0);
5917 ++ ieee80211_hw_config(local, 0);
5918 +
5919 + /* At the least, we need to disable offchannel_ps,
5920 + * so just go ahead and run the entire offchannel
5921 +@@ -1109,7 +1054,7 @@ static void ieee80211_work_work(struct work_struct *work)
5922 + * beaconing if we were already on-oper-channel
5923 + * as a future optimization.
5924 + */
5925 +- ieee80211_offchannel_return(local, true, true);
5926 ++ ieee80211_offchannel_return(local, true);
5927 +
5928 + /* give connection some time to breathe */
5929 + run_again(local, jiffies + HZ/2);
5930 +diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
5931 +index f614ce7..28a39bb 100644
5932 +--- a/net/mac80211/wpa.c
5933 ++++ b/net/mac80211/wpa.c
5934 +@@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
5935 + if (status->flag & RX_FLAG_MMIC_ERROR)
5936 + goto mic_fail;
5937 +
5938 +- if (!(status->flag & RX_FLAG_IV_STRIPPED))
5939 ++ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
5940 + goto update_iv;
5941 +
5942 + return RX_CONTINUE;
5943 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
5944 +index 6e03888..d4ad50e 100644
5945 +--- a/net/sunrpc/svc.c
5946 ++++ b/net/sunrpc/svc.c
5947 +@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
5948 +
5949 + fail_free:
5950 + kfree(m->to_pool);
5951 ++ m->to_pool = NULL;
5952 + fail:
5953 + return -ENOMEM;
5954 + }
5955 +@@ -287,7 +288,9 @@ svc_pool_map_put(void)
5956 + if (!--m->count) {
5957 + m->mode = SVC_POOL_DEFAULT;
5958 + kfree(m->to_pool);
5959 ++ m->to_pool = NULL;
5960 + kfree(m->pool_to);
5961 ++ m->pool_to = NULL;
5962 + m->npools = 0;
5963 + }
5964 +
5965 +@@ -527,17 +530,20 @@ svc_destroy(struct svc_serv *serv)
5966 + printk("svc_destroy: no threads for serv=%p!\n", serv);
5967 +
5968 + del_timer_sync(&serv->sv_temptimer);
5969 +-
5970 +- svc_close_all(&serv->sv_tempsocks);
5971 ++ /*
5972 ++ * The set of xprts (contained in the sv_tempsocks and
5973 ++ * sv_permsocks lists) is now constant, since it is modified
5974 ++ * only by accepting new sockets (done by service threads in
5975 ++ * svc_recv) or aging old ones (done by sv_temptimer), or
5976 ++ * configuration changes (excluded by whatever locking the
5977 ++ * caller is using--nfsd_mutex in the case of nfsd). So it's
5978 ++ * safe to traverse those lists and shut everything down:
5979 ++ */
5980 ++ svc_close_all(serv);
5981 +
5982 + if (serv->sv_shutdown)
5983 + serv->sv_shutdown(serv);
5984 +
5985 +- svc_close_all(&serv->sv_permsocks);
5986 +-
5987 +- BUG_ON(!list_empty(&serv->sv_permsocks));
5988 +- BUG_ON(!list_empty(&serv->sv_tempsocks));
5989 +-
5990 + cache_clean_deferred(serv);
5991 +
5992 + if (svc_serv_is_pooled(serv))
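
Both sunrpc hunks above pair kfree() with clearing the pointer, so a repeated release (or a later svc_pool_map_put() after a failed allocation) sees NULL instead of a stale pointer. A small userspace analogue, with free()'s tolerance of NULL standing in for kfree()'s:

#include <stdlib.h>

struct pool_map {
    unsigned int *to_pool;
    unsigned int *pool_to;
};

/* free() accepts NULL, so clearing the pointers after freeing
 * turns a potential double free into a harmless no-op. */
static void pool_map_release(struct pool_map *m)
{
    free(m->to_pool);
    m->to_pool = NULL;
    free(m->pool_to);
    m->pool_to = NULL;
}

int main(void)
{
    struct pool_map m = {
        .to_pool = malloc(4 * sizeof(unsigned int)),
        .pool_to = malloc(4 * sizeof(unsigned int)),
    };
    pool_map_release(&m);
    pool_map_release(&m);   /* safe: both pointers are NULL now */
    return 0;
}
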
5993 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
5994 +index 447cd0e..9ed2cd0 100644
5995 +--- a/net/sunrpc/svc_xprt.c
5996 ++++ b/net/sunrpc/svc_xprt.c
5997 +@@ -893,14 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
5998 + spin_lock_bh(&serv->sv_lock);
5999 + if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
6000 + list_del_init(&xprt->xpt_list);
6001 +- /*
6002 +- * The only time we're called while xpt_ready is still on a list
6003 +- * is while the list itself is about to be destroyed (in
6004 +- * svc_destroy). BUT svc_xprt_enqueue could still be attempting
6005 +- * to add new entries to the sp_sockets list, so we can't leave
6006 +- * a freed xprt on it.
6007 +- */
6008 +- list_del_init(&xprt->xpt_ready);
6009 ++ BUG_ON(!list_empty(&xprt->xpt_ready));
6010 + if (test_bit(XPT_TEMP, &xprt->xpt_flags))
6011 + serv->sv_tmpcnt--;
6012 + spin_unlock_bh(&serv->sv_lock);
6013 +@@ -928,22 +921,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
6014 + }
6015 + EXPORT_SYMBOL_GPL(svc_close_xprt);
6016 +
6017 +-void svc_close_all(struct list_head *xprt_list)
6018 ++static void svc_close_list(struct list_head *xprt_list)
6019 ++{
6020 ++ struct svc_xprt *xprt;
6021 ++
6022 ++ list_for_each_entry(xprt, xprt_list, xpt_list) {
6023 ++ set_bit(XPT_CLOSE, &xprt->xpt_flags);
6024 ++ set_bit(XPT_BUSY, &xprt->xpt_flags);
6025 ++ }
6026 ++}
6027 ++
6028 ++void svc_close_all(struct svc_serv *serv)
6029 + {
6030 ++ struct svc_pool *pool;
6031 + struct svc_xprt *xprt;
6032 + struct svc_xprt *tmp;
6033 ++ int i;
6034 ++
6035 ++ svc_close_list(&serv->sv_tempsocks);
6036 ++ svc_close_list(&serv->sv_permsocks);
6037 +
6038 ++ for (i = 0; i < serv->sv_nrpools; i++) {
6039 ++ pool = &serv->sv_pools[i];
6040 ++
6041 ++ spin_lock_bh(&pool->sp_lock);
6042 ++ while (!list_empty(&pool->sp_sockets)) {
6043 ++ xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
6044 ++ list_del_init(&xprt->xpt_ready);
6045 ++ }
6046 ++ spin_unlock_bh(&pool->sp_lock);
6047 ++ }
6048 + /*
6049 +- * The server is shutting down, and no more threads are running.
6050 +- * svc_xprt_enqueue() might still be running, but at worst it
6051 +- * will re-add the xprt to sp_sockets, which will soon get
6052 +- * freed. So we don't bother with any more locking, and don't
6053 +- * leave the close to the (nonexistent) server threads:
6054 ++ * At this point the sp_sockets lists will stay empty, since
6055 ++ * svc_enqueue will not add new entries without taking the
6056 ++ * sp_lock and checking XPT_BUSY.
6057 + */
6058 +- list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
6059 +- set_bit(XPT_CLOSE, &xprt->xpt_flags);
6060 ++ list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
6061 + svc_delete_xprt(xprt);
6062 +- }
6063 ++ list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
6064 ++ svc_delete_xprt(xprt);
6065 ++
6066 ++ BUG_ON(!list_empty(&serv->sv_permsocks));
6067 ++ BUG_ON(!list_empty(&serv->sv_tempsocks));
6068 + }
6069 +
6070 + /*
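
The reworked svc_close_all() above shuts down in three phases: flag every transport (XPT_CLOSE and XPT_BUSY) so svc_xprt_enqueue backs off, drain each pool's sp_sockets queue under sp_lock, and only then delete the transports, which is why svc_delete_xprt can now BUG_ON a non-empty xpt_ready. A deliberately simplified, lock-free sketch of that mark/drain/delete ordering (not the real svc_xprt types):

#include <stdio.h>
#include <stdlib.h>

struct xprt {
    int close, busy;        /* stand-ins for XPT_CLOSE / XPT_BUSY */
    int queued;             /* stand-in for sitting on a ready list */
    struct xprt *next;
};

/* Phase 1: flag everything so producers back off. */
static void close_list(struct xprt *list)
{
    for (struct xprt *x = list; x; x = x->next)
        x->close = x->busy = 1;
}

/* Phase 2: empty the ready queue (reduced to a flag per xprt). */
static void drain_ready(struct xprt *list)
{
    for (struct xprt *x = list; x; x = x->next)
        x->queued = 0;
}

/* Phase 3: with nothing queued and producers locked out, deletion
 * is safe and the "not queued" invariant can be asserted instead
 * of patched around. */
static void delete_all(struct xprt **list)
{
    while (*list) {
        struct xprt *x = *list;
        if (x->queued) { fprintf(stderr, "BUG: queued xprt\n"); abort(); }
        *list = x->next;
        free(x);
    }
}

int main(void)
{
    struct xprt *a = calloc(1, sizeof *a);
    struct xprt *b = calloc(1, sizeof *b);
    a->next = b;
    a->queued = 1;
    close_list(a);
    drain_ready(a);
    delete_all(&a);
    return 0;
}
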
6071 +diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
6072 +index 277ebd4..593f4c6 100644
6073 +--- a/net/sunrpc/xdr.c
6074 ++++ b/net/sunrpc/xdr.c
6075 +@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
6076 + * Copies data into an arbitrary memory location from an array of pages
6077 + * The copy is assumed to be non-overlapping.
6078 + */
6079 +-static void
6080 ++void
6081 + _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6082 + {
6083 + struct page **pgfrom;
6084 +@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
6085 +
6086 + } while ((len -= copy) != 0);
6087 + }
6088 ++EXPORT_SYMBOL_GPL(_copy_from_pages);
6089 +
6090 + /*
6091 + * xdr_shrink_bufhead
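
_copy_from_pages() gathers a byte range that may straddle several pages into a flat buffer; the hunk above only changes its linkage (static dropped, EXPORT_SYMBOL_GPL added) so callers outside xdr.c can use it. A userspace approximation of the gather loop, with a tiny illustrative PAGE_SIZE so the copy crosses a page boundary:

#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 8   /* tiny pages so the example crosses boundaries */

/* Copy len bytes starting at byte offset pgbase of a page array
 * into the flat buffer p, page by page. */
static void copy_from_pages(char *p, char **pages, size_t pgbase, size_t len)
{
    pages += pgbase / PAGE_SIZE;   /* first page touched */
    pgbase &= PAGE_SIZE - 1;       /* offset within that page */

    while (len) {
        size_t copy = PAGE_SIZE - pgbase;
        if (copy > len)
            copy = len;
        memcpy(p, *pages + pgbase, copy);
        p += copy;
        pages++;
        pgbase = 0;                /* later pages start at offset 0 */
        len -= copy;
    }
}

int main(void)
{
    char p0[PAGE_SIZE] = "abcdefgh", p1[PAGE_SIZE] = "ijklmnop";
    char *pages[] = { p0, p1 };
    char out[11] = { 0 };
    copy_from_pages(out, pages, 5, 10);   /* crosses the page boundary */
    printf("%s\n", out);                  /* prints fghijklmno */
    return 0;
}
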
6092 +diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
6093 +index ec7afce..bccf07d 100644
6094 +--- a/scripts/kconfig/streamline_config.pl
6095 ++++ b/scripts/kconfig/streamline_config.pl
6096 +@@ -250,33 +250,61 @@ if ($kconfig) {
6097 + read_kconfig($kconfig);
6098 + }
6099 +
6100 ++sub convert_vars {
6101 ++ my ($line, %vars) = @_;
6102 ++
6103 ++ my $process = "";
6104 ++
6105 ++ while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
6106 ++ my $start = $1;
6107 ++ my $variable = $2;
6108 ++ my $var = $3;
6109 ++
6110 ++ if (defined($vars{$var})) {
6111 ++ $process .= $start . $vars{$var};
6112 ++ } else {
6113 ++ $process .= $start . $variable;
6114 ++ }
6115 ++ }
6116 ++
6117 ++ $process .= $line;
6118 ++
6119 ++ return $process;
6120 ++}
6121 ++
6122 + # Read all Makefiles to map the configs to the objects
6123 + foreach my $makefile (@makefiles) {
6124 +
6125 +- my $cont = 0;
6126 ++ my $line = "";
6127 ++ my %make_vars;
6128 +
6129 + open(MIN,$makefile) || die "Can't open $makefile";
6130 + while (<MIN>) {
6131 ++ # if this line ends with a backslash, continue
6132 ++ chomp;
6133 ++ if (/^(.*)\\$/) {
6134 ++ $line .= $1;
6135 ++ next;
6136 ++ }
6137 ++
6138 ++ $line .= $_;
6139 ++ $_ = $line;
6140 ++ $line = "";
6141 ++
6142 + my $objs;
6143 +
6144 +- # is this a line after a line with a backslash?
6145 +- if ($cont && /(\S.*)$/) {
6146 +- $objs = $1;
6147 +- }
6148 +- $cont = 0;
6149 ++ $_ = convert_vars($_, %make_vars);
6150 +
6151 + # collect objects after obj-$(CONFIG_FOO_BAR)
6152 + if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
6153 + $var = $1;
6154 + $objs = $2;
6155 ++
6156 ++ # check if variables are set
6157 ++ } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
6158 ++ $make_vars{$1} = $2;
6159 + }
6160 + if (defined($objs)) {
6161 +- # test if the line ends with a backslash
6162 +- if ($objs =~ m,(.*)\\$,) {
6163 +- $objs = $1;
6164 +- $cont = 1;
6165 +- }
6166 +-
6167 + foreach my $obj (split /\s+/,$objs) {
6168 + $obj =~ s/-/_/g;
6169 + if ($obj =~ /(.*)\.o$/) {
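
The streamline_config.pl rewrite above joins backslash-continued Makefile lines into one logical line before matching, and expands recorded $(VAR) assignments via convert_vars(). The same line-joining idea, sketched in C rather than Perl for consistency with the other examples in these notes:

#include <stdio.h>
#include <string.h>

/* Read one logical line: physical lines ending in '\' are joined
 * with the next line, as make itself treats them. */
static int read_logical_line(FILE *f, char *out, size_t outsz)
{
    char buf[256];
    out[0] = '\0';
    while (fgets(buf, sizeof buf, f)) {
        size_t n = strcspn(buf, "\n");
        buf[n] = '\0';
        if (n && buf[n - 1] == '\\') {   /* continuation: drop the
                                          * backslash, keep reading */
            buf[n - 1] = '\0';
            strncat(out, buf, outsz - strlen(out) - 1);
            continue;
        }
        strncat(out, buf, outsz - strlen(out) - 1);
        return 1;
    }
    return out[0] != '\0';
}

int main(void)
{
    char line[1024];
    while (read_logical_line(stdin, line, sizeof line))
        printf("logical: %s\n", line);
    return 0;
}
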
6170 +diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
6171 +index f40a6af6..54e35c1 100644
6172 +--- a/scripts/recordmcount.h
6173 ++++ b/scripts/recordmcount.h
6174 +@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
6175 + succeed_file();
6176 + }
6177 + if (w(txthdr->sh_type) != SHT_PROGBITS ||
6178 +- !(w(txthdr->sh_flags) & SHF_EXECINSTR))
6179 ++ !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
6180 + return NULL;
6181 + return txtname;
6182 + }
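
The recordmcount.h one-liner swaps the 32-bit accessor w() for the width-matching _w() when reading sh_flags, which is a 64-bit field in ELF64 section headers; with the narrow read, a big-endian target sees only the high half and loses SHF_EXECINSTR. A standalone illustration of the truncation (recordmcount's real accessors also handle byte order from the ELF header; this sketch shows only the width problem):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 64-bit big-endian flags field, as in an ELF64 section header
 * from a big-endian target. SHF_EXECINSTR is bit 2 (0x4). */
int main(void)
{
    uint64_t flags = 0x4;                 /* SHF_EXECINSTR */
    unsigned char raw[8];
    for (int i = 0; i < 8; i++)           /* store big-endian */
        raw[i] = flags >> (8 * (7 - i));

    uint32_t narrow;                      /* 32-bit read: only the
                                           * first 4 bytes */
    memcpy(&narrow, raw, 4);

    uint64_t wide = 0;                    /* width-correct read */
    for (int i = 0; i < 8; i++)
        wide = (wide << 8) | raw[i];

    printf("narrow sees EXECINSTR: %d\n", !!(narrow & 0x4)); /* 0: lost */
    printf("wide sees EXECINSTR:   %d\n", !!(wide & 0x4));   /* 1 */
    return 0;
}
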
6183 +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
6184 +index 0d50df0..88a2788 100644
6185 +--- a/security/integrity/ima/ima_api.c
6186 ++++ b/security/integrity/ima/ima_api.c
6187 +@@ -178,8 +178,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
6188 + strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
6189 +
6190 + result = ima_store_template(entry, violation, inode);
6191 +- if (!result)
6192 ++ if (!result || result == -EEXIST)
6193 + iint->flags |= IMA_MEASURED;
6194 +- else
6195 ++ if (result < 0)
6196 + kfree(entry);
6197 + }
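
With ima_add_template_entry() now returning -EEXIST for an already-measured digest, the caller above sets IMA_MEASURED for both success and duplicate, but frees the local entry on any negative result, since only a successful insert transfers ownership to the list. A sketch of that ownership rule, with a stand-in store function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MEASURED 0x1

static void *measurement_list;   /* stand-in for the global list */

/* Pretend store: takes ownership on success, rejects duplicates. */
static int store_entry(void *entry, int digest_seen)
{
    if (digest_seen)
        return -EEXIST;          /* original copy already listed */
    measurement_list = entry;    /* list now owns the entry */
    return 0;
}

static void store_measurement(unsigned *flags, int digest_seen)
{
    void *entry = malloc(16);
    int result = store_entry(entry, digest_seen);

    /* Success and "already there" both mean the file is measured. */
    if (!result || result == -EEXIST)
        *flags |= MEASURED;
    /* Any negative result means the list did not take ownership,
     * so the local copy must be freed to avoid a leak. */
    if (result < 0)
        free(entry);
}

int main(void)
{
    unsigned flags = 0;
    store_measurement(&flags, 0);   /* stored */
    store_measurement(&flags, 1);   /* duplicate: flagged, freed */
    printf("flags=%#x\n", flags);
    free(measurement_list);
    return 0;
}
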
6198 +diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
6199 +index 8e28f04..55a6271 100644
6200 +--- a/security/integrity/ima/ima_queue.c
6201 ++++ b/security/integrity/ima/ima_queue.c
6202 +@@ -23,6 +23,8 @@
6203 + #include <linux/slab.h>
6204 + #include "ima.h"
6205 +
6206 ++#define AUDIT_CAUSE_LEN_MAX 32
6207 ++
6208 + LIST_HEAD(ima_measurements); /* list of all measurements */
6209 +
6210 + /* key: inode (before secure-hashing a file) */
6211 +@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
6212 +
6213 + result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
6214 + if (result != 0)
6215 +- pr_err("IMA: Error Communicating to TPM chip\n");
6216 ++ pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
6217 ++ result);
6218 + return result;
6219 + }
6220 +
6221 +@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6222 + {
6223 + u8 digest[IMA_DIGEST_SIZE];
6224 + const char *audit_cause = "hash_added";
6225 ++ char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
6226 + int audit_info = 1;
6227 +- int result = 0;
6228 ++ int result = 0, tpmresult = 0;
6229 +
6230 + mutex_lock(&ima_extend_list_mutex);
6231 + if (!violation) {
6232 + memcpy(digest, entry->digest, sizeof digest);
6233 + if (ima_lookup_digest_entry(digest)) {
6234 + audit_cause = "hash_exists";
6235 ++ result = -EEXIST;
6236 + goto out;
6237 + }
6238 + }
6239 +@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
6240 + if (violation) /* invalidate pcr */
6241 + memset(digest, 0xff, sizeof digest);
6242 +
6243 +- result = ima_pcr_extend(digest);
6244 +- if (result != 0) {
6245 +- audit_cause = "TPM error";
6246 ++ tpmresult = ima_pcr_extend(digest);
6247 ++ if (tpmresult != 0) {
6248 ++ snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
6249 ++ tpmresult);
6250 ++ audit_cause = tpm_audit_cause;
6251 + audit_info = 0;
6252 + }
6253 + out:
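
The audit-cause change above formats the actual tpm_pcr_extend() return code into a fixed stack buffer with snprintf(), instead of the bare "TPM error" string. The bounded-format pattern in isolation:

#include <stdio.h>

#define AUDIT_CAUSE_LEN_MAX 32

static void audit_log(const char *cause) { printf("cause=%s\n", cause); }

int main(void)
{
    const char *audit_cause = "hash_added";
    char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
    int tpmresult = -62;   /* e.g. a timeout from the TPM driver */

    if (tpmresult != 0) {
        /* snprintf never writes past the buffer and always
         * NUL-terminates, unlike sprintf into a fixed array. */
        snprintf(tpm_audit_cause, sizeof tpm_audit_cause,
                 "TPM_error(%d)", tpmresult);
        audit_cause = tpm_audit_cause;
    }
    audit_log(audit_cause);
    return 0;
}
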
6254 +diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
6255 +index 4a9b4b2..867558c 100644
6256 +--- a/security/tomoyo/util.c
6257 ++++ b/security/tomoyo/util.c
6258 +@@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
6259 + if (d < '0' || d > '7' || e < '0' || e > '7')
6260 + break;
6261 + c = tomoyo_make_byte(c, d, e);
6262 +- if (tomoyo_invalid(c))
6263 +- continue; /* pattern is not \000 */
6264 ++ if (c <= ' ' || c >= 127)
6265 ++ continue;
6266 + }
6267 + goto out;
6268 + } else if (in_repetition && c == '/') {
6269 + goto out;
6270 +- } else if (tomoyo_invalid(c)) {
6271 ++ } else if (c <= ' ' || c >= 127) {
6272 + goto out;
6273 + }
6274 + }
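
The tomoyo hunk open-codes the byte test as c <= ' ' || c >= 127, accepting only printable non-space ASCII ('!' through '~') in a word. The predicate on its own:

#include <stdio.h>

/* A byte is acceptable in a TOMOYO-style word only if it is
 * printable ASCII other than space: '!' (33) through '~' (126). */
static int valid_word_byte(unsigned char c)
{
    return !(c <= ' ' || c >= 127);
}

int main(void)
{
    printf("%d %d %d %d\n",
           valid_word_byte('a'),    /* 1 */
           valid_word_byte(' '),    /* 0 */
           valid_word_byte('\t'),   /* 0 */
           valid_word_byte(0xfe));  /* 0: high-bit bytes rejected */
    return 0;
}
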
6275 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6276 +index c2f79e6..5b2b75b 100644
6277 +--- a/sound/pci/hda/hda_intel.c
6278 ++++ b/sound/pci/hda/hda_intel.c
6279 +@@ -2509,6 +2509,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
6280 + SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
6281 + SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
6282 + SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
6283 ++ SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
6284 + SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
6285 + SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
6286 + SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
6287 +diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
6288 +index 618ddad..368f0c5 100644
6289 +--- a/sound/pci/hda/hda_local.h
6290 ++++ b/sound/pci/hda/hda_local.h
6291 +@@ -487,7 +487,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
6292 + }
6293 +
6294 + /* get the widget type from widget capability bits */
6295 +-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
6296 ++static inline int get_wcaps_type(unsigned int wcaps)
6297 ++{
6298 ++ if (!wcaps)
6299 ++ return -1; /* invalid type */
6300 ++ return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
6301 ++}
6302 +
6303 + static inline unsigned int get_wcaps_channels(u32 wcaps)
6304 + {
6305 +diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
6306 +index 2c981b5..254ab52 100644
6307 +--- a/sound/pci/hda/hda_proc.c
6308 ++++ b/sound/pci/hda/hda_proc.c
6309 +@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
6310 + [AC_WID_BEEP] = "Beep Generator Widget",
6311 + [AC_WID_VENDOR] = "Vendor Defined Widget",
6312 + };
6313 ++ if (wid_value == -1)
6314 ++ return "UNKNOWN Widget";
6315 + wid_value &= 0xf;
6316 + if (names[wid_value])
6317 + return names[wid_value];
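
Turning the get_wcaps_type() macro into a static inline lets it return -1 when the caps word is zero (a widget that never answered), and the matching hda_proc.c check prints "UNKNOWN Widget" for that sentinel instead of indexing the name table with garbage. Both halves, condensed into a runnable sketch:

#include <stdio.h>

#define AC_WCAP_TYPE        0x00f00000
#define AC_WCAP_TYPE_SHIFT  20

/* A macro could not easily signal "no caps at all"; the inline
 * can return -1 as an invalid-type sentinel. */
static inline int get_wcaps_type(unsigned int wcaps)
{
    if (!wcaps)
        return -1;
    return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
}

static const char *wid_type_name(int wid_value)
{
    static const char *names[16] = { [0] = "Audio Output" };
    if (wid_value == -1)
        return "UNKNOWN Widget";   /* codec did not answer */
    wid_value &= 0xf;
    return names[wid_value] ? names[wid_value] : "UNKNOWN Widget";
}

int main(void)
{
    printf("%s\n", wid_type_name(get_wcaps_type(0)));          /* UNKNOWN */
    printf("%s\n", wid_type_name(get_wcaps_type(0x00000001))); /* type 0 */
    return 0;
}
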
6318 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
6319 +index 70a7abd..5b0a9bb 100644
6320 +--- a/sound/pci/hda/patch_cirrus.c
6321 ++++ b/sound/pci/hda/patch_cirrus.c
6322 +@@ -920,16 +920,14 @@ static void cs_automute(struct hda_codec *codec)
6323 +
6324 + /* mute speakers if spdif or hp jack is plugged in */
6325 + for (i = 0; i < cfg->speaker_outs; i++) {
6326 ++ int pin_ctl = hp_present ? 0 : PIN_OUT;
6327 ++ /* detect on spdif is specific to CS421x */
6328 ++ if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
6329 ++ pin_ctl = 0;
6330 ++
6331 + nid = cfg->speaker_pins[i];
6332 + snd_hda_codec_write(codec, nid, 0,
6333 +- AC_VERB_SET_PIN_WIDGET_CONTROL,
6334 +- hp_present ? 0 : PIN_OUT);
6335 +- /* detect on spdif is specific to CS421x */
6336 +- if (spec->vendor_nid == CS421X_VENDOR_NID) {
6337 +- snd_hda_codec_write(codec, nid, 0,
6338 +- AC_VERB_SET_PIN_WIDGET_CONTROL,
6339 +- spdif_present ? 0 : PIN_OUT);
6340 +- }
6341 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
6342 + }
6343 + if (spec->gpio_eapd_hp) {
6344 + unsigned int gpio = hp_present ?
6345 +@@ -1771,30 +1769,19 @@ static int build_cs421x_output(struct hda_codec *codec)
6346 + struct auto_pin_cfg *cfg = &spec->autocfg;
6347 + struct snd_kcontrol *kctl;
6348 + int err;
6349 +- char *name = "HP/Speakers";
6350 ++ char *name = "Master";
6351 +
6352 + fix_volume_caps(codec, dac);
6353 +- if (!spec->vmaster_sw) {
6354 +- err = add_vmaster(codec, dac);
6355 +- if (err < 0)
6356 +- return err;
6357 +- }
6358 +
6359 + err = add_mute(codec, name, 0,
6360 + HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6361 + if (err < 0)
6362 + return err;
6363 +- err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
6364 +- if (err < 0)
6365 +- return err;
6366 +
6367 + err = add_volume(codec, name, 0,
6368 + HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
6369 + if (err < 0)
6370 + return err;
6371 +- err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
6372 +- if (err < 0)
6373 +- return err;
6374 +
6375 + if (cfg->speaker_outs) {
6376 + err = snd_hda_ctl_add(codec, 0,
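
cs_automute() now computes the desired speaker pin control once (0 when headphones, or on CS421x when S/PDIF, are present; PIN_OUT otherwise) and issues a single codec write per pin, where the old code's second conditional write could clobber the first. The decision logic, extracted as a pure function:

#include <stdio.h>

#define PIN_OUT 0x40

/* Decide the speaker pin-control value once, then apply it;
 * the old code did two writes whose order mattered. */
static int speaker_pin_ctl(int hp_present, int spdif_present, int is_cs421x)
{
    int pin_ctl = hp_present ? 0 : PIN_OUT;
    if (spdif_present && is_cs421x)
        pin_ctl = 0;    /* S/PDIF detection is CS421x-specific */
    return pin_ctl;
}

int main(void)
{
    printf("%#x\n", speaker_pin_ctl(0, 0, 1));  /* 0x40: speakers on */
    printf("%#x\n", speaker_pin_ctl(0, 1, 1));  /* 0: muted by S/PDIF */
    printf("%#x\n", speaker_pin_ctl(0, 1, 0));  /* 0x40: non-CS421x */
    return 0;
}
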
6377 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
6378 +index 0de2119..7072251 100644
6379 +--- a/sound/pci/hda/patch_conexant.c
6380 ++++ b/sound/pci/hda/patch_conexant.c
6381 +@@ -1120,8 +1120,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
6382 +
6383 + static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
6384 + SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
6385 +- SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
6386 +- CXT5045_LAPTOP_HPSENSE),
6387 + SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
6388 + SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
6389 + SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
6390 +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
6391 +index 616678f..f3c73a9 100644
6392 +--- a/sound/pci/hda/patch_sigmatel.c
6393 ++++ b/sound/pci/hda/patch_sigmatel.c
6394 +@@ -1631,7 +1631,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
6395 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
6396 + "Dell Studio 1557", STAC_DELL_M6_DMIC),
6397 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
6398 +- "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
6399 ++ "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
6400 + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
6401 + "Dell Studio 1558", STAC_DELL_M6_DMIC),
6402 + {} /* terminator */
6403 +@@ -4326,6 +4326,27 @@ static void stac_store_hints(struct hda_codec *codec)
6404 + }
6405 + }
6406 +
6407 ++static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
6408 ++ const hda_nid_t *pins)
6409 ++{
6410 ++ while (num_pins--)
6411 ++ stac_issue_unsol_event(codec, *pins++);
6412 ++}
6413 ++
6414 ++/* fake event to set up pins */
6415 ++static void stac_fake_hp_events(struct hda_codec *codec)
6416 ++{
6417 ++ struct sigmatel_spec *spec = codec->spec;
6418 ++
6419 ++ if (spec->autocfg.hp_outs)
6420 ++ stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
6421 ++ spec->autocfg.hp_pins);
6422 ++ if (spec->autocfg.line_outs &&
6423 ++ spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
6424 ++ stac_issue_unsol_events(codec, spec->autocfg.line_outs,
6425 ++ spec->autocfg.line_out_pins);
6426 ++}
6427 ++
6428 + static int stac92xx_init(struct hda_codec *codec)
6429 + {
6430 + struct sigmatel_spec *spec = codec->spec;
6431 +@@ -4376,10 +4397,7 @@ static int stac92xx_init(struct hda_codec *codec)
6432 + stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
6433 + AC_PINCTL_OUT_EN);
6434 + /* fake event to set up pins */
6435 +- if (cfg->hp_pins[0])
6436 +- stac_issue_unsol_event(codec, cfg->hp_pins[0]);
6437 +- else if (cfg->line_out_pins[0])
6438 +- stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
6439 ++ stac_fake_hp_events(codec);
6440 + } else {
6441 + stac92xx_auto_init_multi_out(codec);
6442 + stac92xx_auto_init_hp_out(codec);
6443 +@@ -5028,19 +5046,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
6444 + #ifdef CONFIG_PM
6445 + static int stac92xx_resume(struct hda_codec *codec)
6446 + {
6447 +- struct sigmatel_spec *spec = codec->spec;
6448 +-
6449 + stac92xx_init(codec);
6450 + snd_hda_codec_resume_amp(codec);
6451 + snd_hda_codec_resume_cache(codec);
6452 + /* fake event to set up pins again to override cached values */
6453 +- if (spec->hp_detect) {
6454 +- if (spec->autocfg.hp_pins[0])
6455 +- stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
6456 +- else if (spec->autocfg.line_out_pins[0])
6457 +- stac_issue_unsol_event(codec,
6458 +- spec->autocfg.line_out_pins[0]);
6459 +- }
6460 ++ stac_fake_hp_events(codec);
6461 + return 0;
6462 + }
6463 +
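
The sigmatel refactor hoists the duplicated fake-event code from stac92xx_init() and stac92xx_resume() into stac_fake_hp_events(), which also fires on every HP and line-out pin rather than just the first. The shape of the loop-over-pins helper, with printf standing in for the unsolicited event:

#include <stdio.h>

typedef unsigned short hda_nid_t;

static void issue_unsol_event(hda_nid_t nid)
{
    printf("event on pin 0x%02x\n", nid);
}

/* Fire one event per pin; callers no longer duplicate the loop. */
static void issue_unsol_events(int num_pins, const hda_nid_t *pins)
{
    while (num_pins--)
        issue_unsol_event(*pins++);
}

int main(void)
{
    const hda_nid_t hp_pins[]   = { 0x0a, 0x0f };
    const hda_nid_t line_pins[] = { 0x10 };

    issue_unsol_events(2, hp_pins);
    /* skip the line-outs only when they alias the HP pins */
    if (line_pins[0] != hp_pins[0])
        issue_unsol_events(1, line_pins);
    return 0;
}
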
6464 +diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
6465 +index b513762..8d69e59 100644
6466 +--- a/sound/pci/hda/patch_via.c
6467 ++++ b/sound/pci/hda/patch_via.c
6468 +@@ -2200,7 +2200,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
6469 + {
6470 + struct via_spec *spec = codec->spec;
6471 +
6472 +- if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
6473 ++ if (!spec->aa_mix_nid)
6474 ++ return 0; /* no loopback switching available */
6475 ++ if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
6476 ++ spec->speaker_path.depth))
6477 + return 0; /* no loopback switching available */
6478 + if (!via_clone_control(spec, &via_aamix_ctl_enum))
6479 + return -ENOMEM;
6480 +diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
6481 +index e328cfb..e525da2 100644
6482 +--- a/sound/pci/ice1712/amp.c
6483 ++++ b/sound/pci/ice1712/amp.c
6484 +@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
6485 +
6486 + static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
6487 + {
6488 +- /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
6489 +- snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
6490 ++ if (ice->ac97)
6491 ++ /* we use pins 39 and 41 of the VT1616 for left and right
6492 ++ read outputs */
6493 ++ snd_ac97_write_cache(ice->ac97, 0x5a,
6494 ++ snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
6495 + return 0;
6496 + }
6497 +
6498 +diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
6499 +index 42d1ab1..915546a 100644
6500 +--- a/sound/pci/oxygen/xonar_wm87x6.c
6501 ++++ b/sound/pci/oxygen/xonar_wm87x6.c
6502 +@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
6503 + struct xonar_wm87x6 *data = chip->model_data;
6504 +
6505 + wm8776_write(chip, WM8776_RESET, 0);
6506 ++ wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
6507 + wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
6508 + WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
6509 + wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
6510 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
6511 +index 81c6ede..08dcce5 100644
6512 +--- a/sound/usb/endpoint.c
6513 ++++ b/sound/usb/endpoint.c
6514 +@@ -17,6 +17,7 @@
6515 +
6516 + #include <linux/gfp.h>
6517 + #include <linux/init.h>
6518 ++#include <linux/ratelimit.h>
6519 + #include <linux/usb.h>
6520 + #include <linux/usb/audio.h>
6521 +
6522 +@@ -458,8 +459,8 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
6523 +
6524 + for (i = 0; i < urb->number_of_packets; i++) {
6525 + cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
6526 +- if (urb->iso_frame_desc[i].status) {
6527 +- snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
6528 ++ if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
6529 ++ snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
6530 + // continue;
6531 + }
6532 + bytes = urb->iso_frame_desc[i].actual_length;
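
retire_capture_urb() now rate-limits the per-frame status message (hence the new linux/ratelimit.h include) and demotes it to a debug printk, since a misbehaving device could otherwise log once per ISO packet. A userspace analogue of an interval/burst limiter in the spirit of printk_ratelimit() (the kernel's actual window and burst differ):

#include <stdio.h>
#include <time.h>

/* Allow at most `burst` messages per `interval` seconds, roughly
 * what printk_ratelimit() does with jiffies. */
static int ratelimit(void)
{
    static time_t window_start;
    static int count;
    const int interval = 5, burst = 10;
    time_t now = time(NULL);

    if (now - window_start >= interval) {
        window_start = now;
        count = 0;
    }
    return count++ < burst;
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        if (ratelimit())
            fprintf(stderr, "frame %d active\n", i);  /* only 10 print */
    return 0;
}
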
6533 +diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
6534 +index c400ade..1e7a47a 100644
6535 +--- a/sound/usb/usx2y/usb_stream.c
6536 ++++ b/sound/usb/usx2y/usb_stream.c
6537 +@@ -674,7 +674,7 @@ dotry:
6538 + inurb->transfer_buffer_length =
6539 + inurb->number_of_packets *
6540 + inurb->iso_frame_desc[0].length;
6541 +- preempt_disable();
6542 ++
6543 + if (u == 0) {
6544 + int now;
6545 + struct usb_device *dev = inurb->dev;
6546 +@@ -686,19 +686,17 @@ dotry:
6547 + }
6548 + err = usb_submit_urb(inurb, GFP_ATOMIC);
6549 + if (err < 0) {
6550 +- preempt_enable();
6551 + snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
6552 + " returned %i\n", u, err);
6553 + return err;
6554 + }
6555 + err = usb_submit_urb(outurb, GFP_ATOMIC);
6556 + if (err < 0) {
6557 +- preempt_enable();
6558 + snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
6559 + " returned %i\n", u, err);
6560 + return err;
6561 + }
6562 +- preempt_enable();
6563 ++
6564 + if (inurb->start_frame != outurb->start_frame) {
6565 + snd_printd(KERN_DEBUG
6566 + "u[%i] start_frames differ in:%u out:%u\n",