From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r1658 - genpatches-2.6/trunk/2.6.32
Date: Tue, 26 Jan 2010 02:13:33
Message-Id: E1NZavp-0003y8-Q8@stork.gentoo.org
1 Author: mpagano
2 Date: 2010-01-26 02:13:25 +0000 (Tue, 26 Jan 2010)
3 New Revision: 1658
4
5 Added:
6 genpatches-2.6/trunk/2.6.32/1005_linux-2.6.32.6.patch
7 Modified:
8 genpatches-2.6/trunk/2.6.32/0000_README
9 Log:
10 Linux patch 2.6.32.6
11
12 Modified: genpatches-2.6/trunk/2.6.32/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/2.6.32/0000_README 2010-01-23 02:36:04 UTC (rev 1657)
15 +++ genpatches-2.6/trunk/2.6.32/0000_README 2010-01-26 02:13:25 UTC (rev 1658)
16 @@ -59,6 +59,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 2.6.32.5
19
20 +Patch: 1005_linux-2.6.32.6.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 2.6.32.6
23 +
24 Patch: 1900_EFI-GPT-header-read-fix.patch
25 From: http://bugs.gentoo.org/show_bug.cgi?id=296915
26 Desc: Read whole sector with EFI GPT header
27
28 Added: genpatches-2.6/trunk/2.6.32/1005_linux-2.6.32.6.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/2.6.32/1005_linux-2.6.32.6.patch (rev 0)
31 +++ genpatches-2.6/trunk/2.6.32/1005_linux-2.6.32.6.patch 2010-01-26 02:13:25 UTC (rev 1658)
32 @@ -0,0 +1,1322 @@
33 +diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
34 +index 6a52d4b..f8590c5 100644
35 +--- a/arch/x86/kernel/cpuid.c
36 ++++ b/arch/x86/kernel/cpuid.c
37 +@@ -192,7 +192,8 @@ static int __init cpuid_init(void)
38 + int i, err = 0;
39 + i = 0;
40 +
41 +- if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
42 ++ if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
43 ++ "cpu/cpuid", &cpuid_fops)) {
44 + printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
45 + CPUID_MAJOR);
46 + err = -EBUSY;
47 +@@ -221,7 +222,7 @@ out_class:
48 + }
49 + class_destroy(cpuid_class);
50 + out_chrdev:
51 +- unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
52 ++ __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
53 + out:
54 + return err;
55 + }
56 +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
57 +index 6a3cefc..b42e63b 100644
58 +--- a/arch/x86/kernel/msr.c
59 ++++ b/arch/x86/kernel/msr.c
60 +@@ -251,7 +251,7 @@ static int __init msr_init(void)
61 + int i, err = 0;
62 + i = 0;
63 +
64 +- if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
65 ++ if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
66 + printk(KERN_ERR "msr: unable to get major %d for msr\n",
67 + MSR_MAJOR);
68 + err = -EBUSY;
69 +@@ -279,7 +279,7 @@ out_class:
70 + msr_device_destroy(i);
71 + class_destroy(msr_class);
72 + out_chrdev:
73 +- unregister_chrdev(MSR_MAJOR, "cpu/msr");
74 ++ __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
75 + out:
76 + return err;
77 + }
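
Both hunks above make the same change: register_chrdev() always claims minors 0-255 for a major, while __register_chrdev() takes an explicit [baseminor, baseminor + count) range, which lets the cpuid and msr drivers register exactly one minor per possible CPU, including CPUs numbered 256 and above. A minimal module sketch of the same pattern, assuming a hypothetical "widget" driver with a fixed major from the local/experimental range:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/threads.h>

#define WIDGET_MAJOR 240        /* hypothetical, local/experimental range */

static const struct file_operations widget_fops = {
        .owner = THIS_MODULE,
};

static int __init widget_init(void)
{
        int ret;

        /* one minor per possible CPU, not the fixed 0-255 range */
        ret = __register_chrdev(WIDGET_MAJOR, 0, NR_CPUS,
                                "cpu/widget", &widget_fops);
        return ret < 0 ? ret : 0;
}

static void __exit widget_exit(void)
{
        /* the range must match the one used at registration */
        __unregister_chrdev(WIDGET_MAJOR, 0, NR_CPUS, "cpu/widget");
}

module_init(widget_init);
module_exit(widget_exit);
MODULE_LICENSE("GPL");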
78 +diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
79 +index b22d13b..a672f12 100644
80 +--- a/arch/x86/pci/i386.c
81 ++++ b/arch/x86/pci/i386.c
82 +@@ -282,6 +282,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
83 + return -EINVAL;
84 +
85 + prot = pgprot_val(vma->vm_page_prot);
86 ++
87 ++ /*
88 ++ * Return an error if PAT is not enabled and write_combine is requested.
89 ++ * The caller can follow up with a UC MINUS request and add a WC mtrr if
90 ++ * there is a free mtrr slot.
91 ++ */
92 ++ if (!pat_enabled && write_combine)
93 ++ return -EINVAL;
94 ++
95 + if (pat_enabled && write_combine)
96 + prot |= _PAGE_CACHE_WC;
97 + else if (pat_enabled || boot_cpu_data.x86 > 3)
98 +diff --git a/block/blk-settings.c b/block/blk-settings.c
99 +index 66d4aa8..d5aa886 100644
100 +--- a/block/blk-settings.c
101 ++++ b/block/blk-settings.c
102 +@@ -560,6 +560,28 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
103 + EXPORT_SYMBOL(blk_stack_limits);
104 +
105 + /**
106 ++ * bdev_stack_limits - adjust queue limits for stacked drivers
107 ++ * @t: the stacking driver limits (top device)
108 ++ * @bdev: the component block_device (bottom)
109 ++ * @start: first data sector within component device
110 ++ *
111 ++ * Description:
112 ++ * Merges queue limits for a top device and a block_device. Returns
113 ++ * 0 if alignment didn't change. Returns -1 if adding the bottom
114 ++ * device caused misalignment.
115 ++ */
116 ++int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
117 ++ sector_t start)
118 ++{
119 ++ struct request_queue *bq = bdev_get_queue(bdev);
120 ++
121 ++ start += get_start_sect(bdev);
122 ++
123 ++ return blk_stack_limits(t, &bq->limits, start << 9);
124 ++}
125 ++EXPORT_SYMBOL(bdev_stack_limits);
126 ++
127 ++/**
128 + * disk_stack_limits - adjust queue limits for stacked drivers
129 + * @disk: MD/DM gendisk (top)
130 + * @bdev: the underlying block device (bottom)
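
bdev_stack_limits() is the new piece here: unlike blk_stack_limits(), it accepts the bottom device as a block_device and folds the partition offset (get_start_sect()) into the alignment arithmetic before delegating, which is what the dm-table.c hunk further down relies on. A hedged sketch of how a stacking driver might use it, with hypothetical names:

#include <linux/blkdev.h>

/*
 * Sketch only: merge the limits of several component devices into one
 * top-level queue_limits. starts[i] is the first data sector within
 * component i, exactly as dm_set_device_limits() passes it below.
 */
static int stackdev_merge_limits(struct queue_limits *top,
                                 struct block_device **parts,
                                 sector_t *starts, int nr)
{
        int i, misaligned = 0;

        blk_set_default_limits(top);
        for (i = 0; i < nr; i++)
                if (bdev_stack_limits(top, parts[i], starts[i]) < 0)
                        misaligned = 1; /* -1 means alignment broke */

        return misaligned ? -1 : 0;
}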
131 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
132 +index 7511029..f1670e0 100644
133 +--- a/drivers/acpi/ec.c
134 ++++ b/drivers/acpi/ec.c
135 +@@ -201,14 +201,13 @@ unlock:
136 + spin_unlock_irqrestore(&ec->curr_lock, flags);
137 + }
138 +
139 +-static void acpi_ec_gpe_query(void *ec_cxt);
140 ++static int acpi_ec_sync_query(struct acpi_ec *ec);
141 +
142 +-static int ec_check_sci(struct acpi_ec *ec, u8 state)
143 ++static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
144 + {
145 + if (state & ACPI_EC_FLAG_SCI) {
146 + if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
147 +- return acpi_os_execute(OSL_EC_BURST_HANDLER,
148 +- acpi_ec_gpe_query, ec);
149 ++ return acpi_ec_sync_query(ec);
150 + }
151 + return 0;
152 + }
153 +@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
154 + {
155 + unsigned long tmp;
156 + int ret = 0;
157 +- pr_debug(PREFIX "transaction start\n");
158 +- /* disable GPE during transaction if storm is detected */
159 +- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
160 +- acpi_disable_gpe(NULL, ec->gpe);
161 +- }
162 + if (EC_FLAGS_MSI)
163 + udelay(ACPI_EC_MSI_UDELAY);
164 + /* start transaction */
165 +@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
166 + clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
167 + spin_unlock_irqrestore(&ec->curr_lock, tmp);
168 + ret = ec_poll(ec);
169 +- pr_debug(PREFIX "transaction end\n");
170 + spin_lock_irqsave(&ec->curr_lock, tmp);
171 + ec->curr = NULL;
172 + spin_unlock_irqrestore(&ec->curr_lock, tmp);
173 +- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
174 +- /* check if we received SCI during transaction */
175 +- ec_check_sci(ec, acpi_ec_read_status(ec));
176 +- /* it is safe to enable GPE outside of transaction */
177 +- acpi_enable_gpe(NULL, ec->gpe);
178 +- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
179 +- pr_info(PREFIX "GPE storm detected, "
180 +- "transactions will use polling mode\n");
181 +- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
182 +- }
183 + return ret;
184 + }
185 +
186 +@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
187 + status = -ETIME;
188 + goto end;
189 + }
190 ++ pr_debug(PREFIX "transaction start\n");
191 ++ /* disable GPE during transaction if storm is detected */
192 ++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
193 ++ acpi_disable_gpe(NULL, ec->gpe);
194 ++ }
195 ++
196 + status = acpi_ec_transaction_unlocked(ec, t);
197 ++
198 ++ /* check if we received SCI during transaction */
199 ++ ec_check_sci_sync(ec, acpi_ec_read_status(ec));
200 ++ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
201 ++ msleep(1);
202 ++ /* it is safe to enable GPE outside of transaction */
203 ++ acpi_enable_gpe(NULL, ec->gpe);
204 ++ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
205 ++ pr_info(PREFIX "GPE storm detected, "
206 ++ "transactions will use polling mode\n");
207 ++ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
208 ++ }
209 ++ pr_debug(PREFIX "transaction end\n");
210 + end:
211 + if (ec->global_lock)
212 + acpi_release_global_lock(glk);
213 +@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
214 +
215 + EXPORT_SYMBOL(ec_transaction);
216 +
217 +-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
218 ++static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
219 + {
220 + int result;
221 + u8 d;
222 +@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
223 + .wlen = 0, .rlen = 1};
224 + if (!ec || !data)
225 + return -EINVAL;
226 +-
227 + /*
228 + * Query the EC to find out which _Qxx method we need to evaluate.
229 + * Note that successful completion of the query causes the ACPI_EC_SCI
230 + * bit to be cleared (and thus clearing the interrupt source).
231 + */
232 +-
233 +- result = acpi_ec_transaction(ec, &t);
234 ++ result = acpi_ec_transaction_unlocked(ec, &t);
235 + if (result)
236 + return result;
237 +-
238 + if (!d)
239 + return -ENODATA;
240 +-
241 + *data = d;
242 + return 0;
243 + }
244 +@@ -509,43 +507,78 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
245 +
246 + EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
247 +
248 +-static void acpi_ec_gpe_query(void *ec_cxt)
249 ++static void acpi_ec_run(void *cxt)
250 + {
251 +- struct acpi_ec *ec = ec_cxt;
252 +- u8 value = 0;
253 +- struct acpi_ec_query_handler *handler, copy;
254 +-
255 +- if (!ec || acpi_ec_query(ec, &value))
256 ++ struct acpi_ec_query_handler *handler = cxt;
257 ++ if (!handler)
258 + return;
259 +- mutex_lock(&ec->lock);
260 ++ pr_debug(PREFIX "start query execution\n");
261 ++ if (handler->func)
262 ++ handler->func(handler->data);
263 ++ else if (handler->handle)
264 ++ acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
265 ++ pr_debug(PREFIX "stop query execution\n");
266 ++ kfree(handler);
267 ++}
268 ++
269 ++static int acpi_ec_sync_query(struct acpi_ec *ec)
270 ++{
271 ++ u8 value = 0;
272 ++ int status;
273 ++ struct acpi_ec_query_handler *handler, *copy;
274 ++ if ((status = acpi_ec_query_unlocked(ec, &value)))
275 ++ return status;
276 + list_for_each_entry(handler, &ec->list, node) {
277 + if (value == handler->query_bit) {
278 + /* have custom handler for this bit */
279 +- memcpy(&copy, handler, sizeof(copy));
280 +- mutex_unlock(&ec->lock);
281 +- if (copy.func) {
282 +- copy.func(copy.data);
283 +- } else if (copy.handle) {
284 +- acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
285 +- }
286 +- return;
287 ++ copy = kmalloc(sizeof(*handler), GFP_KERNEL);
288 ++ if (!copy)
289 ++ return -ENOMEM;
290 ++ memcpy(copy, handler, sizeof(*copy));
291 ++ pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
292 ++ return acpi_os_execute(OSL_GPE_HANDLER,
293 ++ acpi_ec_run, copy);
294 + }
295 + }
296 ++ return 0;
297 ++}
298 ++
299 ++static void acpi_ec_gpe_query(void *ec_cxt)
300 ++{
301 ++ struct acpi_ec *ec = ec_cxt;
302 ++ if (!ec)
303 ++ return;
304 ++ mutex_lock(&ec->lock);
305 ++ acpi_ec_sync_query(ec);
306 + mutex_unlock(&ec->lock);
307 + }
308 +
309 ++static void acpi_ec_gpe_query(void *ec_cxt);
310 ++
311 ++static int ec_check_sci(struct acpi_ec *ec, u8 state)
312 ++{
313 ++ if (state & ACPI_EC_FLAG_SCI) {
314 ++ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
315 ++ pr_debug(PREFIX "push gpe query to the queue\n");
316 ++ return acpi_os_execute(OSL_NOTIFY_HANDLER,
317 ++ acpi_ec_gpe_query, ec);
318 ++ }
319 ++ }
320 ++ return 0;
321 ++}
322 ++
323 + static u32 acpi_ec_gpe_handler(void *data)
324 + {
325 + struct acpi_ec *ec = data;
326 +- u8 status;
327 +
328 + pr_debug(PREFIX "~~~> interrupt\n");
329 +- status = acpi_ec_read_status(ec);
330 +
331 +- advance_transaction(ec, status);
332 +- if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
333 ++ advance_transaction(ec, acpi_ec_read_status(ec));
334 ++ if (ec_transaction_done(ec) &&
335 ++ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
336 + wake_up(&ec->wait);
337 +- ec_check_sci(ec, status);
338 ++ ec_check_sci(ec, acpi_ec_read_status(ec));
339 ++ }
340 + return ACPI_INTERRUPT_HANDLED;
341 + }
342 +
343 +diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
344 +index d3400b2..dc52f75 100644
345 +--- a/drivers/char/nozomi.c
346 ++++ b/drivers/char/nozomi.c
347 +@@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file)
348 +
349 + dc->open_ttys--;
350 + port->count--;
351 +- tty_port_tty_set(port, NULL);
352 +
353 + if (port->count == 0) {
354 + DBG1("close: %d", nport->token_dl);
355 ++ tty_port_tty_set(port, NULL);
356 + spin_lock_irqsave(&dc->spin_mutex, flags);
357 + dc->last_ier &= ~(nport->token_dl);
358 + writew(dc->last_ier, dc->reg_ier);
359 +diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
360 +index 59499ee..e919bd9 100644
361 +--- a/drivers/char/tty_io.c
362 ++++ b/drivers/char/tty_io.c
363 +@@ -1930,8 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on)
364 + pid = task_pid(current);
365 + type = PIDTYPE_PID;
366 + }
367 +- spin_unlock_irqrestore(&tty->ctrl_lock, flags);
368 + retval = __f_setown(filp, pid, type, 0);
369 ++ spin_unlock_irqrestore(&tty->ctrl_lock, flags);
370 + if (retval)
371 + goto out;
372 + } else {
373 +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
374 +index 083bec2..29e21d3 100644
375 +--- a/drivers/gpu/drm/i915/intel_sdvo.c
376 ++++ b/drivers/gpu/drm/i915/intel_sdvo.c
377 +@@ -472,14 +472,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
378 + }
379 +
380 + /**
381 +- * Don't check status code from this as it switches the bus back to the
382 +- * SDVO chips which defeats the purpose of doing a bus switch in the first
383 +- * place.
384 ++ * Try to read the response after issuing the DDC switch command. Note
385 ++ * that reading the response and issuing the DDC switch command must be
386 ++ * done in one I2C transaction. Otherwise, when we try to start another
387 ++ * I2C transaction after issuing the DDC bus switch, the bus will be
388 ++ * switched back to the internal SDVO register.
389 + */
390 + static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
391 + u8 target)
392 + {
393 +- intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1);
394 ++ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
395 ++ u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
396 ++ struct i2c_msg msgs[] = {
397 ++ {
398 ++ .addr = sdvo_priv->slave_addr >> 1,
399 ++ .flags = 0,
400 ++ .len = 2,
401 ++ .buf = out_buf,
402 ++ },
403 ++ /* the following two are to read the response */
404 ++ {
405 ++ .addr = sdvo_priv->slave_addr >> 1,
406 ++ .flags = 0,
407 ++ .len = 1,
408 ++ .buf = cmd_buf,
409 ++ },
410 ++ {
411 ++ .addr = sdvo_priv->slave_addr >> 1,
412 ++ .flags = I2C_M_RD,
413 ++ .len = 1,
414 ++ .buf = ret_value,
415 ++ },
416 ++ };
417 ++
418 ++ intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
419 ++ &target, 1);
420 ++ /* write the DDC switch command argument */
421 ++ intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target);
422 ++
423 ++ out_buf[0] = SDVO_I2C_OPCODE;
424 ++ out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
425 ++ cmd_buf[0] = SDVO_I2C_CMD_STATUS;
426 ++ cmd_buf[1] = 0;
427 ++ ret_value[0] = 0;
428 ++ ret_value[1] = 0;
429 ++
430 ++ ret = i2c_transfer(intel_output->i2c_bus, msgs, 3);
431 ++ if (ret != 3) {
432 ++ /* failure in I2C transfer */
433 ++ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
434 ++ return;
435 ++ }
436 ++ if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) {
437 ++ DRM_DEBUG_KMS("DDC switch command returns response %d\n",
438 ++ ret_value[0]);
439 ++ return;
440 ++ }
441 ++ return;
442 + }
443 +
444 + static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1)
445 +@@ -1589,6 +1638,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
446 + edid = drm_get_edid(&intel_output->base,
447 + intel_output->ddc_bus);
448 +
449 ++ /* This is only applied to SDVO cards with multiple outputs */
450 ++ if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
451 ++ uint8_t saved_ddc, temp_ddc;
452 ++ saved_ddc = sdvo_priv->ddc_bus;
453 ++ temp_ddc = sdvo_priv->ddc_bus >> 1;
454 ++ /*
455 ++ * Don't use 1 as the DDC bus switch argument to get
456 ++ * the EDID; that bus is used for the SDVO SPD ROM.
457 ++ */
458 ++ while(temp_ddc > 1) {
459 ++ sdvo_priv->ddc_bus = temp_ddc;
460 ++ edid = drm_get_edid(&intel_output->base,
461 ++ intel_output->ddc_bus);
462 ++ if (edid) {
463 ++ /*
464 ++ * If we can read the EDID, this is probably
465 ++ * the correct DDC bus. Record it.
466 ++ */
467 ++ sdvo_priv->ddc_bus = temp_ddc;
468 ++ break;
469 ++ }
470 ++ temp_ddc >>= 1;
471 ++ }
472 ++ if (edid == NULL)
473 ++ sdvo_priv->ddc_bus = saved_ddc;
474 ++ }
475 + /* when there is no edid and no monitor is connected with VGA
476 + * port, try to use the CRT ddc to read the EDID for DVI-connector
477 + */
478 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
479 +index 1a6cb3c..e869128 100644
480 +--- a/drivers/md/dm-table.c
481 ++++ b/drivers/md/dm-table.c
482 +@@ -499,16 +499,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
483 + return 0;
484 + }
485 +
486 +- if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
487 +- DMWARN("%s: target device %s is misaligned: "
488 ++ if (bdev_stack_limits(limits, bdev, start) < 0)
489 ++ DMWARN("%s: adding target device %s caused an alignment inconsistency: "
490 + "physical_block_size=%u, logical_block_size=%u, "
491 + "alignment_offset=%u, start=%llu",
492 + dm_device_name(ti->table->md), bdevname(bdev, b),
493 + q->limits.physical_block_size,
494 + q->limits.logical_block_size,
495 + q->limits.alignment_offset,
496 +- (unsigned long long) start << 9);
497 +-
498 ++ (unsigned long long) start << SECTOR_SHIFT);
499 +
500 + /*
501 + * Check if merge fn is supported.
502 +@@ -1025,9 +1024,9 @@ combine_limits:
503 + * for the table.
504 + */
505 + if (blk_stack_limits(limits, &ti_limits, 0) < 0)
506 +- DMWARN("%s: target device "
507 ++ DMWARN("%s: adding target device "
508 + "(start sect %llu len %llu) "
509 +- "is misaligned",
510 ++ "caused an alignment inconsistency",
511 + dm_device_name(table->md),
512 + (unsigned long long) ti->begin,
513 + (unsigned long long) ti->len);
514 +@@ -1079,15 +1078,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
515 + struct queue_limits *limits)
516 + {
517 + /*
518 +- * Each target device in the table has a data area that should normally
519 +- * be aligned such that the DM device's alignment_offset is 0.
520 +- * FIXME: Propagate alignment_offsets up the stack and warn of
521 +- * sub-optimal or inconsistent settings.
522 +- */
523 +- limits->alignment_offset = 0;
524 +- limits->misaligned = 0;
525 +-
526 +- /*
527 + * Copy table's limits to the DM device's request_queue
528 + */
529 + q->limits = *limits;
530 +diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
531 +index aa8f995..28b4625 100644
532 +--- a/drivers/media/video/gspca/sunplus.c
533 ++++ b/drivers/media/video/gspca/sunplus.c
534 +@@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev)
535 + rc = spca504B_PollingDataReady(gspca_dev);
536 +
537 + /* Init the cam width height with some values get on init ? */
538 +- reg_w_riv(dev, 0x31, 0, 0x04);
539 ++ reg_w_riv(dev, 0x31, 0x04, 0);
540 + spca504B_WaitCmdStatus(gspca_dev);
541 + rc = spca504B_PollingDataReady(gspca_dev);
542 + break;
543 +@@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
544 + default:
545 + /* case BRIDGE_SPCA533: */
546 + /* case BRIDGE_SPCA504B: */
547 +- reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */
548 +- reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */
549 +- reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */
550 ++ reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */
551 ++ reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */
552 ++ reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */
553 + break;
554 + case BRIDGE_SPCA536:
555 +- reg_w_riv(dev, 0, 0x40, 0x20f5);
556 +- reg_w_riv(dev, 0, 0x01, 0x20f4);
557 +- reg_w_riv(dev, 0, 0x00, 0x2089);
558 ++ reg_w_riv(dev, 0, 0x20f5, 0x40);
559 ++ reg_w_riv(dev, 0, 0x20f4, 0x01);
560 ++ reg_w_riv(dev, 0, 0x2089, 0x00);
561 + break;
562 + }
563 + if (pollreg)
564 +@@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
565 + switch (sd->bridge) {
566 + case BRIDGE_SPCA504B:
567 + reg_w_riv(dev, 0x1d, 0x00, 0);
568 +- reg_w_riv(dev, 0, 0x01, 0x2306);
569 +- reg_w_riv(dev, 0, 0x00, 0x0d04);
570 +- reg_w_riv(dev, 0, 0x00, 0x2000);
571 +- reg_w_riv(dev, 0, 0x13, 0x2301);
572 +- reg_w_riv(dev, 0, 0x00, 0x2306);
573 ++ reg_w_riv(dev, 0, 0x2306, 0x01);
574 ++ reg_w_riv(dev, 0, 0x0d04, 0x00);
575 ++ reg_w_riv(dev, 0, 0x2000, 0x00);
576 ++ reg_w_riv(dev, 0, 0x2301, 0x13);
577 ++ reg_w_riv(dev, 0, 0x2306, 0x00);
578 + /* fall thru */
579 + case BRIDGE_SPCA533:
580 + spca504B_PollingDataReady(gspca_dev);
581 +@@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
582 + spca504B_WaitCmdStatus(gspca_dev);
583 + break;
584 + default:
585 +- reg_w_riv(dev, 0x31, 0, 0x04);
586 ++ reg_w_riv(dev, 0x31, 0x04, 0);
587 + spca504B_WaitCmdStatus(gspca_dev);
588 + spca504B_PollingDataReady(gspca_dev);
589 + break;
590 +diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
591 +index e9eae4a..1eac626 100644
592 +--- a/drivers/misc/enclosure.c
593 ++++ b/drivers/misc/enclosure.c
594 +@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
595 + [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
596 + [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
597 + [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
598 ++ [ENCLOSURE_STATUS_MAX] = NULL,
599 + };
600 +
601 + static const char *const enclosure_type [] = {
602 +diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
603 +index 0cce8a4..deac67e 100644
604 +--- a/drivers/serial/8250_pnp.c
605 ++++ b/drivers/serial/8250_pnp.c
606 +@@ -328,15 +328,7 @@ static const struct pnp_device_id pnp_dev_table[] = {
607 + /* U.S. Robotics 56K Voice INT PnP*/
608 + { "USR9190", 0 },
609 + /* Wacom tablets */
610 +- { "WACF004", 0 },
611 +- { "WACF005", 0 },
612 +- { "WACF006", 0 },
613 +- { "WACF007", 0 },
614 +- { "WACF008", 0 },
615 +- { "WACF009", 0 },
616 +- { "WACF00A", 0 },
617 +- { "WACF00B", 0 },
618 +- { "WACF00C", 0 },
619 ++ { "WACFXXX", 0 },
620 + /* Compaq touchscreen */
621 + { "FPI2002", 0 },
622 + /* Fujitsu Stylistic touchscreens */
623 +diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
624 +index f4c2657..43c57b7 100644
625 +--- a/drivers/staging/asus_oled/asus_oled.c
626 ++++ b/drivers/staging/asus_oled/asus_oled.c
627 +@@ -194,9 +194,11 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
628 + {
629 + struct usb_interface *intf = to_usb_interface(dev);
630 + struct asus_oled_dev *odev = usb_get_intfdata(intf);
631 +- int temp = strict_strtoul(buf, 10, NULL);
632 ++ unsigned long value;
633 ++ if (strict_strtoul(buf, 10, &value))
634 ++ return -EINVAL;
635 +
636 +- enable_oled(odev, temp);
637 ++ enable_oled(odev, value);
638 +
639 + return count;
640 + }
641 +@@ -207,10 +209,12 @@ static ssize_t class_set_enabled(struct device *device,
642 + {
643 + struct asus_oled_dev *odev =
644 + (struct asus_oled_dev *) dev_get_drvdata(device);
645 ++ unsigned long value;
646 +
647 +- int temp = strict_strtoul(buf, 10, NULL);
648 ++ if (strict_strtoul(buf, 10, &value))
649 ++ return -EINVAL;
650 +
651 +- enable_oled(odev, temp);
652 ++ enable_oled(odev, value);
653 +
654 + return count;
655 + }
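
The asus_oled fix above corrects a classic strict_strtoul() misuse: the function returns 0 or a negative errno and stores the parsed number through its third argument, so the old code was assigning an error code to the value (and passing NULL for the result). A minimal corrected sysfs store handler, assuming a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t brightness_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        unsigned long value;

        if (strict_strtoul(buf, 10, &value))
                return -EINVAL;         /* reject non-numeric input */

        /* ...apply 'value' to the (hypothetical) device here... */

        return count;   /* a store must consume the whole write */
}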
656 +diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
657 +index c5b6613..c2809f2 100644
658 +--- a/drivers/staging/hv/Hv.c
659 ++++ b/drivers/staging/hv/Hv.c
660 +@@ -386,7 +386,7 @@ u16 HvSignalEvent(void)
661 + * retrieve the initialized message and event pages. Otherwise, we create and
662 + * initialize the message and event pages.
663 + */
664 +-int HvSynicInit(u32 irqVector)
665 ++void HvSynicInit(void *irqarg)
666 + {
667 + u64 version;
668 + union hv_synic_simp simp;
669 +@@ -394,13 +394,14 @@ int HvSynicInit(u32 irqVector)
670 + union hv_synic_sint sharedSint;
671 + union hv_synic_scontrol sctrl;
672 + u64 guestID;
673 +- int ret = 0;
674 ++ u32 irqVector = *((u32 *)(irqarg));
675 ++ int cpu = smp_processor_id();
676 +
677 + DPRINT_ENTER(VMBUS);
678 +
679 + if (!gHvContext.HypercallPage) {
680 + DPRINT_EXIT(VMBUS);
681 +- return ret;
682 ++ return;
683 + }
684 +
685 + /* Check the version */
686 +@@ -425,27 +426,27 @@ int HvSynicInit(u32 irqVector)
687 + */
688 + rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID);
689 + if (guestID == HV_LINUX_GUEST_ID) {
690 +- gHvContext.synICMessagePage[0] =
691 ++ gHvContext.synICMessagePage[cpu] =
692 + phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT);
693 +- gHvContext.synICEventPage[0] =
694 ++ gHvContext.synICEventPage[cpu] =
695 + phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT);
696 + } else {
697 + DPRINT_ERR(VMBUS, "unknown guest id!!");
698 + goto Cleanup;
699 + }
700 + DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p",
701 +- gHvContext.synICMessagePage[0],
702 +- gHvContext.synICEventPage[0]);
703 ++ gHvContext.synICMessagePage[cpu],
704 ++ gHvContext.synICEventPage[cpu]);
705 + } else {
706 +- gHvContext.synICMessagePage[0] = osd_PageAlloc(1);
707 +- if (gHvContext.synICMessagePage[0] == NULL) {
708 ++ gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
709 ++ if (gHvContext.synICMessagePage[cpu] == NULL) {
710 + DPRINT_ERR(VMBUS,
711 + "unable to allocate SYNIC message page!!");
712 + goto Cleanup;
713 + }
714 +
715 +- gHvContext.synICEventPage[0] = osd_PageAlloc(1);
716 +- if (gHvContext.synICEventPage[0] == NULL) {
717 ++ gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
718 ++ if (gHvContext.synICEventPage[cpu] == NULL) {
719 + DPRINT_ERR(VMBUS,
720 + "unable to allocate SYNIC event page!!");
721 + goto Cleanup;
722 +@@ -454,7 +455,7 @@ int HvSynicInit(u32 irqVector)
723 + /* Setup the Synic's message page */
724 + rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64);
725 + simp.SimpEnabled = 1;
726 +- simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0])
727 ++ simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu])
728 + >> PAGE_SHIFT;
729 +
730 + DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx",
731 +@@ -465,7 +466,7 @@ int HvSynicInit(u32 irqVector)
732 + /* Setup the Synic's event page */
733 + rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
734 + siefp.SiefpEnabled = 1;
735 +- siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0])
736 ++ siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu])
737 + >> PAGE_SHIFT;
738 +
739 + DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx",
740 +@@ -501,32 +502,30 @@ int HvSynicInit(u32 irqVector)
741 +
742 + DPRINT_EXIT(VMBUS);
743 +
744 +- return ret;
745 ++ return;
746 +
747 + Cleanup:
748 +- ret = -1;
749 +-
750 + if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
751 +- if (gHvContext.synICEventPage[0])
752 +- osd_PageFree(gHvContext.synICEventPage[0], 1);
753 ++ if (gHvContext.synICEventPage[cpu])
754 ++ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
755 +
756 +- if (gHvContext.synICMessagePage[0])
757 +- osd_PageFree(gHvContext.synICMessagePage[0], 1);
758 ++ if (gHvContext.synICMessagePage[cpu])
759 ++ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
760 + }
761 +
762 + DPRINT_EXIT(VMBUS);
763 +-
764 +- return ret;
765 ++ return;
766 + }
767 +
768 + /**
769 + * HvSynicCleanup - Cleanup routine for HvSynicInit().
770 + */
771 +-void HvSynicCleanup(void)
772 ++void HvSynicCleanup(void *arg)
773 + {
774 + union hv_synic_sint sharedSint;
775 + union hv_synic_simp simp;
776 + union hv_synic_siefp siefp;
777 ++ int cpu = smp_processor_id();
778 +
779 + DPRINT_ENTER(VMBUS);
780 +
781 +@@ -539,6 +538,7 @@ void HvSynicCleanup(void)
782 +
783 + sharedSint.Masked = 1;
784 +
785 ++ /* Need to correctly cleanup in the case of SMP!!! */
786 + /* Disable the interrupt */
787 + wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64);
788 +
789 +@@ -560,8 +560,8 @@ void HvSynicCleanup(void)
790 +
791 + wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64);
792 +
793 +- osd_PageFree(gHvContext.synICMessagePage[0], 1);
794 +- osd_PageFree(gHvContext.synICEventPage[0], 1);
795 ++ osd_PageFree(gHvContext.synICMessagePage[cpu], 1);
796 ++ osd_PageFree(gHvContext.synICEventPage[cpu], 1);
797 + }
798 +
799 + DPRINT_EXIT(VMBUS);
800 +diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h
801 +index 5379e4b..fce4b5c 100644
802 +--- a/drivers/staging/hv/Hv.h
803 ++++ b/drivers/staging/hv/Hv.h
804 +@@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = {
805 + },
806 + };
807 +
808 +-#define MAX_NUM_CPUS 1
809 ++#define MAX_NUM_CPUS 32
810 +
811 +
812 + struct hv_input_signal_event_buffer {
813 +@@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId,
814 +
815 + extern u16 HvSignalEvent(void);
816 +
817 +-extern int HvSynicInit(u32 irqVector);
818 ++extern void HvSynicInit(void *irqarg);
819 +
820 +-extern void HvSynicCleanup(void);
821 ++extern void HvSynicCleanup(void *arg);
822 +
823 + #endif /* __HV_H__ */
824 +diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c
825 +index a4dd06f..35a023e 100644
826 +--- a/drivers/staging/hv/Vmbus.c
827 ++++ b/drivers/staging/hv/Vmbus.c
828 +@@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo)
829 +
830 + /* strcpy(dev->name, "vmbus"); */
831 + /* SynIC setup... */
832 +- ret = HvSynicInit(*irqvector);
833 ++ on_each_cpu(HvSynicInit, (void *)irqvector, 1);
834 +
835 + /* Connect to VMBus in the root partition */
836 + ret = VmbusConnect();
837 +@@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev)
838 + DPRINT_ENTER(VMBUS);
839 + VmbusChannelReleaseUnattachedChannels();
840 + VmbusDisconnect();
841 +- HvSynicCleanup();
842 ++ on_each_cpu(HvSynicCleanup, NULL, 1);
843 + DPRINT_EXIT(VMBUS);
844 +
845 + return ret;
846 +@@ -173,7 +173,8 @@ static void VmbusOnCleanup(struct hv_driver *drv)
847 + */
848 + static void VmbusOnMsgDPC(struct hv_driver *drv)
849 + {
850 +- void *page_addr = gHvContext.synICMessagePage[0];
851 ++ int cpu = smp_processor_id();
852 ++ void *page_addr = gHvContext.synICMessagePage[cpu];
853 + struct hv_message *msg = (struct hv_message *)page_addr +
854 + VMBUS_MESSAGE_SINT;
855 + struct hv_message *copied;
856 +@@ -230,11 +231,12 @@ static void VmbusOnEventDPC(struct hv_driver *drv)
857 + static int VmbusOnISR(struct hv_driver *drv)
858 + {
859 + int ret = 0;
860 ++ int cpu = smp_processor_id();
861 + void *page_addr;
862 + struct hv_message *msg;
863 + union hv_synic_event_flags *event;
864 +
865 +- page_addr = gHvContext.synICMessagePage[0];
866 ++ page_addr = gHvContext.synICMessagePage[cpu];
867 + msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
868 +
869 + DPRINT_ENTER(VMBUS);
870 +@@ -248,7 +250,7 @@ static int VmbusOnISR(struct hv_driver *drv)
871 + }
872 +
873 + /* TODO: Check if there are events to be process */
874 +- page_addr = gHvContext.synICEventPage[0];
875 ++ page_addr = gHvContext.synICEventPage[cpu];
876 + event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
877 +
878 + /* Since we are a child, we only need to check bit 0 */
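
The Hyper-V changes convert single-CPU SynIC state (index 0 everywhere) into per-CPU state: MAX_NUM_CPUS grows to 32, the init/cleanup routines index their page arrays by smp_processor_id(), and VmbusOnDeviceAdd()/VmbusOnDeviceRemove() fan them out with on_each_cpu(). Because the callback runs in IPI context with interrupts disabled, the allocation also switches to get_zeroed_page(GFP_ATOMIC). A stripped-down sketch of the pattern (it inherits the driver's assumption that smp_processor_id() < MAX_NUM_CPUS):

#include <linux/gfp.h>
#include <linux/smp.h>

#define MAX_NUM_CPUS 32                 /* mirrors the constant above */

static void *percpu_page[MAX_NUM_CPUS];

static void percpu_setup(void *unused)
{
        int cpu = smp_processor_id();   /* stable: IPI context */

        percpu_page[cpu] = (void *)get_zeroed_page(GFP_ATOMIC);
}

static void percpu_setup_all(void)
{
        /* wait=1: do not return until every CPU has run the callback */
        on_each_cpu(percpu_setup, NULL, 1);
}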
879 +diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
880 +index 96f1171..355dffc 100644
881 +--- a/drivers/usb/core/devices.c
882 ++++ b/drivers/usb/core/devices.c
883 +@@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
884 + return 0;
885 + /* allocate 2^1 pages = 8K (on i386);
886 + * should be more than enough for one device */
887 +- pages_start = (char *)__get_free_pages(GFP_KERNEL, 1);
888 ++ pages_start = (char *)__get_free_pages(GFP_NOIO, 1);
889 + if (!pages_start)
890 + return -ENOMEM;
891 +
892 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
893 +index 8b0c235..1a7d54b 100644
894 +--- a/drivers/usb/core/hub.c
895 ++++ b/drivers/usb/core/hub.c
896 +@@ -3286,6 +3286,9 @@ static void hub_events(void)
897 + USB_PORT_FEAT_C_SUSPEND);
898 + udev = hdev->children[i-1];
899 + if (udev) {
900 ++ /* TRSMRCY = 10 msec */
901 ++ msleep(10);
902 ++
903 + usb_lock_device(udev);
904 + ret = remote_wakeup(hdev->
905 + children[i-1]);
906 +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
907 +index da718e8..980a8d2 100644
908 +--- a/drivers/usb/core/message.c
909 ++++ b/drivers/usb/core/message.c
910 +@@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index)
911 + if (index <= 0)
912 + return NULL;
913 +
914 +- buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
915 ++ buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
916 + if (buf) {
917 + len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
918 + if (len > 0) {
919 +- smallbuf = kmalloc(++len, GFP_KERNEL);
920 ++ smallbuf = kmalloc(++len, GFP_NOIO);
921 + if (!smallbuf)
922 + return buf;
923 + memcpy(smallbuf, buf, len);
924 +@@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
925 + if (cp) {
926 + nintf = cp->desc.bNumInterfaces;
927 + new_interfaces = kmalloc(nintf * sizeof(*new_interfaces),
928 +- GFP_KERNEL);
929 ++ GFP_NOIO);
930 + if (!new_interfaces) {
931 + dev_err(&dev->dev, "Out of memory\n");
932 + return -ENOMEM;
933 +@@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
934 + for (; n < nintf; ++n) {
935 + new_interfaces[n] = kzalloc(
936 + sizeof(struct usb_interface),
937 +- GFP_KERNEL);
938 ++ GFP_NOIO);
939 + if (!new_interfaces[n]) {
940 + dev_err(&dev->dev, "Out of memory\n");
941 + ret = -ENOMEM;
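
The GFP_KERNEL to GFP_NOIO conversions in devices.c and message.c all enforce one rule: an allocation made while a USB device is suspended, resuming, or being reset must not recurse into I/O, because GFP_KERNEL reclaim can block on page writeback that needs the very storage device being brought back up. Reduced to a sketch:

#include <linux/slab.h>

/*
 * Sketch: buffers needed on a reset/resume path are allocated without
 * __GFP_IO, so memory reclaim cannot deadlock against the device.
 */
static void *resume_path_alloc(size_t len)
{
        return kmalloc(len, GFP_NOIO);
}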
942 +diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
943 +index 8752e55..fcdcad4 100644
944 +--- a/drivers/usb/core/sysfs.c
945 ++++ b/drivers/usb/core/sysfs.c
946 +@@ -115,6 +115,12 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
947 + case USB_SPEED_HIGH:
948 + speed = "480";
949 + break;
950 ++ case USB_SPEED_VARIABLE:
951 ++ speed = "480";
952 ++ break;
953 ++ case USB_SPEED_SUPER:
954 ++ speed = "5000";
955 ++ break;
956 + default:
957 + speed = "unknown";
958 + }
959 +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
960 +index f5f5601..e18c677 100644
961 +--- a/drivers/usb/host/ehci-hcd.c
962 ++++ b/drivers/usb/host/ehci-hcd.c
963 +@@ -785,9 +785,10 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
964 +
965 + /* start 20 msec resume signaling from this port,
966 + * and make khubd collect PORT_STAT_C_SUSPEND to
967 +- * stop that signaling.
968 ++ * stop that signaling. Use 5 ms extra for safety,
969 ++ * like usb_port_resume() does.
970 + */
971 +- ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
972 ++ ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
973 + ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
974 + mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
975 + }
976 +diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
977 +index 1b6f1c0..698f461 100644
978 +--- a/drivers/usb/host/ehci-hub.c
979 ++++ b/drivers/usb/host/ehci-hub.c
980 +@@ -120,9 +120,26 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
981 + del_timer_sync(&ehci->watchdog);
982 + del_timer_sync(&ehci->iaa_watchdog);
983 +
984 +- port = HCS_N_PORTS (ehci->hcs_params);
985 + spin_lock_irq (&ehci->lock);
986 +
987 ++ /* Once the controller is stopped, port resumes that are already
988 ++ * in progress won't complete. Hence if remote wakeup is enabled
989 ++ * for the root hub and any ports are in the middle of a resume or
990 ++ * remote wakeup, we must fail the suspend.
991 ++ */
992 ++ if (hcd->self.root_hub->do_remote_wakeup) {
993 ++ port = HCS_N_PORTS(ehci->hcs_params);
994 ++ while (port--) {
995 ++ if (ehci->reset_done[port] != 0) {
996 ++ spin_unlock_irq(&ehci->lock);
997 ++ ehci_dbg(ehci, "suspend failed because "
998 ++ "port %d is resuming\n",
999 ++ port + 1);
1000 ++ return -EBUSY;
1001 ++ }
1002 ++ }
1003 ++ }
1004 ++
1005 + /* stop schedules, clean any completed work */
1006 + if (HC_IS_RUNNING(hcd->state)) {
1007 + ehci_quiesce (ehci);
1008 +@@ -138,6 +155,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
1009 + */
1010 + ehci->bus_suspended = 0;
1011 + ehci->owned_ports = 0;
1012 ++ port = HCS_N_PORTS(ehci->hcs_params);
1013 + while (port--) {
1014 + u32 __iomem *reg = &ehci->regs->port_status [port];
1015 + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
1016 +diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
1017 +index 139a2cc..c0d4b39 100644
1018 +--- a/drivers/usb/host/ehci-q.c
1019 ++++ b/drivers/usb/host/ehci-q.c
1020 +@@ -827,9 +827,10 @@ qh_make (
1021 + * But interval 1 scheduling is simpler, and
1022 + * includes high bandwidth.
1023 + */
1024 +- dbg ("intr period %d uframes, NYET!",
1025 +- urb->interval);
1026 +- goto done;
1027 ++ urb->interval = 1;
1028 ++ } else if (qh->period > ehci->periodic_size) {
1029 ++ qh->period = ehci->periodic_size;
1030 ++ urb->interval = qh->period << 3;
1031 + }
1032 + } else {
1033 + int think_time;
1034 +@@ -852,6 +853,10 @@ qh_make (
1035 + usb_calc_bus_time (urb->dev->speed,
1036 + is_input, 0, max_packet (maxp)));
1037 + qh->period = urb->interval;
1038 ++ if (qh->period > ehci->periodic_size) {
1039 ++ qh->period = ehci->periodic_size;
1040 ++ urb->interval = qh->period;
1041 ++ }
1042 + }
1043 + }
1044 +
1045 +diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
1046 +index 5cd0e48..99cd00f 100644
1047 +--- a/drivers/usb/host/uhci-hcd.c
1048 ++++ b/drivers/usb/host/uhci-hcd.c
1049 +@@ -749,7 +749,20 @@ static int uhci_rh_suspend(struct usb_hcd *hcd)
1050 + spin_lock_irq(&uhci->lock);
1051 + if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))
1052 + rc = -ESHUTDOWN;
1053 +- else if (!uhci->dead)
1054 ++ else if (uhci->dead)
1055 ++ ; /* Dead controllers tell no tales */
1056 ++
1057 ++ /* Once the controller is stopped, port resumes that are already
1058 ++ * in progress won't complete. Hence if remote wakeup is enabled
1059 ++ * for the root hub and any ports are in the middle of a resume or
1060 ++ * remote wakeup, we must fail the suspend.
1061 ++ */
1062 ++ else if (hcd->self.root_hub->do_remote_wakeup &&
1063 ++ uhci->resuming_ports) {
1064 ++ dev_dbg(uhci_dev(uhci), "suspend failed because a port "
1065 ++ "is resuming\n");
1066 ++ rc = -EBUSY;
1067 ++ } else
1068 + suspend_rh(uhci, UHCI_RH_SUSPENDED);
1069 + spin_unlock_irq(&uhci->lock);
1070 + return rc;
1071 +diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
1072 +index 885b585..8270055 100644
1073 +--- a/drivers/usb/host/uhci-hub.c
1074 ++++ b/drivers/usb/host/uhci-hub.c
1075 +@@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
1076 + /* Port received a wakeup request */
1077 + set_bit(port, &uhci->resuming_ports);
1078 + uhci->ports_timeout = jiffies +
1079 +- msecs_to_jiffies(20);
1080 ++ msecs_to_jiffies(25);
1081 +
1082 + /* Make sure we see the port again
1083 + * after the resuming period is over. */
1084 +diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
1085 +index bbe005c..e0fb294 100644
1086 +--- a/drivers/usb/serial/generic.c
1087 ++++ b/drivers/usb/serial/generic.c
1088 +@@ -489,6 +489,8 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
1089 + dbg("%s - port %d", __func__, port->number);
1090 +
1091 + if (port->serial->type->max_in_flight_urbs) {
1092 ++ kfree(urb->transfer_buffer);
1093 ++
1094 + spin_lock_irqsave(&port->lock, flags);
1095 + --port->urbs_in_flight;
1096 + port->tx_bytes_flight -= urb->transfer_buffer_length;
1097 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1098 +index 64a0a2c..c932f90 100644
1099 +--- a/drivers/usb/storage/unusual_devs.h
1100 ++++ b/drivers/usb/storage/unusual_devs.h
1101 +@@ -1807,13 +1807,6 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
1102 + US_SC_DEVICE, US_PR_DEVICE, NULL,
1103 + US_FL_GO_SLOW ),
1104 +
1105 +-/* Reported by Rohan Hart <rohan.hart17@×××××.com> */
1106 +-UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1107 +- "INTOVA",
1108 +- "Pixtreme",
1109 +- US_SC_DEVICE, US_PR_DEVICE, NULL,
1110 +- US_FL_FIX_CAPACITY ),
1111 +-
1112 + /* Reported by Frederic Marchal <frederic.marchal@××××××××××.com>
1113 + * Mio Moov 330
1114 + */
1115 +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
1116 +index 716c8d7..33197fa 100644
1117 +--- a/drivers/usb/storage/usb.c
1118 ++++ b/drivers/usb/storage/usb.c
1119 +@@ -430,7 +430,8 @@ static void adjust_quirks(struct us_data *us)
1120 + u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor);
1121 + u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
1122 + unsigned f = 0;
1123 +- unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY |
1124 ++ unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE |
1125 ++ US_FL_FIX_CAPACITY |
1126 + US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE |
1127 + US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
1128 + US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
1129 +diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
1130 +index fbb6e5e..7cb0a59 100644
1131 +--- a/fs/ecryptfs/crypto.c
1132 ++++ b/fs/ecryptfs/crypto.c
1133 +@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1134 + char *cipher_name, size_t *key_size)
1135 + {
1136 + char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
1137 +- char *full_alg_name;
1138 ++ char *full_alg_name = NULL;
1139 + int rc;
1140 +
1141 + *key_tfm = NULL;
1142 +@@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1143 + if (rc)
1144 + goto out;
1145 + *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
1146 +- kfree(full_alg_name);
1147 + if (IS_ERR(*key_tfm)) {
1148 + rc = PTR_ERR(*key_tfm);
1149 + printk(KERN_ERR "Unable to allocate crypto cipher with name "
1150 +@@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
1151 + goto out;
1152 + }
1153 + out:
1154 ++ kfree(full_alg_name);
1155 + return rc;
1156 + }
1157 +
1158 +diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
1159 +index 9e94405..1744f17 100644
1160 +--- a/fs/ecryptfs/file.c
1161 ++++ b/fs/ecryptfs/file.c
1162 +@@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1163 + | ECRYPTFS_ENCRYPTED);
1164 + }
1165 + mutex_unlock(&crypt_stat->cs_mutex);
1166 +- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
1167 +- && !(file->f_flags & O_RDONLY)) {
1168 +- rc = -EPERM;
1169 +- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
1170 +- "file must hence be opened RO\n", __func__);
1171 +- goto out;
1172 +- }
1173 + if (!ecryptfs_inode_to_private(inode)->lower_file) {
1174 + rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
1175 + if (rc) {
1176 +@@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
1177 + goto out;
1178 + }
1179 + }
1180 ++ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
1181 ++ && !(file->f_flags & O_RDONLY)) {
1182 ++ rc = -EPERM;
1183 ++ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
1184 ++ "file must hence be opened RO\n", __func__);
1185 ++ goto out;
1186 ++ }
1187 + ecryptfs_set_file_lower(
1188 + file, ecryptfs_inode_to_private(inode)->lower_file);
1189 + if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
1190 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
1191 +index adf99c6..912b8ff 100644
1192 +--- a/include/linux/blkdev.h
1193 ++++ b/include/linux/blkdev.h
1194 +@@ -942,6 +942,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1195 + extern void blk_set_default_limits(struct queue_limits *lim);
1196 + extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1197 + sector_t offset);
1198 ++extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1199 ++ sector_t offset);
1200 + extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1201 + sector_t offset);
1202 + extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1203 +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
1204 +index 90d1c21..9a33c5f 100644
1205 +--- a/include/linux/enclosure.h
1206 ++++ b/include/linux/enclosure.h
1207 +@@ -42,6 +42,8 @@ enum enclosure_status {
1208 + ENCLOSURE_STATUS_NOT_INSTALLED,
1209 + ENCLOSURE_STATUS_UNKNOWN,
1210 + ENCLOSURE_STATUS_UNAVAILABLE,
1211 ++ /* last element for counting purposes */
1212 ++ ENCLOSURE_STATUS_MAX
1213 + };
1214 +
1215 + /* SFF-8485 activity light settings */
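
Paired with the enclosure.c hunk earlier, this introduces a sentinel enum: the trailing ENCLOSURE_STATUS_MAX member both terminates the name table and gives lookups an upper bound, so an out-of-range status code from hardware can no longer index past the end of the array. A generic sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>

enum widget_status {
        WIDGET_STATUS_OK,
        WIDGET_STATUS_CRITICAL,
        WIDGET_STATUS_UNKNOWN,
        /* last element, for counting purposes only */
        WIDGET_STATUS_MAX
};

static const char *const widget_status_names[] = {
        [WIDGET_STATUS_OK]       = "ok",
        [WIDGET_STATUS_CRITICAL] = "critical",
        [WIDGET_STATUS_UNKNOWN]  = "unknown",
        [WIDGET_STATUS_MAX]      = NULL,        /* explicit terminator */
};

static const char *widget_status_name(int status)
{
        if (status < 0 || status >= WIDGET_STATUS_MAX)
                return "invalid";
        return widget_status_names[status];
}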
1216 +diff --git a/kernel/perf_event.c b/kernel/perf_event.c
1217 +index 6eee915..413d101 100644
1218 +--- a/kernel/perf_event.c
1219 ++++ b/kernel/perf_event.c
1220 +@@ -1359,6 +1359,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1221 + if (event->state != PERF_EVENT_STATE_ACTIVE)
1222 + continue;
1223 +
1224 ++ if (event->cpu != -1 && event->cpu != smp_processor_id())
1225 ++ continue;
1226 ++
1227 + hwc = &event->hw;
1228 +
1229 + interrupts = hwc->interrupts;
1230 +@@ -3226,6 +3229,12 @@ static void perf_event_task_output(struct perf_event *event,
1231 +
1232 + static int perf_event_task_match(struct perf_event *event)
1233 + {
1234 ++ if (event->state != PERF_EVENT_STATE_ACTIVE)
1235 ++ return 0;
1236 ++
1237 ++ if (event->cpu != -1 && event->cpu != smp_processor_id())
1238 ++ return 0;
1239 ++
1240 + if (event->attr.comm || event->attr.mmap || event->attr.task)
1241 + return 1;
1242 +
1243 +@@ -3255,13 +3264,13 @@ static void perf_event_task_event(struct perf_task_event *task_event)
1244 +
1245 + cpuctx = &get_cpu_var(perf_cpu_context);
1246 + perf_event_task_ctx(&cpuctx->ctx, task_event);
1247 +- put_cpu_var(perf_cpu_context);
1248 +
1249 + rcu_read_lock();
1250 + if (!ctx)
1251 + ctx = rcu_dereference(task_event->task->perf_event_ctxp);
1252 + if (ctx)
1253 + perf_event_task_ctx(ctx, task_event);
1254 ++ put_cpu_var(perf_cpu_context);
1255 + rcu_read_unlock();
1256 + }
1257 +
1258 +@@ -3338,6 +3347,12 @@ static void perf_event_comm_output(struct perf_event *event,
1259 +
1260 + static int perf_event_comm_match(struct perf_event *event)
1261 + {
1262 ++ if (event->state != PERF_EVENT_STATE_ACTIVE)
1263 ++ return 0;
1264 ++
1265 ++ if (event->cpu != -1 && event->cpu != smp_processor_id())
1266 ++ return 0;
1267 ++
1268 + if (event->attr.comm)
1269 + return 1;
1270 +
1271 +@@ -3378,7 +3393,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
1272 +
1273 + cpuctx = &get_cpu_var(perf_cpu_context);
1274 + perf_event_comm_ctx(&cpuctx->ctx, comm_event);
1275 +- put_cpu_var(perf_cpu_context);
1276 +
1277 + rcu_read_lock();
1278 + /*
1279 +@@ -3388,6 +3402,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
1280 + ctx = rcu_dereference(current->perf_event_ctxp);
1281 + if (ctx)
1282 + perf_event_comm_ctx(ctx, comm_event);
1283 ++ put_cpu_var(perf_cpu_context);
1284 + rcu_read_unlock();
1285 + }
1286 +
1287 +@@ -3462,6 +3477,12 @@ static void perf_event_mmap_output(struct perf_event *event,
1288 + static int perf_event_mmap_match(struct perf_event *event,
1289 + struct perf_mmap_event *mmap_event)
1290 + {
1291 ++ if (event->state != PERF_EVENT_STATE_ACTIVE)
1292 ++ return 0;
1293 ++
1294 ++ if (event->cpu != -1 && event->cpu != smp_processor_id())
1295 ++ return 0;
1296 ++
1297 + if (event->attr.mmap)
1298 + return 1;
1299 +
1300 +@@ -3539,7 +3560,6 @@ got_name:
1301 +
1302 + cpuctx = &get_cpu_var(perf_cpu_context);
1303 + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
1304 +- put_cpu_var(perf_cpu_context);
1305 +
1306 + rcu_read_lock();
1307 + /*
1308 +@@ -3549,6 +3569,7 @@ got_name:
1309 + ctx = rcu_dereference(current->perf_event_ctxp);
1310 + if (ctx)
1311 + perf_event_mmap_ctx(ctx, mmap_event);
1312 ++ put_cpu_var(perf_cpu_context);
1313 + rcu_read_unlock();
1314 +
1315 + kfree(buf);
1316 +@@ -3811,6 +3832,9 @@ static int perf_swevent_match(struct perf_event *event,
1317 + enum perf_type_id type,
1318 + u32 event_id, struct pt_regs *regs)
1319 + {
1320 ++ if (event->cpu != -1 && event->cpu != smp_processor_id())
1321 ++ return 0;
1322 ++
1323 + if (!perf_swevent_is_counting(event))
1324 + return 0;
1325 +
1326 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
1327 +index 7758726..a3a99d3 100644
1328 +--- a/mm/vmalloc.c
1329 ++++ b/mm/vmalloc.c
1330 +@@ -555,10 +555,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
1331 + }
1332 + rcu_read_unlock();
1333 +
1334 +- if (nr) {
1335 +- BUG_ON(nr > atomic_read(&vmap_lazy_nr));
1336 ++ if (nr)
1337 + atomic_sub(nr, &vmap_lazy_nr);
1338 +- }
1339 +
1340 + if (nr || force_flush)
1341 + flush_tlb_kernel_range(*start, *end);
1342 +diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
1343 +index e8a510d..4101afe 100644
1344 +--- a/tools/perf/builtin-timechart.c
1345 ++++ b/tools/perf/builtin-timechart.c
1346 +@@ -275,7 +275,7 @@ static u64 cpus_pstate_state[MAX_CPUS];
1347 + static int
1348 + process_comm_event(event_t *event)
1349 + {
1350 +- pid_set_comm(event->comm.pid, event->comm.comm);
1351 ++ pid_set_comm(event->comm.tid, event->comm.comm);
1352 + return 0;
1353 + }
1354 + static int