From: "Tom Wijsman (tomwij)" <tomwij@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2327 - in genpatches-2.6/trunk: 3.0 3.2 3.4
Date: Sun, 31 Mar 2013 21:28:11
Message-Id: 20130331212805.EB73F2171D@flycatcher.gentoo.org
1 Author: tomwij
2 Date: 2013-03-31 21:28:05 +0000 (Sun, 31 Mar 2013)
3 New Revision: 2327
4
5 Added:
6 genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch
7 genpatches-2.6/trunk/3.2/1041_linux-3.2.42.patch
8 genpatches-2.6/trunk/3.4/1037_linux-3.4.38.patch
9 Modified:
10 genpatches-2.6/trunk/3.0/0000_README
11 genpatches-2.6/trunk/3.2/0000_README
12 genpatches-2.6/trunk/3.4/0000_README
13 Log:
14 Linux patches 3.0.71, 3.2.42 and 3.4.38.
15
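For context, each entry added to a 0000_README below points at an incremental patch applied on top of the previous vanilla release. A minimal sketch of applying the 3.0 patch from this revision by hand, assuming a vanilla linux-3.0.70 tree and a local genpatches checkout (the paths below are illustrative, not mandated by this commit):

    cd /usr/src/linux-3.0.70
    # verify the incremental diff applies cleanly before touching the tree
    patch -p1 --dry-run < ~/genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch
    # apply it for real; the tree then corresponds to Linux 3.0.71
    patch -p1 < ~/genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch
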
16 Modified: genpatches-2.6/trunk/3.0/0000_README
17 ===================================================================
18 --- genpatches-2.6/trunk/3.0/0000_README 2013-03-30 11:08:52 UTC (rev 2326)
19 +++ genpatches-2.6/trunk/3.0/0000_README 2013-03-31 21:28:05 UTC (rev 2327)
20 @@ -315,6 +315,10 @@
21 From: http://www.kernel.org
22 Desc: Linux 3.0.70
23
24 +Patch: 1070_linux-3.0.71.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 3.0.71
27 +
28 Patch: 1800_fix-zcache-build.patch
29 From: http://bugs.gentoo.org/show_bug.cgi?id=376325
30 Desc: Fix zcache build error
31
32 Added: genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch
33 ===================================================================
34 --- genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch (rev 0)
35 +++ genpatches-2.6/trunk/3.0/1070_linux-3.0.71.patch 2013-03-31 21:28:05 UTC (rev 2327)
36 @@ -0,0 +1,1817 @@
37 +diff --git a/Makefile b/Makefile
38 +index 6754172..fbba8bc 100644
39 +--- a/Makefile
40 ++++ b/Makefile
41 +@@ -1,6 +1,6 @@
42 + VERSION = 3
43 + PATCHLEVEL = 0
44 +-SUBLEVEL = 70
45 ++SUBLEVEL = 71
46 + EXTRAVERSION =
47 + NAME = Sneaky Weasel
48 +
49 +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
50 +index b7c2849..554b7b5 100644
51 +--- a/arch/x86/lib/usercopy_64.c
52 ++++ b/arch/x86/lib/usercopy_64.c
53 +@@ -169,10 +169,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
54 + char c;
55 + unsigned zero_len;
56 +
57 +- for (; len; --len) {
58 ++ for (; len; --len, to++) {
59 + if (__get_user_nocheck(c, from++, sizeof(char)))
60 + break;
61 +- if (__put_user_nocheck(c, to++, sizeof(char)))
62 ++ if (__put_user_nocheck(c, to, sizeof(char)))
63 + break;
64 + }
65 +
66 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
67 +index a303b61..b4d7898 100644
68 +--- a/drivers/gpu/drm/drm_edid.c
69 ++++ b/drivers/gpu/drm/drm_edid.c
70 +@@ -841,7 +841,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
71 + unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
72 + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
73 + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
74 +- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
75 ++ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
76 + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
77 +
78 + /* ignore tiny modes */
79 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
80 +index e36efdc..19bab81 100644
81 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
82 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
83 +@@ -119,7 +119,7 @@ static const char *cache_level_str(int type)
84 + static void
85 + describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
86 + {
87 +- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
88 ++ seq_printf(m, "%pK: %s%s %8zd %04x %04x %d %d%s%s%s",
89 + &obj->base,
90 + get_pin_flag(obj),
91 + get_tiling_flag(obj),
92 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
93 +index 490ab6b..1ca53ff 100644
94 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
95 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
96 +@@ -888,15 +888,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
97 + int count)
98 + {
99 + int i;
100 ++ int relocs_total = 0;
101 ++ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
102 +
103 + for (i = 0; i < count; i++) {
104 + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
105 + int length; /* limited by fault_in_pages_readable() */
106 +
107 +- /* First check for malicious input causing overflow */
108 +- if (exec[i].relocation_count >
109 +- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
110 ++ /* First check for malicious input causing overflow in
111 ++ * the worst case where we need to allocate the entire
112 ++ * relocation tree as a single array.
113 ++ */
114 ++ if (exec[i].relocation_count > relocs_max - relocs_total)
115 + return -EINVAL;
116 ++ relocs_total += exec[i].relocation_count;
117 +
118 + length = exec[i].relocation_count *
119 + sizeof(struct drm_i915_gem_relocation_entry);
120 +diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
121 +index d2c7104..e7a97b5 100644
122 +--- a/drivers/gpu/drm/i915/intel_opregion.c
123 ++++ b/drivers/gpu/drm/i915/intel_opregion.c
124 +@@ -413,6 +413,25 @@ blind_set:
125 + goto end;
126 + }
127 +
128 ++static void intel_setup_cadls(struct drm_device *dev)
129 ++{
130 ++ struct drm_i915_private *dev_priv = dev->dev_private;
131 ++ struct intel_opregion *opregion = &dev_priv->opregion;
132 ++ int i = 0;
133 ++ u32 disp_id;
134 ++
135 ++ /* Initialize the CADL field by duplicating the DIDL values.
136 ++ * Technically, this is not always correct as display outputs may exist,
137 ++ * but not active. This initialization is necessary for some Clevo
138 ++ * laptops that check this field before processing the brightness and
139 ++ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
140 ++ * there are less than eight devices. */
141 ++ do {
142 ++ disp_id = ioread32(&opregion->acpi->didl[i]);
143 ++ iowrite32(disp_id, &opregion->acpi->cadl[i]);
144 ++ } while (++i < 8 && disp_id != 0);
145 ++}
146 ++
147 + void intel_opregion_init(struct drm_device *dev)
148 + {
149 + struct drm_i915_private *dev_priv = dev->dev_private;
150 +@@ -422,8 +441,10 @@ void intel_opregion_init(struct drm_device *dev)
151 + return;
152 +
153 + if (opregion->acpi) {
154 +- if (drm_core_check_feature(dev, DRIVER_MODESET))
155 ++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
156 + intel_didl_outputs(dev);
157 ++ intel_setup_cadls(dev);
158 ++ }
159 +
160 + /* Notify BIOS we are ready to handle ACPI video ext notifs.
161 + * Right now, all the events are handled by the ACPI video module.
162 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
163 +index 39913a0..073acdf 100644
164 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
165 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
166 +@@ -753,9 +753,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
167 + if (++priv->tx_outstanding == ipoib_sendq_size) {
168 + ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
169 + tx->qp->qp_num);
170 +- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
171 +- ipoib_warn(priv, "request notify on send CQ failed\n");
172 + netif_stop_queue(dev);
173 ++ rc = ib_req_notify_cq(priv->send_cq,
174 ++ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
175 ++ if (rc < 0)
176 ++ ipoib_warn(priv, "request notify on send CQ failed\n");
177 ++ else if (rc)
178 ++ ipoib_send_comp_handler(priv->send_cq, dev);
179 + }
180 + }
181 + }
182 +diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
183 +index 4309296..8824dd4 100644
184 +--- a/drivers/net/netconsole.c
185 ++++ b/drivers/net/netconsole.c
186 +@@ -630,6 +630,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
187 + goto done;
188 +
189 + spin_lock_irqsave(&target_list_lock, flags);
190 ++restart:
191 + list_for_each_entry(nt, &target_list, list) {
192 + netconsole_target_get(nt);
193 + if (nt->np.dev == dev) {
194 +@@ -642,20 +643,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
195 + case NETDEV_UNREGISTER:
196 + /*
197 + * rtnl_lock already held
198 ++ * we might sleep in __netpoll_cleanup()
199 + */
200 +- if (nt->np.dev) {
201 +- spin_unlock_irqrestore(
202 +- &target_list_lock,
203 +- flags);
204 +- __netpoll_cleanup(&nt->np);
205 +- spin_lock_irqsave(&target_list_lock,
206 +- flags);
207 +- dev_put(nt->np.dev);
208 +- nt->np.dev = NULL;
209 +- }
210 ++ spin_unlock_irqrestore(&target_list_lock, flags);
211 ++ __netpoll_cleanup(&nt->np);
212 ++ spin_lock_irqsave(&target_list_lock, flags);
213 ++ dev_put(nt->np.dev);
214 ++ nt->np.dev = NULL;
215 + nt->enabled = 0;
216 + stopped = true;
217 +- break;
218 ++ netconsole_target_put(nt);
219 ++ goto restart;
220 + }
221 + }
222 + netconsole_target_put(nt);
223 +diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
224 +index be3cade..07e526d 100644
225 +--- a/drivers/net/sfc/efx.c
226 ++++ b/drivers/net/sfc/efx.c
227 +@@ -651,25 +651,30 @@ static void efx_fini_channels(struct efx_nic *efx)
228 + struct efx_channel *channel;
229 + struct efx_tx_queue *tx_queue;
230 + struct efx_rx_queue *rx_queue;
231 ++ struct pci_dev *dev = efx->pci_dev;
232 + int rc;
233 +
234 + EFX_ASSERT_RESET_SERIALISED(efx);
235 + BUG_ON(efx->port_enabled);
236 +
237 +- rc = efx_nic_flush_queues(efx);
238 +- if (rc && EFX_WORKAROUND_7803(efx)) {
239 +- /* Schedule a reset to recover from the flush failure. The
240 +- * descriptor caches reference memory we're about to free,
241 +- * but falcon_reconfigure_mac_wrapper() won't reconnect
242 +- * the MACs because of the pending reset. */
243 +- netif_err(efx, drv, efx->net_dev,
244 +- "Resetting to recover from flush failure\n");
245 +- efx_schedule_reset(efx, RESET_TYPE_ALL);
246 +- } else if (rc) {
247 +- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
248 +- } else {
249 +- netif_dbg(efx, drv, efx->net_dev,
250 +- "successfully flushed all queues\n");
251 ++ /* Only perform flush if dma is enabled */
252 ++ if (dev->is_busmaster) {
253 ++ rc = efx_nic_flush_queues(efx);
254 ++
255 ++ if (rc && EFX_WORKAROUND_7803(efx)) {
256 ++ /* Schedule a reset to recover from the flush failure. The
257 ++ * descriptor caches reference memory we're about to free,
258 ++ * but falcon_reconfigure_mac_wrapper() won't reconnect
259 ++ * the MACs because of the pending reset. */
260 ++ netif_err(efx, drv, efx->net_dev,
261 ++ "Resetting to recover from flush failure\n");
262 ++ efx_schedule_reset(efx, RESET_TYPE_ALL);
263 ++ } else if (rc) {
264 ++ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
265 ++ } else {
266 ++ netif_dbg(efx, drv, efx->net_dev,
267 ++ "successfully flushed all queues\n");
268 ++ }
269 + }
270 +
271 + efx_for_each_channel(channel, efx) {
272 +@@ -715,6 +720,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
273 + unsigned i;
274 + int rc;
275 +
276 ++ efx_device_detach_sync(efx);
277 + efx_stop_all(efx);
278 + efx_fini_channels(efx);
279 +
280 +@@ -758,6 +764,7 @@ out:
281 +
282 + efx_init_channels(efx);
283 + efx_start_all(efx);
284 ++ netif_device_attach(efx->net_dev);
285 + return rc;
286 +
287 + rollback:
288 +@@ -1525,8 +1532,12 @@ static void efx_stop_all(struct efx_nic *efx)
289 + /* Flush efx_mac_work(), refill_workqueue, monitor_work */
290 + efx_flush_all(efx);
291 +
292 +- /* Stop the kernel transmit interface late, so the watchdog
293 +- * timer isn't ticking over the flush */
294 ++ /* Stop the kernel transmit interface. This is only valid if
295 ++ * the device is stopped or detached; otherwise the watchdog
296 ++ * may fire immediately.
297 ++ */
298 ++ WARN_ON(netif_running(efx->net_dev) &&
299 ++ netif_device_present(efx->net_dev));
300 + if (efx_dev_registered(efx)) {
301 + netif_tx_stop_all_queues(efx->net_dev);
302 + netif_tx_lock_bh(efx->net_dev);
303 +@@ -1796,10 +1807,11 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
304 + if (new_mtu > EFX_MAX_MTU)
305 + return -EINVAL;
306 +
307 +- efx_stop_all(efx);
308 +-
309 + netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
310 +
311 ++ efx_device_detach_sync(efx);
312 ++ efx_stop_all(efx);
313 ++
314 + efx_fini_channels(efx);
315 +
316 + mutex_lock(&efx->mac_lock);
317 +@@ -1812,6 +1824,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
318 + efx_init_channels(efx);
319 +
320 + efx_start_all(efx);
321 ++ netif_device_attach(efx->net_dev);
322 + return rc;
323 + }
324 +
325 +@@ -2101,7 +2114,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
326 + netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
327 + RESET_TYPE(method));
328 +
329 +- netif_device_detach(efx->net_dev);
330 ++ efx_device_detach_sync(efx);
331 + efx_reset_down(efx, method);
332 +
333 + rc = efx->type->reset(efx, method);
334 +@@ -2560,7 +2573,7 @@ static int efx_pm_freeze(struct device *dev)
335 +
336 + efx->state = STATE_FINI;
337 +
338 +- netif_device_detach(efx->net_dev);
339 ++ efx_device_detach_sync(efx);
340 +
341 + efx_stop_all(efx);
342 + efx_fini_channels(efx);
343 +diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
344 +index a5d1c60d..76e891e 100644
345 +--- a/drivers/net/sfc/efx.h
346 ++++ b/drivers/net/sfc/efx.h
347 +@@ -150,4 +150,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
348 + extern void efx_link_set_advertising(struct efx_nic *efx, u32);
349 + extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
350 +
351 ++static inline void efx_device_detach_sync(struct efx_nic *efx)
352 ++{
353 ++ struct net_device *dev = efx->net_dev;
354 ++
355 ++ /* Lock/freeze all TX queues so that we can be sure the
356 ++ * TX scheduler is stopped when we're done and before
357 ++ * netif_device_present() becomes false.
358 ++ */
359 ++ netif_tx_lock_bh(dev);
360 ++ netif_device_detach(dev);
361 ++ netif_tx_unlock_bh(dev);
362 ++}
363 ++
364 + #endif /* EFX_EFX_H */
365 +diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
366 +index 60176e8..19b996b3 100644
367 +--- a/drivers/net/sfc/falcon.c
368 ++++ b/drivers/net/sfc/falcon.c
369 +@@ -1714,6 +1714,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
370 + .remove_port = falcon_remove_port,
371 + .handle_global_event = falcon_handle_global_event,
372 + .prepare_flush = falcon_prepare_flush,
373 ++ .finish_flush = efx_port_dummy_op_void,
374 + .update_stats = falcon_update_nic_stats,
375 + .start_stats = falcon_start_nic_stats,
376 + .stop_stats = falcon_stop_nic_stats,
377 +@@ -1755,6 +1756,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
378 + .remove_port = falcon_remove_port,
379 + .handle_global_event = falcon_handle_global_event,
380 + .prepare_flush = falcon_prepare_flush,
381 ++ .finish_flush = efx_port_dummy_op_void,
382 + .update_stats = falcon_update_nic_stats,
383 + .start_stats = falcon_start_nic_stats,
384 + .stop_stats = falcon_stop_nic_stats,
385 +diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
386 +index 95a980f..08addc9 100644
387 +--- a/drivers/net/sfc/filter.c
388 ++++ b/drivers/net/sfc/filter.c
389 +@@ -335,28 +335,35 @@ static int efx_filter_search(struct efx_filter_table *table,
390 + bool for_insert, int *depth_required)
391 + {
392 + unsigned hash, incr, filter_idx, depth, depth_max;
393 +- struct efx_filter_spec *cmp;
394 +
395 + hash = efx_filter_hash(key);
396 + incr = efx_filter_increment(key);
397 +- depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
398 +- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
399 +-
400 +- for (depth = 1, filter_idx = hash & (table->size - 1);
401 +- depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
402 +- ++depth) {
403 +- cmp = &table->spec[filter_idx];
404 +- if (efx_filter_equal(spec, cmp))
405 +- goto found;
406 ++
407 ++ filter_idx = hash & (table->size - 1);
408 ++ depth = 1;
409 ++ depth_max = (for_insert ?
410 ++ (spec->priority <= EFX_FILTER_PRI_HINT ?
411 ++ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
412 ++ table->search_depth[spec->type]);
413 ++
414 ++ for (;;) {
415 ++ /* Return success if entry is used and matches this spec
416 ++ * or entry is unused and we are trying to insert.
417 ++ */
418 ++ if (test_bit(filter_idx, table->used_bitmap) ?
419 ++ efx_filter_equal(spec, &table->spec[filter_idx]) :
420 ++ for_insert) {
421 ++ *depth_required = depth;
422 ++ return filter_idx;
423 ++ }
424 ++
425 ++ /* Return failure if we reached the maximum search depth */
426 ++ if (depth == depth_max)
427 ++ return for_insert ? -EBUSY : -ENOENT;
428 ++
429 + filter_idx = (filter_idx + incr) & (table->size - 1);
430 ++ ++depth;
431 + }
432 +- if (!for_insert)
433 +- return -ENOENT;
434 +- if (depth > depth_max)
435 +- return -EBUSY;
436 +-found:
437 +- *depth_required = depth;
438 +- return filter_idx;
439 + }
440 +
441 + /* Construct/deconstruct external filter IDs */
442 +diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
443 +index 81a4253..c1000ce 100644
444 +--- a/drivers/net/sfc/mcdi.c
445 ++++ b/drivers/net/sfc/mcdi.c
446 +@@ -30,7 +30,7 @@
447 + #define REBOOT_FLAG_PORT0 0x3f8
448 + #define REBOOT_FLAG_PORT1 0x3fc
449 +
450 +-#define MCDI_RPC_TIMEOUT 10 /*seconds */
451 ++#define MCDI_RPC_TIMEOUT (10 * HZ)
452 +
453 + #define MCDI_PDU(efx) \
454 + (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
455 +@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
456 + static int efx_mcdi_poll(struct efx_nic *efx)
457 + {
458 + struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
459 +- unsigned int time, finish;
460 ++ unsigned long time, finish;
461 + unsigned int respseq, respcmd, error;
462 + unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
463 + unsigned int rc, spins;
464 +@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
465 + * and poll once a jiffy (approximately)
466 + */
467 + spins = TICK_USEC;
468 +- finish = get_seconds() + MCDI_RPC_TIMEOUT;
469 ++ finish = jiffies + MCDI_RPC_TIMEOUT;
470 +
471 + while (1) {
472 + if (spins != 0) {
473 +@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
474 + schedule_timeout_uninterruptible(1);
475 + }
476 +
477 +- time = get_seconds();
478 ++ time = jiffies;
479 +
480 + rmb();
481 + efx_readd(efx, &reg, pdu);
482 +@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
483 + EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
484 + break;
485 +
486 +- if (time >= finish)
487 ++ if (time_after(time, finish))
488 + return -ETIMEDOUT;
489 + }
490 +
491 +@@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
492 + if (wait_event_timeout(
493 + mcdi->wq,
494 + atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
495 +- msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
496 ++ MCDI_RPC_TIMEOUT) == 0)
497 + return -ETIMEDOUT;
498 +
499 + /* Check if efx_mcdi_set_mode() switched us back to polled completions.
500 +@@ -666,9 +666,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
501 + u16 *fw_subtype_list)
502 + {
503 + uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
504 +- size_t outlen;
505 ++ size_t outlen, offset, i;
506 + int port_num = efx_port_num(efx);
507 +- int offset;
508 + int rc;
509 +
510 + BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
511 +@@ -688,10 +687,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
512 + : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
513 + if (mac_address)
514 + memcpy(mac_address, outbuf + offset, ETH_ALEN);
515 +- if (fw_subtype_list)
516 +- memcpy(fw_subtype_list,
517 +- outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
518 +- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
519 ++ if (fw_subtype_list) {
520 ++ offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
521 ++ for (i = 0;
522 ++ i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 2;
523 ++ i++) {
524 ++ fw_subtype_list[i] =
525 ++ le16_to_cpup((__le16 *)(outbuf + offset));
526 ++ offset += 2;
527 ++ }
528 ++ }
529 +
530 + return 0;
531 +
532 +diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
533 +index aced2a7..b61eea0 100644
534 +--- a/drivers/net/sfc/mcdi.h
535 ++++ b/drivers/net/sfc/mcdi.h
536 +@@ -126,5 +126,6 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
537 + extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
538 + extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
539 + extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
540 ++extern int efx_mcdi_set_mac(struct efx_nic *efx);
541 +
542 + #endif /* EFX_MCDI_H */
543 +diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
544 +index 50c2077..da269d7 100644
545 +--- a/drivers/net/sfc/mcdi_mac.c
546 ++++ b/drivers/net/sfc/mcdi_mac.c
547 +@@ -13,7 +13,7 @@
548 + #include "mcdi.h"
549 + #include "mcdi_pcol.h"
550 +
551 +-static int efx_mcdi_set_mac(struct efx_nic *efx)
552 ++int efx_mcdi_set_mac(struct efx_nic *efx)
553 + {
554 + u32 reject, fcntl;
555 + u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
556 +@@ -45,6 +45,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
557 + }
558 + if (efx->wanted_fc & EFX_FC_AUTO)
559 + fcntl = MC_CMD_FCNTL_AUTO;
560 ++ if (efx->fc_disable)
561 ++ fcntl = MC_CMD_FCNTL_OFF;
562 +
563 + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
564 +
565 +diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
566 +index e8d5f03..2f932c5 100644
567 +--- a/drivers/net/sfc/net_driver.h
568 ++++ b/drivers/net/sfc/net_driver.h
569 +@@ -214,6 +214,7 @@ struct efx_tx_queue {
570 + * If both this and page are %NULL, the buffer slot is currently free.
571 + * @page: The associated page buffer, if any.
572 + * If both this and skb are %NULL, the buffer slot is currently free.
573 ++ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
574 + * @len: Buffer length, in bytes.
575 + * @is_page: Indicates if @page is valid. If false, @skb is valid.
576 + */
577 +@@ -223,7 +224,8 @@ struct efx_rx_buffer {
578 + struct sk_buff *skb;
579 + struct page *page;
580 + } u;
581 +- unsigned int len;
582 ++ u16 page_offset;
583 ++ u16 len;
584 + bool is_page;
585 + };
586 +
587 +@@ -690,6 +692,9 @@ struct efx_filter_state;
588 + * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
589 + * @multicast_hash: Multicast hash table
590 + * @wanted_fc: Wanted flow control flags
591 ++ * @fc_disable: When non-zero flow control is disabled. Typically used to
592 ++ * ensure that network back pressure doesn't delay dma queue flushes.
593 ++ * Serialised by the rtnl lock.
594 + * @mac_work: Work item for changing MAC promiscuity and multicast hash
595 + * @loopback_mode: Loopback status
596 + * @loopback_modes: Supported loopback mode bitmask
597 +@@ -783,6 +788,7 @@ struct efx_nic {
598 + bool promiscuous;
599 + union efx_multicast_hash multicast_hash;
600 + u8 wanted_fc;
601 ++ unsigned fc_disable;
602 +
603 + atomic_t rx_reset;
604 + enum efx_loopback_mode loopback_mode;
605 +@@ -834,6 +840,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
606 + * @remove_port: Free resources allocated by probe_port()
607 + * @handle_global_event: Handle a "global" event (may be %NULL)
608 + * @prepare_flush: Prepare the hardware for flushing the DMA queues
609 ++ * @finish_flush: Clean up after flushing the DMA queues
610 + * @update_stats: Update statistics not provided by event handling
611 + * @start_stats: Start the regular fetching of statistics
612 + * @stop_stats: Stop the regular fetching of statistics
613 +@@ -879,6 +886,7 @@ struct efx_nic_type {
614 + void (*remove_port)(struct efx_nic *efx);
615 + bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
616 + void (*prepare_flush)(struct efx_nic *efx);
617 ++ void (*finish_flush)(struct efx_nic *efx);
618 + void (*update_stats)(struct efx_nic *efx);
619 + void (*start_stats)(struct efx_nic *efx);
620 + void (*stop_stats)(struct efx_nic *efx);
621 +diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
622 +index 5ac9fa2..4949004 100644
623 +--- a/drivers/net/sfc/nic.c
624 ++++ b/drivers/net/sfc/nic.c
625 +@@ -370,7 +370,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
626 + return false;
627 +
628 + tx_queue->empty_read_count = 0;
629 +- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
630 ++ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
631 ++ && tx_queue->write_count - write_count == 1;
632 + }
633 +
634 + /* For each entry inserted into the software descriptor ring, create a
635 +@@ -1260,13 +1261,27 @@ int efx_nic_flush_queues(struct efx_nic *efx)
636 + }
637 + efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
638 + if (tx_queue->initialised &&
639 +- tx_queue->flushed != FLUSH_DONE)
640 +- ++tx_pending;
641 ++ tx_queue->flushed != FLUSH_DONE) {
642 ++ efx_oword_t txd_ptr_tbl;
643 ++
644 ++ efx_reado_table(efx, &txd_ptr_tbl,
645 ++ FR_BZ_TX_DESC_PTR_TBL,
646 ++ tx_queue->queue);
647 ++ if (EFX_OWORD_FIELD(txd_ptr_tbl,
648 ++ FRF_AZ_TX_DESCQ_FLUSH) ||
649 ++ EFX_OWORD_FIELD(txd_ptr_tbl,
650 ++ FRF_AZ_TX_DESCQ_EN))
651 ++ ++tx_pending;
652 ++ else
653 ++ tx_queue->flushed = FLUSH_DONE;
654 ++ }
655 + }
656 + }
657 +
658 +- if (rx_pending == 0 && tx_pending == 0)
659 ++ if (rx_pending == 0 && tx_pending == 0) {
660 ++ efx->type->finish_flush(efx);
661 + return 0;
662 ++ }
663 +
664 + msleep(EFX_FLUSH_INTERVAL);
665 + efx_poll_flush_events(efx);
666 +@@ -1292,6 +1307,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
667 + }
668 + }
669 +
670 ++ efx->type->finish_flush(efx);
671 + return -ETIMEDOUT;
672 + }
673 +
674 +diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
675 +index d2405ce..8a2c4f5 100644
676 +--- a/drivers/net/sfc/nic.h
677 ++++ b/drivers/net/sfc/nic.h
678 +@@ -211,6 +211,8 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
679 +
680 + /* Global Resources */
681 + extern int efx_nic_flush_queues(struct efx_nic *efx);
682 ++extern void siena_prepare_flush(struct efx_nic *efx);
683 ++extern void siena_finish_flush(struct efx_nic *efx);
684 + extern void falcon_start_nic_stats(struct efx_nic *efx);
685 + extern void falcon_stop_nic_stats(struct efx_nic *efx);
686 + extern void falcon_setup_xaui(struct efx_nic *efx);
687 +diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
688 +index 4004fc2..d429f0a 100644
689 +--- a/drivers/net/sfc/rx.c
690 ++++ b/drivers/net/sfc/rx.c
691 +@@ -94,11 +94,7 @@ static unsigned int rx_refill_limit = 95;
692 + static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
693 + struct efx_rx_buffer *buf)
694 + {
695 +- /* Offset is always within one page, so we don't need to consider
696 +- * the page order.
697 +- */
698 +- return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
699 +- efx->type->rx_buffer_hash_size);
700 ++ return buf->page_offset + efx->type->rx_buffer_hash_size;
701 + }
702 + static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
703 + {
704 +@@ -193,6 +189,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
705 + struct efx_rx_buffer *rx_buf;
706 + struct page *page;
707 + void *page_addr;
708 ++ unsigned int page_offset;
709 + struct efx_rx_page_state *state;
710 + dma_addr_t dma_addr;
711 + unsigned index, count;
712 +@@ -219,12 +216,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
713 +
714 + page_addr += sizeof(struct efx_rx_page_state);
715 + dma_addr += sizeof(struct efx_rx_page_state);
716 ++ page_offset = sizeof(struct efx_rx_page_state);
717 +
718 + split:
719 + index = rx_queue->added_count & rx_queue->ptr_mask;
720 + rx_buf = efx_rx_buffer(rx_queue, index);
721 + rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
722 + rx_buf->u.page = page;
723 ++ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
724 + rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
725 + rx_buf->is_page = true;
726 + ++rx_queue->added_count;
727 +@@ -236,6 +235,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
728 + get_page(page);
729 + dma_addr += (PAGE_SIZE >> 1);
730 + page_addr += (PAGE_SIZE >> 1);
731 ++ page_offset += (PAGE_SIZE >> 1);
732 + ++count;
733 + goto split;
734 + }
735 +@@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
736 + }
737 +
738 + static void efx_unmap_rx_buffer(struct efx_nic *efx,
739 +- struct efx_rx_buffer *rx_buf)
740 ++ struct efx_rx_buffer *rx_buf,
741 ++ unsigned int used_len)
742 + {
743 + if (rx_buf->is_page && rx_buf->u.page) {
744 + struct efx_rx_page_state *state;
745 +@@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
746 + state->dma_addr,
747 + efx_rx_buf_size(efx),
748 + PCI_DMA_FROMDEVICE);
749 ++ } else if (used_len) {
750 ++ dma_sync_single_for_cpu(&efx->pci_dev->dev,
751 ++ rx_buf->dma_addr, used_len,
752 ++ DMA_FROM_DEVICE);
753 + }
754 + } else if (!rx_buf->is_page && rx_buf->u.skb) {
755 + pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
756 +@@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
757 + static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
758 + struct efx_rx_buffer *rx_buf)
759 + {
760 +- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
761 ++ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
762 + efx_free_rx_buffer(rx_queue->efx, rx_buf);
763 + }
764 +
765 +@@ -549,10 +554,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
766 + goto out;
767 + }
768 +
769 +- /* Release card resources - assumes all RX buffers consumed in-order
770 +- * per RX queue
771 ++ /* Release and/or sync DMA mapping - assumes all RX buffers
772 ++ * consumed in-order per RX queue
773 + */
774 +- efx_unmap_rx_buffer(efx, rx_buf);
775 ++ efx_unmap_rx_buffer(efx, rx_buf, len);
776 +
777 + /* Prefetch nice and early so data will (hopefully) be in cache by
778 + * the time we look at it.
779 +diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
780 +index 822f6c2..4907885 100644
781 +--- a/drivers/net/sfc/selftest.c
782 ++++ b/drivers/net/sfc/selftest.c
783 +@@ -698,7 +698,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
784 + /* Detach the device so the kernel doesn't transmit during the
785 + * loopback test and the watchdog timeout doesn't fire.
786 + */
787 +- netif_device_detach(efx->net_dev);
788 ++ efx_device_detach_sync(efx);
789 +
790 + mutex_lock(&efx->mac_lock);
791 + if (efx->loopback_modes) {
792 +diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
793 +index ceac1c9..062494a 100644
794 +--- a/drivers/net/sfc/siena.c
795 ++++ b/drivers/net/sfc/siena.c
796 +@@ -135,6 +135,18 @@ static void siena_remove_port(struct efx_nic *efx)
797 + efx_nic_free_buffer(efx, &efx->stats_buffer);
798 + }
799 +
800 ++void siena_prepare_flush(struct efx_nic *efx)
801 ++{
802 ++ if (efx->fc_disable++ == 0)
803 ++ efx_mcdi_set_mac(efx);
804 ++}
805 ++
806 ++void siena_finish_flush(struct efx_nic *efx)
807 ++{
808 ++ if (--efx->fc_disable == 0)
809 ++ efx_mcdi_set_mac(efx);
810 ++}
811 ++
812 + static const struct efx_nic_register_test siena_register_tests[] = {
813 + { FR_AZ_ADR_REGION,
814 + EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
815 +@@ -372,14 +384,13 @@ static void siena_remove_nic(struct efx_nic *efx)
816 + efx->nic_data = NULL;
817 + }
818 +
819 +-#define STATS_GENERATION_INVALID ((u64)(-1))
820 ++#define STATS_GENERATION_INVALID ((__force __le64)(-1))
821 +
822 + static int siena_try_update_nic_stats(struct efx_nic *efx)
823 + {
824 +- u64 *dma_stats;
825 ++ __le64 *dma_stats;
826 + struct efx_mac_stats *mac_stats;
827 +- u64 generation_start;
828 +- u64 generation_end;
829 ++ __le64 generation_start, generation_end;
830 +
831 + mac_stats = &efx->mac_stats;
832 + dma_stats = (u64 *)efx->stats_buffer.addr;
833 +@@ -390,7 +401,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
834 + rmb();
835 +
836 + #define MAC_STAT(M, D) \
837 +- mac_stats->M = dma_stats[MC_CMD_MAC_ ## D]
838 ++ mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
839 +
840 + MAC_STAT(tx_bytes, TX_BYTES);
841 + MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
842 +@@ -460,7 +471,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
843 + MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
844 + mac_stats->rx_good_lt64 = 0;
845 +
846 +- efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS];
847 ++ efx->n_rx_nodesc_drop_cnt =
848 ++ le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
849 +
850 + #undef MAC_STAT
851 +
852 +@@ -489,7 +501,7 @@ static void siena_update_nic_stats(struct efx_nic *efx)
853 +
854 + static void siena_start_nic_stats(struct efx_nic *efx)
855 + {
856 +- u64 *dma_stats = (u64 *)efx->stats_buffer.addr;
857 ++ __le64 *dma_stats = efx->stats_buffer.addr;
858 +
859 + dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
860 +
861 +@@ -590,7 +602,8 @@ const struct efx_nic_type siena_a0_nic_type = {
862 + .reset = siena_reset_hw,
863 + .probe_port = siena_probe_port,
864 + .remove_port = siena_remove_port,
865 +- .prepare_flush = efx_port_dummy_op_void,
866 ++ .prepare_flush = siena_prepare_flush,
867 ++ .finish_flush = siena_finish_flush,
868 + .update_stats = siena_update_nic_stats,
869 + .start_stats = siena_start_nic_stats,
870 + .stop_stats = siena_stop_nic_stats,
871 +diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
872 +index 5eab3dc..890841a 100644
873 +--- a/drivers/net/wireless/mwifiex/join.c
874 ++++ b/drivers/net/wireless/mwifiex/join.c
875 +@@ -1102,10 +1102,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
876 + adhoc_join->bss_descriptor.bssid,
877 + adhoc_join->bss_descriptor.ssid);
878 +
879 +- for (i = 0; bss_desc->supported_rates[i] &&
880 +- i < MWIFIEX_SUPPORTED_RATES;
881 +- i++)
882 +- ;
883 ++ for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
884 ++ bss_desc->supported_rates[i]; i++)
885 ++ ;
886 + rates_size = i;
887 +
888 + /* Copy Data Rates from the Rates recorded in scan response */
889 +diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
890 +index 5bd2c55..031aa2b 100644
891 +--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
892 ++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
893 +@@ -1113,7 +1113,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
894 + rt2x00dev->hw->wiphy->interface_modes |=
895 + BIT(NL80211_IFTYPE_ADHOC) |
896 + BIT(NL80211_IFTYPE_AP) |
897 ++#ifdef CONFIG_MAC80211_MESH
898 + BIT(NL80211_IFTYPE_MESH_POINT) |
899 ++#endif
900 + BIT(NL80211_IFTYPE_WDS);
901 +
902 + /*
903 +diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
904 +index 26f7f01..f0bf9c2 100644
905 +--- a/drivers/platform/x86/asus-laptop.c
906 ++++ b/drivers/platform/x86/asus-laptop.c
907 +@@ -643,8 +643,10 @@ static ssize_t show_infos(struct device *dev,
908 + /*
909 + * The HWRS method return informations about the hardware.
910 + * 0x80 bit is for WLAN, 0x100 for Bluetooth.
911 ++ * 0x40 for WWAN, 0x10 for WIMAX.
912 + * The significance of others is yet to be found.
913 +- * If we don't find the method, we assume the device are present.
914 ++ * We don't currently use this for device detection, and it
915 ++ * takes several seconds to run on some systems.
916 + */
917 + rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
918 + if (!ACPI_FAILURE(rv))
919 +@@ -1271,7 +1273,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
920 + {
921 + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
922 + union acpi_object *model = NULL;
923 +- unsigned long long bsts_result, hwrs_result;
924 ++ unsigned long long bsts_result;
925 + char *string = NULL;
926 + acpi_status status;
927 +
928 +@@ -1333,17 +1335,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
929 + if (*string)
930 + pr_notice(" %s model detected\n", string);
931 +
932 +- /*
933 +- * The HWRS method return informations about the hardware.
934 +- * 0x80 bit is for WLAN, 0x100 for Bluetooth,
935 +- * 0x40 for WWAN, 0x10 for WIMAX.
936 +- * The significance of others is yet to be found.
937 +- */
938 +- status =
939 +- acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
940 +- if (!ACPI_FAILURE(status))
941 +- pr_notice(" HWRS returned %x", (int)hwrs_result);
942 +-
943 + if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
944 + asus->have_rsts = true;
945 +
946 +diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
947 +index 92aa545..bf583fa 100644
948 +--- a/drivers/tty/serial/sunsu.c
949 ++++ b/drivers/tty/serial/sunsu.c
950 +@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
951 + #define UART_NR 4
952 +
953 + static struct uart_sunsu_port sunsu_ports[UART_NR];
954 ++static int nr_inst; /* Number of already registered ports */
955 +
956 + #ifdef CONFIG_SERIO
957 +
958 +@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
959 + printk("Console: ttyS%d (SU)\n",
960 + (sunsu_reg.minor - 64) + co->index);
961 +
962 +- /*
963 +- * Check whether an invalid uart number has been specified, and
964 +- * if so, search for the first available port that does have
965 +- * console support.
966 +- */
967 +- if (co->index >= UART_NR)
968 +- co->index = 0;
969 ++ if (co->index > nr_inst)
970 ++ return -ENODEV;
971 + port = &sunsu_ports[co->index].port;
972 +
973 + /*
974 +@@ -1408,7 +1404,6 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
975 +
976 + static int __devinit su_probe(struct platform_device *op)
977 + {
978 +- static int inst;
979 + struct device_node *dp = op->dev.of_node;
980 + struct uart_sunsu_port *up;
981 + struct resource *rp;
982 +@@ -1418,16 +1413,16 @@ static int __devinit su_probe(struct platform_device *op)
983 +
984 + type = su_get_type(dp);
985 + if (type == SU_PORT_PORT) {
986 +- if (inst >= UART_NR)
987 ++ if (nr_inst >= UART_NR)
988 + return -EINVAL;
989 +- up = &sunsu_ports[inst];
990 ++ up = &sunsu_ports[nr_inst];
991 + } else {
992 + up = kzalloc(sizeof(*up), GFP_KERNEL);
993 + if (!up)
994 + return -ENOMEM;
995 + }
996 +
997 +- up->port.line = inst;
998 ++ up->port.line = nr_inst;
999 +
1000 + spin_lock_init(&up->port.lock);
1001 +
1002 +@@ -1461,6 +1456,8 @@ static int __devinit su_probe(struct platform_device *op)
1003 + }
1004 + dev_set_drvdata(&op->dev, up);
1005 +
1006 ++ nr_inst++;
1007 ++
1008 + return 0;
1009 + }
1010 +
1011 +@@ -1488,7 +1485,7 @@ static int __devinit su_probe(struct platform_device *op)
1012 +
1013 + dev_set_drvdata(&op->dev, up);
1014 +
1015 +- inst++;
1016 ++ nr_inst++;
1017 +
1018 + return 0;
1019 +
1020 +diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
1021 +index a54c380..923153c 100644
1022 +--- a/drivers/usb/host/ehci-q.c
1023 ++++ b/drivers/usb/host/ehci-q.c
1024 +@@ -137,7 +137,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
1025 + * qtd is updated in qh_completions(). Update the QH
1026 + * overlay here.
1027 + */
1028 +- if (qh->hw->hw_token & ACTIVE_BIT(ehci)) {
1029 ++ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
1030 + qh->hw->hw_qtd_next = qtd->hw_next;
1031 + qtd = NULL;
1032 + }
1033 +@@ -450,19 +450,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
1034 + else if (last_status == -EINPROGRESS && !urb->unlinked)
1035 + continue;
1036 +
1037 +- /*
1038 +- * If this was the active qtd when the qh was unlinked
1039 +- * and the overlay's token is active, then the overlay
1040 +- * hasn't been written back to the qtd yet so use its
1041 +- * token instead of the qtd's. After the qtd is
1042 +- * processed and removed, the overlay won't be valid
1043 +- * any more.
1044 +- */
1045 +- if (state == QH_STATE_IDLE &&
1046 +- qh->qtd_list.next == &qtd->qtd_list &&
1047 +- (hw->hw_token & ACTIVE_BIT(ehci))) {
1048 ++ /* qh unlinked; token in overlay may be most current */
1049 ++ if (state == QH_STATE_IDLE
1050 ++ && cpu_to_hc32(ehci, qtd->qtd_dma)
1051 ++ == hw->hw_current) {
1052 + token = hc32_to_cpu(ehci, hw->hw_token);
1053 +- hw->hw_token &= ~ACTIVE_BIT(ehci);
1054 +
1055 + /* An unlink may leave an incomplete
1056 + * async transaction in the TT buffer.
1057 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1058 +index 1d72895..8b94412 100644
1059 +--- a/drivers/usb/host/xhci.h
1060 ++++ b/drivers/usb/host/xhci.h
1061 +@@ -206,8 +206,8 @@ struct xhci_op_regs {
1062 + /* bits 12:31 are reserved (and should be preserved on writes). */
1063 +
1064 + /* IMAN - Interrupt Management Register */
1065 +-#define IMAN_IP (1 << 1)
1066 +-#define IMAN_IE (1 << 0)
1067 ++#define IMAN_IE (1 << 1)
1068 ++#define IMAN_IP (1 << 0)
1069 +
1070 + /* USBSTS - USB status - status bitmasks */
1071 + /* HC not running - set to 1 when run/stop bit is cleared. */
1072 +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
1073 +index b0a7a9e..51b22c3 100644
1074 +--- a/drivers/usb/serial/garmin_gps.c
1075 ++++ b/drivers/usb/serial/garmin_gps.c
1076 +@@ -973,10 +973,7 @@ static void garmin_close(struct usb_serial_port *port)
1077 + if (!serial)
1078 + return;
1079 +
1080 +- mutex_lock(&port->serial->disc_mutex);
1081 +-
1082 +- if (!port->serial->disconnected)
1083 +- garmin_clear(garmin_data_p);
1084 ++ garmin_clear(garmin_data_p);
1085 +
1086 + /* shutdown our urbs */
1087 + usb_kill_urb(port->read_urb);
1088 +@@ -985,8 +982,6 @@ static void garmin_close(struct usb_serial_port *port)
1089 + /* keep reset state so we know that we must start a new session */
1090 + if (garmin_data_p->state != STATE_RESET)
1091 + garmin_data_p->state = STATE_DISCONNECTED;
1092 +-
1093 +- mutex_unlock(&port->serial->disc_mutex);
1094 + }
1095 +
1096 +
1097 +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
1098 +index 8a90d58..bdb44fc 100644
1099 +--- a/drivers/usb/serial/io_ti.c
1100 ++++ b/drivers/usb/serial/io_ti.c
1101 +@@ -2793,6 +2793,7 @@ static struct usb_serial_driver edgeport_2port_device = {
1102 + .set_termios = edge_set_termios,
1103 + .tiocmget = edge_tiocmget,
1104 + .tiocmset = edge_tiocmset,
1105 ++ .get_icount = edge_get_icount,
1106 + .write = edge_write,
1107 + .write_room = edge_write_room,
1108 + .chars_in_buffer = edge_chars_in_buffer,
1109 +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
1110 +index 637dfa4..f678532 100644
1111 +--- a/drivers/usb/serial/usb-serial.c
1112 ++++ b/drivers/usb/serial/usb-serial.c
1113 +@@ -168,6 +168,7 @@ static void destroy_serial(struct kref *kref)
1114 + }
1115 + }
1116 +
1117 ++ usb_put_intf(serial->interface);
1118 + usb_put_dev(serial->dev);
1119 + kfree(serial);
1120 + }
1121 +@@ -624,7 +625,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
1122 + }
1123 + serial->dev = usb_get_dev(dev);
1124 + serial->type = driver;
1125 +- serial->interface = interface;
1126 ++ serial->interface = usb_get_intf(interface);
1127 + kref_init(&serial->kref);
1128 + mutex_init(&serial->disc_mutex);
1129 + serial->minor = SERIAL_TTY_NO_MINOR;
1130 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1131 +index fa8a1b2..7b8d564 100644
1132 +--- a/drivers/usb/storage/unusual_devs.h
1133 ++++ b/drivers/usb/storage/unusual_devs.h
1134 +@@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
1135 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1136 + US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
1137 +
1138 ++/* Added by Dmitry Artamonow <mad_soft@×××××.ru> */
1139 ++UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
1140 ++ "Samsung",
1141 ++ "YP-Z3",
1142 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1143 ++ US_FL_MAX_SECTORS_64),
1144 ++
1145 + /* Entry and supporting patch by Theodore Kilgore <kilgota@××××××.edu>.
1146 + * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
1147 + * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
1148 +diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
1149 +index b8e8b0a..4a1b984 100644
1150 +--- a/fs/binfmt_em86.c
1151 ++++ b/fs/binfmt_em86.c
1152 +@@ -42,7 +42,6 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
1153 + return -ENOEXEC;
1154 + }
1155 +
1156 +- bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
1157 + allow_write_access(bprm->file);
1158 + fput(bprm->file);
1159 + bprm->file = NULL;
1160 +diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
1161 +index 5463952..b2497d4 100644
1162 +--- a/fs/binfmt_misc.c
1163 ++++ b/fs/binfmt_misc.c
1164 +@@ -116,10 +116,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1165 + if (!enabled)
1166 + goto _ret;
1167 +
1168 +- retval = -ENOEXEC;
1169 +- if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
1170 +- goto _ret;
1171 +-
1172 + /* to keep locking time low, we copy the interpreter string */
1173 + read_lock(&entries_lock);
1174 + fmt = check_file(bprm);
1175 +@@ -200,8 +196,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1176 + if (retval < 0)
1177 + goto _error;
1178 +
1179 +- bprm->recursion_depth++;
1180 +-
1181 + retval = search_binary_handler (bprm, regs);
1182 + if (retval < 0)
1183 + goto _error;
1184 +diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
1185 +index e39c18a..211ede0 100644
1186 +--- a/fs/binfmt_script.c
1187 ++++ b/fs/binfmt_script.c
1188 +@@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
1189 + char interp[BINPRM_BUF_SIZE];
1190 + int retval;
1191 +
1192 +- if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
1193 +- (bprm->recursion_depth > BINPRM_MAX_RECURSION))
1194 ++ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
1195 + return -ENOEXEC;
1196 + /*
1197 + * This section does the #! interpretation.
1198 + * Sorta complicated, but hopefully it will work. -TYT
1199 + */
1200 +
1201 +- bprm->recursion_depth++;
1202 + allow_write_access(bprm->file);
1203 + fput(bprm->file);
1204 + bprm->file = NULL;
1205 +diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
1206 +index cfd1ce3..1d36db1 100644
1207 +--- a/fs/cifs/asn1.c
1208 ++++ b/fs/cifs/asn1.c
1209 +@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
1210 + }
1211 + }
1212 +
1213 +- /* mechlistMIC */
1214 +- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1215 +- /* Check if we have reached the end of the blob, but with
1216 +- no mechListMic (e.g. NTLMSSP instead of KRB5) */
1217 +- if (ctx.error == ASN1_ERR_DEC_EMPTY)
1218 +- goto decode_negtoken_exit;
1219 +- cFYI(1, "Error decoding last part negTokenInit exit3");
1220 +- return 0;
1221 +- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
1222 +- /* tag = 3 indicating mechListMIC */
1223 +- cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
1224 +- cls, con, tag, end, *end);
1225 +- return 0;
1226 +- }
1227 +-
1228 +- /* sequence */
1229 +- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1230 +- cFYI(1, "Error decoding last part negTokenInit exit5");
1231 +- return 0;
1232 +- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
1233 +- || (tag != ASN1_SEQ)) {
1234 +- cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
1235 +- cls, con, tag, end, *end);
1236 +- }
1237 +-
1238 +- /* sequence of */
1239 +- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1240 +- cFYI(1, "Error decoding last part negTokenInit exit 7");
1241 +- return 0;
1242 +- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
1243 +- cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
1244 +- cls, con, tag, end, *end);
1245 +- return 0;
1246 +- }
1247 +-
1248 +- /* general string */
1249 +- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
1250 +- cFYI(1, "Error decoding last part negTokenInit exit9");
1251 +- return 0;
1252 +- } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
1253 +- || (tag != ASN1_GENSTR)) {
1254 +- cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
1255 +- cls, con, tag, end, *end);
1256 +- return 0;
1257 +- }
1258 +- cFYI(1, "Need to call asn1_octets_decode() function for %s",
1259 +- ctx.pointer); /* is this UTF-8 or ASCII? */
1260 +-decode_negtoken_exit:
1261 ++ /*
1262 ++ * We currently ignore anything at the end of the SPNEGO blob after
1263 ++ * the mechTypes have been parsed, since none of that info is
1264 ++ * used at the moment.
1265 ++ */
1266 + return 1;
1267 + }
1268 +diff --git a/fs/compat.c b/fs/compat.c
1269 +index e5358c2..f77a963 100644
1270 +--- a/fs/compat.c
1271 ++++ b/fs/compat.c
1272 +@@ -576,6 +576,10 @@ ssize_t compat_rw_copy_check_uvector(int type,
1273 + }
1274 + *ret_pointer = iov;
1275 +
1276 ++ ret = -EFAULT;
1277 ++ if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
1278 ++ goto out;
1279 ++
1280 + /*
1281 + * Single unix specification:
1282 + * We should -EINVAL if an element length is not >= 0 and fitting an
1283 +@@ -1106,17 +1110,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1284 + if (!file->f_op)
1285 + goto out;
1286 +
1287 +- ret = -EFAULT;
1288 +- if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
1289 +- goto out;
1290 +-
1291 +- tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
1292 ++ ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
1293 + UIO_FASTIOV, iovstack, &iov);
1294 +- if (tot_len == 0) {
1295 +- ret = 0;
1296 ++ if (ret <= 0)
1297 + goto out;
1298 +- }
1299 +
1300 ++ tot_len = ret;
1301 + ret = rw_verify_area(type, file, pos, tot_len);
1302 + if (ret < 0)
1303 + goto out;
1304 +diff --git a/fs/exec.c b/fs/exec.c
1305 +index 08f3e4e..3801daf 100644
1306 +--- a/fs/exec.c
1307 ++++ b/fs/exec.c
1308 +@@ -1369,6 +1369,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1309 + int try,retval;
1310 + struct linux_binfmt *fmt;
1311 +
1312 ++ /* This allows 4 levels of binfmt rewrites before failing hard. */
1313 ++ if (depth > 5)
1314 ++ return -ELOOP;
1315 ++
1316 + retval = security_bprm_check(bprm);
1317 + if (retval)
1318 + return retval;
1319 +@@ -1387,12 +1391,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1320 + if (!try_module_get(fmt->module))
1321 + continue;
1322 + read_unlock(&binfmt_lock);
1323 ++ bprm->recursion_depth = depth + 1;
1324 + retval = fn(bprm, regs);
1325 +- /*
1326 +- * Restore the depth counter to its starting value
1327 +- * in this call, so we don't have to rely on every
1328 +- * load_binary function to restore it on return.
1329 +- */
1330 + bprm->recursion_depth = depth;
1331 + if (retval >= 0) {
1332 + if (depth == 0)
1333 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1334 +index 680df5d..354ba48 100644
1335 +--- a/fs/ext4/extents.c
1336 ++++ b/fs/ext4/extents.c
1337 +@@ -2901,6 +2901,7 @@ static int ext4_split_extent(handle_t *handle,
1338 + int err = 0;
1339 + int uninitialized;
1340 + int split_flag1, flags1;
1341 ++ int allocated = map->m_len;
1342 +
1343 + depth = ext_depth(inode);
1344 + ex = path[depth].p_ext;
1345 +@@ -2919,6 +2920,8 @@ static int ext4_split_extent(handle_t *handle,
1346 + map->m_lblk + map->m_len, split_flag1, flags1);
1347 + if (err)
1348 + goto out;
1349 ++ } else {
1350 ++ allocated = ee_len - (map->m_lblk - ee_block);
1351 + }
1352 +
1353 + ext4_ext_drop_refs(path);
1354 +@@ -2941,7 +2944,7 @@ static int ext4_split_extent(handle_t *handle,
1355 +
1356 + ext4_ext_show_leaf(inode, path);
1357 + out:
1358 +- return err ? err : map->m_len;
1359 ++ return err ? err : allocated;
1360 + }
1361 +
1362 + #define EXT4_EXT_ZERO_LEN 7
1363 +@@ -3309,6 +3312,7 @@ out:
1364 + allocated - map->m_len);
1365 + allocated = map->m_len;
1366 + }
1367 ++ map->m_len = allocated;
1368 +
1369 + /*
1370 + * If we have done fallocate with the offset that is already
1371 +diff --git a/fs/isofs/export.c b/fs/isofs/export.c
1372 +index 516eb21..fd88add 100644
1373 +--- a/fs/isofs/export.c
1374 ++++ b/fs/isofs/export.c
1375 +@@ -135,6 +135,7 @@ isofs_export_encode_fh(struct dentry *dentry,
1376 + len = 3;
1377 + fh32[0] = ei->i_iget5_block;
1378 + fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
1379 ++ fh16[3] = 0; /* avoid leaking uninitialized data */
1380 + fh32[2] = inode->i_generation;
1381 + if (connectable && !S_ISDIR(inode->i_mode)) {
1382 + struct inode *parent;
1383 +diff --git a/fs/udf/namei.c b/fs/udf/namei.c
1384 +index f1dce84..d8c1bb5 100644
1385 +--- a/fs/udf/namei.c
1386 ++++ b/fs/udf/namei.c
1387 +@@ -1297,6 +1297,7 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
1388 + *lenp = 3;
1389 + fid->udf.block = location.logicalBlockNum;
1390 + fid->udf.partref = location.partitionReferenceNum;
1391 ++ fid->udf.parent_partref = 0;
1392 + fid->udf.generation = inode->i_generation;
1393 +
1394 + if (connectable && !S_ISDIR(inode->i_mode)) {
1395 +diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
1396 +index 4858c19..54706dc 100644
1397 +--- a/fs/udf/udf_sb.h
1398 ++++ b/fs/udf/udf_sb.h
1399 +@@ -79,7 +79,7 @@ struct udf_virtual_data {
1400 + struct udf_bitmap {
1401 + __u32 s_extLength;
1402 + __u32 s_extPosition;
1403 +- __u16 s_nr_groups;
1404 ++ int s_nr_groups;
1405 + struct buffer_head **s_block_bitmap;
1406 + };
1407 +
1408 +diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
1409 +index 384e37f..718eb0b 100644
1410 +--- a/include/linux/binfmts.h
1411 ++++ b/include/linux/binfmts.h
1412 +@@ -67,8 +67,6 @@ struct linux_binprm {
1413 + #define BINPRM_FLAGS_EXECFD_BIT 1
1414 + #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
1415 +
1416 +-#define BINPRM_MAX_RECURSION 4
1417 +-
1418 + /* Function parameter for binfmt->coredump */
1419 + struct coredump_params {
1420 + long signr;
1421 +diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
1422 +index 16ff29a..b289bd2 100644
1423 +--- a/include/net/inet_frag.h
1424 ++++ b/include/net/inet_frag.h
1425 +@@ -33,6 +33,13 @@ struct inet_frag_queue {
1426 +
1427 + #define INETFRAGS_HASHSZ 64
1428 +
1429 ++/* averaged:
1430 ++ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
1431 ++ * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
1432 ++ * struct frag_queue))
1433 ++ */
1434 ++#define INETFRAGS_MAXDEPTH 128
1435 ++
1436 + struct inet_frags {
1437 + struct hlist_head hash[INETFRAGS_HASHSZ];
1438 + rwlock_t lock;
1439 +@@ -64,6 +71,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
1440 + struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
1441 + struct inet_frags *f, void *key, unsigned int hash)
1442 + __releases(&f->lock);
1443 ++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
1444 ++ const char *prefix);
1445 +
1446 + static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
1447 + {
1448 +diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
1449 +index 10422ef..2124004 100644
1450 +--- a/include/net/ip_fib.h
1451 ++++ b/include/net/ip_fib.h
1452 +@@ -129,18 +129,16 @@ struct fib_result_nl {
1453 + };
1454 +
1455 + #ifdef CONFIG_IP_ROUTE_MULTIPATH
1456 +-
1457 + #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
1458 +-
1459 +-#define FIB_TABLE_HASHSZ 2
1460 +-
1461 + #else /* CONFIG_IP_ROUTE_MULTIPATH */
1462 +-
1463 + #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
1464 ++#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1465 +
1466 ++#ifdef CONFIG_IP_MULTIPLE_TABLES
1467 + #define FIB_TABLE_HASHSZ 256
1468 +-
1469 +-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
1470 ++#else
1471 ++#define FIB_TABLE_HASHSZ 2
1472 ++#endif
1473 +
1474 + extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
1475 +
1476 +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
1477 +index 7a90d02..9e40370 100644
1478 +--- a/kernel/time/tick-broadcast.c
1479 ++++ b/kernel/time/tick-broadcast.c
1480 +@@ -66,7 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
1481 + */
1482 + int tick_check_broadcast_device(struct clock_event_device *dev)
1483 + {
1484 +- if ((tick_broadcast_device.evtdev &&
1485 ++ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
1486 ++ (tick_broadcast_device.evtdev &&
1487 + tick_broadcast_device.evtdev->rating >= dev->rating) ||
1488 + (dev->features & CLOCK_EVT_FEAT_C3STOP))
1489 + return 0;
1490 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1491 +index 86fd417..b2ca34a 100644
1492 +--- a/kernel/trace/ftrace.c
1493 ++++ b/kernel/trace/ftrace.c
1494 +@@ -2709,8 +2709,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1495 + continue;
1496 + }
1497 +
1498 +- hlist_del(&entry->node);
1499 +- call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1500 ++ hlist_del_rcu(&entry->node);
1501 ++ call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
1502 + }
1503 + }
1504 + __disable_ftrace_function_probe();
1505 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1506 +index 672a749..97bf540 100644
1507 +--- a/kernel/trace/trace.c
1508 ++++ b/kernel/trace/trace.c
1509 +@@ -649,7 +649,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1510 + void
1511 + update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1512 + {
1513 +- struct ring_buffer *buf = tr->buffer;
1514 ++ struct ring_buffer *buf;
1515 +
1516 + if (trace_stop_count)
1517 + return;
1518 +@@ -661,6 +661,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1519 + }
1520 + arch_spin_lock(&ftrace_max_lock);
1521 +
1522 ++ buf = tr->buffer;
1523 + tr->buffer = max_tr.buffer;
1524 + max_tr.buffer = buf;
1525 +
1526 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1527 +index 037f077..2c56a53 100644
1528 +--- a/mm/hugetlb.c
1529 ++++ b/mm/hugetlb.c
1530 +@@ -2006,8 +2006,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
1531 + /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
1532 + unsigned long hugetlb_total_pages(void)
1533 + {
1534 +- struct hstate *h = &default_hstate;
1535 +- return h->nr_huge_pages * pages_per_huge_page(h);
1536 ++ struct hstate *h;
1537 ++ unsigned long nr_total_pages = 0;
1538 ++
1539 ++ for_each_hstate(h)
1540 ++ nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
1541 ++ return nr_total_pages;
1542 + }
1543 +
1544 + static int hugetlb_acct_memory(struct hstate *h, long delta)
1545 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1546 +index 222312e..81c16d3 100644
1547 +--- a/net/core/rtnetlink.c
1548 ++++ b/net/core/rtnetlink.c
1549 +@@ -2011,7 +2011,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1550 + struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
1551 +
1552 + while (RTA_OK(attr, attrlen)) {
1553 +- unsigned flavor = attr->rta_type;
1554 ++ unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
1555 + if (flavor) {
1556 + if (flavor > rta_max[sz_idx])
1557 + return -EINVAL;
1558 +diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
1559 +index 5ff2a51..210b710 100644
1560 +--- a/net/ipv4/inet_fragment.c
1561 ++++ b/net/ipv4/inet_fragment.c
1562 +@@ -21,6 +21,7 @@
1563 + #include <linux/rtnetlink.h>
1564 + #include <linux/slab.h>
1565 +
1566 ++#include <net/sock.h>
1567 + #include <net/inet_frag.h>
1568 +
1569 + static void inet_frag_secret_rebuild(unsigned long dummy)
1570 +@@ -271,6 +272,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
1571 + {
1572 + struct inet_frag_queue *q;
1573 + struct hlist_node *n;
1574 ++ int depth = 0;
1575 +
1576 + hlist_for_each_entry(q, n, &f->hash[hash], list) {
1577 + if (q->net == nf && f->match(q, key)) {
1578 +@@ -278,9 +280,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
1579 + read_unlock(&f->lock);
1580 + return q;
1581 + }
1582 ++ depth++;
1583 + }
1584 + read_unlock(&f->lock);
1585 +
1586 +- return inet_frag_create(nf, f, key);
1587 ++ if (depth <= INETFRAGS_MAXDEPTH)
1588 ++ return inet_frag_create(nf, f, key);
1589 ++ else
1590 ++ return ERR_PTR(-ENOBUFS);
1591 + }
1592 + EXPORT_SYMBOL(inet_frag_find);
1593 ++
1594 ++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
1595 ++ const char *prefix)
1596 ++{
1597 ++ static const char msg[] = "inet_frag_find: Fragment hash bucket"
1598 ++ " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
1599 ++ ". Dropping fragment.\n";
1600 ++
1601 ++ if (PTR_ERR(q) == -ENOBUFS)
1602 ++ LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
1603 ++}
1604 ++EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
1605 +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
1606 +index 0ad6035..da38472 100644
1607 +--- a/net/ipv4/ip_fragment.c
1608 ++++ b/net/ipv4/ip_fragment.c
1609 +@@ -20,6 +20,8 @@
1610 + * Patrick McHardy : LRU queue of frag heads for evictor.
1611 + */
1612 +
1613 ++#define pr_fmt(fmt) "IPv4: " fmt
1614 ++
1615 + #include <linux/compiler.h>
1616 + #include <linux/module.h>
1617 + #include <linux/types.h>
1618 +@@ -292,14 +294,12 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
1619 + hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
1620 +
1621 + q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
1622 +- if (q == NULL)
1623 +- goto out_nomem;
1624 ++ if (IS_ERR_OR_NULL(q)) {
1625 ++ inet_frag_maybe_warn_overflow(q, pr_fmt());
1626 ++ return NULL;
1627 ++ }
1628 +
1629 + return container_of(q, struct ipq, q);
1630 +-
1631 +-out_nomem:
1632 +- LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
1633 +- return NULL;
1634 + }
1635 +
1636 + /* Is the fragment too far ahead to be part of ipq? */
1637 +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
1638 +index 42dd1a9..40eb4fc 100644
1639 +--- a/net/ipv4/ip_options.c
1640 ++++ b/net/ipv4/ip_options.c
1641 +@@ -358,7 +358,6 @@ int ip_options_compile(struct net *net,
1642 + }
1643 + switch (optptr[3]&0xF) {
1644 + case IPOPT_TS_TSONLY:
1645 +- opt->ts = optptr - iph;
1646 + if (skb)
1647 + timeptr = &optptr[optptr[2]-1];
1648 + opt->ts_needtime = 1;
1649 +@@ -369,7 +368,6 @@ int ip_options_compile(struct net *net,
1650 + pp_ptr = optptr + 2;
1651 + goto error;
1652 + }
1653 +- opt->ts = optptr - iph;
1654 + if (rt) {
1655 + memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
1656 + timeptr = &optptr[optptr[2]+3];
1657 +@@ -383,7 +381,6 @@ int ip_options_compile(struct net *net,
1658 + pp_ptr = optptr + 2;
1659 + goto error;
1660 + }
1661 +- opt->ts = optptr - iph;
1662 + {
1663 + __be32 addr;
1664 + memcpy(&addr, &optptr[optptr[2]-1], 4);
1665 +@@ -416,12 +413,12 @@ int ip_options_compile(struct net *net,
1666 + pp_ptr = optptr + 3;
1667 + goto error;
1668 + }
1669 +- opt->ts = optptr - iph;
1670 + if (skb) {
1671 + optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
1672 + opt->is_changed = 1;
1673 + }
1674 + }
1675 ++ opt->ts = optptr - iph;
1676 + break;
1677 + case IPOPT_RA:
1678 + if (optlen < 4) {
1679 +diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
1680 +index 0857272..afc7530 100644
1681 +--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
1682 ++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
1683 +@@ -14,6 +14,8 @@
1684 + * 2 of the License, or (at your option) any later version.
1685 + */
1686 +
1687 ++#define pr_fmt(fmt) "IPv6-nf: " fmt
1688 ++
1689 + #include <linux/errno.h>
1690 + #include <linux/types.h>
1691 + #include <linux/string.h>
1692 +@@ -176,14 +178,12 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
1693 +
1694 + q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
1695 + local_bh_enable();
1696 +- if (q == NULL)
1697 +- goto oom;
1698 ++ if (IS_ERR_OR_NULL(q)) {
1699 ++ inet_frag_maybe_warn_overflow(q, pr_fmt());
1700 ++ return NULL;
1701 ++ }
1702 +
1703 + return container_of(q, struct nf_ct_frag6_queue, q);
1704 +-
1705 +-oom:
1706 +- pr_debug("Can't alloc new queue\n");
1707 +- return NULL;
1708 + }
1709 +
1710 +
1711 +diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
1712 +index 7b954e2..43f8fc9 100644
1713 +--- a/net/ipv6/reassembly.c
1714 ++++ b/net/ipv6/reassembly.c
1715 +@@ -26,6 +26,9 @@
1716 + * YOSHIFUJI,H. @USAGI Always remove fragment header to
1717 + * calculate ICV correctly.
1718 + */
1719 ++
1720 ++#define pr_fmt(fmt) "IPv6: " fmt
1721 ++
1722 + #include <linux/errno.h>
1723 + #include <linux/types.h>
1724 + #include <linux/string.h>
1725 +@@ -239,9 +242,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
1726 + hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
1727 +
1728 + q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
1729 +- if (q == NULL)
1730 ++ if (IS_ERR_OR_NULL(q)) {
1731 ++ inet_frag_maybe_warn_overflow(q, pr_fmt());
1732 + return NULL;
1733 +-
1734 ++ }
1735 + return container_of(q, struct frag_queue, q);
1736 + }
1737 +
1738 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1739 +index 17a6e65..6c8c8da 100644
1740 +--- a/net/sctp/associola.c
1741 ++++ b/net/sctp/associola.c
1742 +@@ -1043,7 +1043,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
1743 + transports) {
1744 +
1745 + if (transport == active)
1746 +- break;
1747 ++ continue;
1748 + list_for_each_entry(chunk, &transport->transmitted,
1749 + transmitted_list) {
1750 + if (key == chunk->subh.data_hdr->tsn) {
1751 +diff --git a/security/keys/compat.c b/security/keys/compat.c
1752 +index 338b510..10a6e4c 100644
1753 +--- a/security/keys/compat.c
1754 ++++ b/security/keys/compat.c
1755 +@@ -40,12 +40,12 @@ long compat_keyctl_instantiate_key_iov(
1756 + ARRAY_SIZE(iovstack),
1757 + iovstack, &iov);
1758 + if (ret < 0)
1759 +- return ret;
1760 ++ goto err;
1761 + if (ret == 0)
1762 + goto no_payload_free;
1763 +
1764 + ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
1765 +-
1766 ++err:
1767 + if (iov != iovstack)
1768 + kfree(iov);
1769 + return ret;
1770 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
1771 +index eca5191..9f9cc3a 100644
1772 +--- a/security/keys/keyctl.c
1773 ++++ b/security/keys/keyctl.c
1774 +@@ -1067,12 +1067,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
1775 + ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
1776 + ARRAY_SIZE(iovstack), iovstack, &iov);
1777 + if (ret < 0)
1778 +- return ret;
1779 ++ goto err;
1780 + if (ret == 0)
1781 + goto no_payload_free;
1782 +
1783 + ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
1784 +-
1785 ++err:
1786 + if (iov != iovstack)
1787 + kfree(iov);
1788 + return ret;
1789 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
1790 +index 39e1a6a..7b747af 100644
1791 +--- a/sound/pci/hda/hda_codec.c
1792 ++++ b/sound/pci/hda/hda_codec.c
1793 +@@ -2625,7 +2625,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
1794 + if (val & AC_DIG1_PROFESSIONAL)
1795 + sbits |= IEC958_AES0_PROFESSIONAL;
1796 + if (sbits & IEC958_AES0_PROFESSIONAL) {
1797 +- if (sbits & AC_DIG1_EMPHASIS)
1798 ++ if (val & AC_DIG1_EMPHASIS)
1799 + sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
1800 + } else {
1801 + if (val & AC_DIG1_EMPHASIS)
1802 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
1803 +index 4d68f90..0957e1c 100644
1804 +--- a/sound/usb/mixer.c
1805 ++++ b/sound/usb/mixer.c
1806 +@@ -711,8 +711,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
1807 + case UAC2_CLOCK_SELECTOR: {
1808 + struct uac_selector_unit_descriptor *d = p1;
1809 + /* call recursively to retrieve the channel info */
1810 +- if (check_input_term(state, d->baSourceID[0], term) < 0)
1811 +- return -ENODEV;
1812 ++ err = check_input_term(state, d->baSourceID[0], term);
1813 ++ if (err < 0)
1814 ++ return err;
1815 + term->type = d->bDescriptorSubtype << 16; /* virtual type */
1816 + term->id = id;
1817 + term->name = uac_selector_unit_iSelector(d);
1818 +@@ -1262,8 +1263,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
1819 + return err;
1820 +
1821 + /* determine the input source type and name */
1822 +- if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
1823 +- return -EINVAL;
1824 ++ err = check_input_term(state, hdr->bSourceID, &iterm);
1825 ++ if (err < 0)
1826 ++ return err;
1827 +
1828 + master_bits = snd_usb_combine_bytes(bmaControls, csize);
1829 + /* master configuration quirks */
1830 +@@ -2024,7 +2026,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
1831 + state.oterm.type = le16_to_cpu(desc->wTerminalType);
1832 + state.oterm.name = desc->iTerminal;
1833 + err = parse_audio_unit(&state, desc->bSourceID);
1834 +- if (err < 0)
1835 ++ if (err < 0 && err != -EINVAL)
1836 + return err;
1837 + } else { /* UAC_VERSION_2 */
1838 + struct uac2_output_terminal_descriptor *desc = p;
1839 +@@ -2036,12 +2038,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
1840 + state.oterm.type = le16_to_cpu(desc->wTerminalType);
1841 + state.oterm.name = desc->iTerminal;
1842 + err = parse_audio_unit(&state, desc->bSourceID);
1843 +- if (err < 0)
1844 ++ if (err < 0 && err != -EINVAL)
1845 + return err;
1846 +
1847 + /* for UAC2, use the same approach to also add the clock selectors */
1848 + err = parse_audio_unit(&state, desc->bCSourceID);
1849 +- if (err < 0)
1850 ++ if (err < 0 && err != -EINVAL)
1851 + return err;
1852 + }
1853 + }
1854
1855 Modified: genpatches-2.6/trunk/3.2/0000_README
1856 ===================================================================
1857 --- genpatches-2.6/trunk/3.2/0000_README 2013-03-30 11:08:52 UTC (rev 2326)
1858 +++ genpatches-2.6/trunk/3.2/0000_README 2013-03-31 21:28:05 UTC (rev 2327)
1859 @@ -204,6 +204,10 @@
1860 From: http://www.kernel.org
1861 Desc: Linux 3.2.41
1862
1863 +Patch: 1041_linux-3.2.42.patch
1864 +From: http://www.kernel.org
1865 +Desc: Linux 3.2.42
1866 +
1867 Patch: 2300_per-pci-device-msi-irq-listing.patch
1868 From: http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=da8d1c8ba4dcb16d60be54b233deca9a7cac98dc
1869 Desc: Add a per-pci-device subdirectory in sysfs
1870
1871 Added: genpatches-2.6/trunk/3.2/1041_linux-3.2.42.patch
1872 ===================================================================
1873 --- genpatches-2.6/trunk/3.2/1041_linux-3.2.42.patch (rev 0)
1874 +++ genpatches-2.6/trunk/3.2/1041_linux-3.2.42.patch 2013-03-31 21:28:05 UTC (rev 2327)
1875 @@ -0,0 +1,3602 @@
1876 +diff --git a/Makefile b/Makefile
1877 +index 95e6220..d44f009 100644
1878 +--- a/Makefile
1879 ++++ b/Makefile
1880 +@@ -1,6 +1,6 @@
1881 + VERSION = 3
1882 + PATCHLEVEL = 2
1883 +-SUBLEVEL = 41
1884 ++SUBLEVEL = 42
1885 + EXTRAVERSION =
1886 + NAME = Saber-toothed Squirrel
1887 +
1888 +diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
1889 +index 43ba0fb..559ee24 100644
1890 +--- a/arch/arm/include/asm/signal.h
1891 ++++ b/arch/arm/include/asm/signal.h
1892 +@@ -127,6 +127,7 @@ struct sigaction {
1893 + __sigrestore_t sa_restorer;
1894 + sigset_t sa_mask; /* mask last for extensibility */
1895 + };
1896 ++#define __ARCH_HAS_SA_RESTORER
1897 +
1898 + struct k_sigaction {
1899 + struct sigaction sa;
1900 +diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
1901 +index 8790dfc..e6952a0 100644
1902 +--- a/arch/avr32/include/asm/signal.h
1903 ++++ b/arch/avr32/include/asm/signal.h
1904 +@@ -128,6 +128,7 @@ struct sigaction {
1905 + __sigrestore_t sa_restorer;
1906 + sigset_t sa_mask; /* mask last for extensibility */
1907 + };
1908 ++#define __ARCH_HAS_SA_RESTORER
1909 +
1910 + struct k_sigaction {
1911 + struct sigaction sa;
1912 +diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
1913 +index ea6af9a..057fea2 100644
1914 +--- a/arch/cris/include/asm/signal.h
1915 ++++ b/arch/cris/include/asm/signal.h
1916 +@@ -122,6 +122,7 @@ struct sigaction {
1917 + void (*sa_restorer)(void);
1918 + sigset_t sa_mask; /* mask last for extensibility */
1919 + };
1920 ++#define __ARCH_HAS_SA_RESTORER
1921 +
1922 + struct k_sigaction {
1923 + struct sigaction sa;
1924 +diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
1925 +index fd8b66e..8695707 100644
1926 +--- a/arch/h8300/include/asm/signal.h
1927 ++++ b/arch/h8300/include/asm/signal.h
1928 +@@ -121,6 +121,7 @@ struct sigaction {
1929 + void (*sa_restorer)(void);
1930 + sigset_t sa_mask; /* mask last for extensibility */
1931 + };
1932 ++#define __ARCH_HAS_SA_RESTORER
1933 +
1934 + struct k_sigaction {
1935 + struct sigaction sa;
1936 +diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
1937 +index b2eeb0d..802d561 100644
1938 +--- a/arch/m32r/include/asm/signal.h
1939 ++++ b/arch/m32r/include/asm/signal.h
1940 +@@ -123,6 +123,7 @@ struct sigaction {
1941 + __sigrestore_t sa_restorer;
1942 + sigset_t sa_mask; /* mask last for extensibility */
1943 + };
1944 ++#define __ARCH_HAS_SA_RESTORER
1945 +
1946 + struct k_sigaction {
1947 + struct sigaction sa;
1948 +diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
1949 +index 93fe83e..a20ae63 100644
1950 +--- a/arch/m68k/include/asm/signal.h
1951 ++++ b/arch/m68k/include/asm/signal.h
1952 +@@ -119,6 +119,7 @@ struct sigaction {
1953 + __sigrestore_t sa_restorer;
1954 + sigset_t sa_mask; /* mask last for extensibility */
1955 + };
1956 ++#define __ARCH_HAS_SA_RESTORER
1957 +
1958 + struct k_sigaction {
1959 + struct sigaction sa;
1960 +diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
1961 +index 1865d72..eecaa76 100644
1962 +--- a/arch/mn10300/include/asm/signal.h
1963 ++++ b/arch/mn10300/include/asm/signal.h
1964 +@@ -131,6 +131,7 @@ struct sigaction {
1965 + __sigrestore_t sa_restorer;
1966 + sigset_t sa_mask; /* mask last for extensibility */
1967 + };
1968 ++#define __ARCH_HAS_SA_RESTORER
1969 +
1970 + struct k_sigaction {
1971 + struct sigaction sa;
1972 +diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
1973 +index 3eb13be..ec63a0a 100644
1974 +--- a/arch/powerpc/include/asm/signal.h
1975 ++++ b/arch/powerpc/include/asm/signal.h
1976 +@@ -109,6 +109,7 @@ struct sigaction {
1977 + __sigrestore_t sa_restorer;
1978 + sigset_t sa_mask; /* mask last for extensibility */
1979 + };
1980 ++#define __ARCH_HAS_SA_RESTORER
1981 +
1982 + struct k_sigaction {
1983 + struct sigaction sa;
1984 +diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
1985 +index edae5bb..b92b756 100644
1986 +--- a/arch/powerpc/kernel/cputable.c
1987 ++++ b/arch/powerpc/kernel/cputable.c
1988 +@@ -268,7 +268,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1989 + .cpu_features = CPU_FTRS_PPC970,
1990 + .cpu_user_features = COMMON_USER_POWER4 |
1991 + PPC_FEATURE_HAS_ALTIVEC_COMP,
1992 +- .mmu_features = MMU_FTR_HPTE_TABLE,
1993 ++ .mmu_features = MMU_FTRS_PPC970,
1994 + .icache_bsize = 128,
1995 + .dcache_bsize = 128,
1996 + .num_pmcs = 8,
1997 +diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
1998 +index cdf5cb2..c872626 100644
1999 +--- a/arch/s390/include/asm/signal.h
2000 ++++ b/arch/s390/include/asm/signal.h
2001 +@@ -131,6 +131,7 @@ struct sigaction {
2002 + void (*sa_restorer)(void);
2003 + sigset_t sa_mask; /* mask last for extensibility */
2004 + };
2005 ++#define __ARCH_HAS_SA_RESTORER
2006 +
2007 + struct k_sigaction {
2008 + struct sigaction sa;
2009 +diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
2010 +index 1d8648c..8743029 100644
2011 +--- a/arch/s390/include/asm/tlbflush.h
2012 ++++ b/arch/s390/include/asm/tlbflush.h
2013 +@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
2014 +
2015 + static inline void __tlb_flush_mm(struct mm_struct * mm)
2016 + {
2017 +- if (unlikely(cpumask_empty(mm_cpumask(mm))))
2018 +- return;
2019 + /*
2020 + * If the machine has IDTE we prefer to do a per mm flush
2021 + * on all cpus instead of doing a local flush if the mm
2022 +diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
2023 +index e49b828..4929431 100644
2024 +--- a/arch/sparc/include/asm/signal.h
2025 ++++ b/arch/sparc/include/asm/signal.h
2026 +@@ -191,6 +191,7 @@ struct __old_sigaction {
2027 + unsigned long sa_flags;
2028 + void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
2029 + };
2030 ++#define __ARCH_HAS_SA_RESTORER
2031 +
2032 + typedef struct sigaltstack {
2033 + void __user *ss_sp;
2034 +diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
2035 +index 598457c..6cbc795 100644
2036 +--- a/arch/x86/include/asm/signal.h
2037 ++++ b/arch/x86/include/asm/signal.h
2038 +@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
2039 + extern void do_notify_resume(struct pt_regs *, void *, __u32);
2040 + # endif /* __KERNEL__ */
2041 +
2042 ++#define __ARCH_HAS_SA_RESTORER
2043 ++
2044 + #ifdef __i386__
2045 + # ifdef __KERNEL__
2046 + struct old_sigaction {
2047 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2048 +index 73da6b6..2d4e76b 100644
2049 +--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
2050 ++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
2051 +@@ -736,3 +736,13 @@ void intel_ds_init(void)
2052 + }
2053 + }
2054 + }
2055 ++
2056 ++void perf_restore_debug_store(void)
2057 ++{
2058 ++ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2059 ++
2060 ++ if (!x86_pmu.bts && !x86_pmu.pebs)
2061 ++ return;
2062 ++
2063 ++ wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
2064 ++}
2065 +diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
2066 +index b7c2849..554b7b5 100644
2067 +--- a/arch/x86/lib/usercopy_64.c
2068 ++++ b/arch/x86/lib/usercopy_64.c
2069 +@@ -169,10 +169,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
2070 + char c;
2071 + unsigned zero_len;
2072 +
2073 +- for (; len; --len) {
2074 ++ for (; len; --len, to++) {
2075 + if (__get_user_nocheck(c, from++, sizeof(char)))
2076 + break;
2077 +- if (__put_user_nocheck(c, to++, sizeof(char)))
2078 ++ if (__put_user_nocheck(c, to, sizeof(char)))
2079 + break;
2080 + }
2081 +
2082 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
2083 +index f10c0af..43c9f6a 100644
2084 +--- a/arch/x86/power/cpu.c
2085 ++++ b/arch/x86/power/cpu.c
2086 +@@ -11,6 +11,7 @@
2087 + #include <linux/suspend.h>
2088 + #include <linux/export.h>
2089 + #include <linux/smp.h>
2090 ++#include <linux/perf_event.h>
2091 +
2092 + #include <asm/pgtable.h>
2093 + #include <asm/proto.h>
2094 +@@ -225,6 +226,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
2095 +
2096 + do_fpu_end();
2097 + mtrr_bp_restore();
2098 ++ perf_restore_debug_store();
2099 + }
2100 +
2101 + /* Needed by apm.c */
2102 +diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
2103 +index 633ba73..75edf8a 100644
2104 +--- a/arch/xtensa/include/asm/signal.h
2105 ++++ b/arch/xtensa/include/asm/signal.h
2106 +@@ -133,6 +133,7 @@ struct sigaction {
2107 + void (*sa_restorer)(void);
2108 + sigset_t sa_mask; /* mask last for extensibility */
2109 + };
2110 ++#define __ARCH_HAS_SA_RESTORER
2111 +
2112 + struct k_sigaction {
2113 + struct sigaction sa;
2114 +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
2115 +index efba163..bf4d6e2 100644
2116 +--- a/drivers/firmware/Kconfig
2117 ++++ b/drivers/firmware/Kconfig
2118 +@@ -53,6 +53,24 @@ config EFI_VARS
2119 + Subsequent efibootmgr releases may be found at:
2120 + <http://linux.dell.com/efibootmgr>
2121 +
2122 ++config EFI_VARS_PSTORE
2123 ++ bool "Register efivars backend for pstore"
2124 ++ depends on EFI_VARS && PSTORE
2125 ++ default y
2126 ++ help
2127 ++ Say Y here to enable use efivars as a backend to pstore. This
2128 ++ will allow writing console messages, crash dumps, or anything
2129 ++ else supported by pstore to EFI variables.
2130 ++
2131 ++config EFI_VARS_PSTORE_DEFAULT_DISABLE
2132 ++ bool "Disable using efivars as a pstore backend by default"
2133 ++ depends on EFI_VARS_PSTORE
2134 ++ default n
2135 ++ help
2136 ++ Saying Y here will disable the use of efivars as a storage
2137 ++ backend for pstore by default. This setting can be overridden
2138 ++ using the efivars module's pstore_disable parameter.
2139 ++
2140 + config EFI_PCDP
2141 + bool "Console device selection via EFI PCDP or HCDP table"
2142 + depends on ACPI && EFI && IA64
2143 +diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
2144 +index 81346ae..b15c0aa 100644
2145 +--- a/drivers/firmware/efivars.c
2146 ++++ b/drivers/firmware/efivars.c
2147 +@@ -92,6 +92,11 @@ MODULE_VERSION(EFIVARS_VERSION);
2148 +
2149 + #define DUMP_NAME_LEN 52
2150 +
2151 ++static bool efivars_pstore_disable =
2152 ++ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
2153 ++
2154 ++module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
2155 ++
2156 + /*
2157 + * The maximum size of VariableName + Data = 1024
2158 + * Therefore, it's reasonable to save that much
2159 +@@ -122,6 +127,8 @@ struct efivar_attribute {
2160 + ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
2161 + };
2162 +
2163 ++static struct efivars __efivars;
2164 ++
2165 + #define PSTORE_EFI_ATTRIBUTES \
2166 + (EFI_VARIABLE_NON_VOLATILE | \
2167 + EFI_VARIABLE_BOOTSERVICE_ACCESS | \
2168 +@@ -146,6 +153,14 @@ efivar_create_sysfs_entry(struct efivars *efivars,
2169 + efi_char16_t *variable_name,
2170 + efi_guid_t *vendor_guid);
2171 +
2172 ++/*
2173 ++ * Prototype for workqueue functions updating sysfs entry
2174 ++ */
2175 ++
2176 ++static void efivar_update_sysfs_entries(struct work_struct *);
2177 ++static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
2178 ++static bool efivar_wq_enabled = true;
2179 ++
2180 + /* Return the number of unicode characters in data */
2181 + static unsigned long
2182 + utf16_strnlen(efi_char16_t *s, size_t maxlength)
2183 +@@ -659,8 +674,6 @@ static struct kobj_type efivar_ktype = {
2184 + .default_attrs = def_attrs,
2185 + };
2186 +
2187 +-static struct pstore_info efi_pstore_info;
2188 +-
2189 + static inline void
2190 + efivar_unregister(struct efivar_entry *var)
2191 + {
2192 +@@ -697,7 +710,7 @@ static int efi_status_to_err(efi_status_t status)
2193 + return err;
2194 + }
2195 +
2196 +-#ifdef CONFIG_PSTORE
2197 ++#ifdef CONFIG_EFI_VARS_PSTORE
2198 +
2199 + static int efi_pstore_open(struct pstore_info *psi)
2200 + {
2201 +@@ -774,19 +787,21 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
2202 +
2203 + spin_lock_irqsave(&efivars->lock, flags);
2204 +
2205 +- /*
2206 +- * Check if there is a space enough to log.
2207 +- * size: a size of logging data
2208 +- * DUMP_NAME_LEN * 2: a maximum size of variable name
2209 +- */
2210 ++ if (size) {
2211 ++ /*
2212 ++ * Check if there is a space enough to log.
2213 ++ * size: a size of logging data
2214 ++ * DUMP_NAME_LEN * 2: a maximum size of variable name
2215 ++ */
2216 +
2217 +- status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
2218 +- size + DUMP_NAME_LEN * 2);
2219 ++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
2220 ++ size + DUMP_NAME_LEN * 2);
2221 +
2222 +- if (status) {
2223 +- spin_unlock_irqrestore(&efivars->lock, flags);
2224 +- *id = part;
2225 +- return -ENOSPC;
2226 ++ if (status) {
2227 ++ spin_unlock_irqrestore(&efivars->lock, flags);
2228 ++ *id = part;
2229 ++ return -ENOSPC;
2230 ++ }
2231 + }
2232 +
2233 + for (i = 0; i < DUMP_NAME_LEN; i++)
2234 +@@ -830,11 +845,8 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
2235 + if (found)
2236 + efivar_unregister(found);
2237 +
2238 +- if (size)
2239 +- ret = efivar_create_sysfs_entry(efivars,
2240 +- utf16_strsize(efi_name,
2241 +- DUMP_NAME_LEN * 2),
2242 +- efi_name, &vendor);
2243 ++ if (efivar_wq_enabled)
2244 ++ schedule_work(&efivar_work);
2245 +
2246 + *id = part;
2247 + return ret;
2248 +@@ -847,36 +859,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id,
2249 +
2250 + return 0;
2251 + }
2252 +-#else
2253 +-static int efi_pstore_open(struct pstore_info *psi)
2254 +-{
2255 +- return 0;
2256 +-}
2257 +-
2258 +-static int efi_pstore_close(struct pstore_info *psi)
2259 +-{
2260 +- return 0;
2261 +-}
2262 +-
2263 +-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
2264 +- struct timespec *timespec,
2265 +- char **buf, struct pstore_info *psi)
2266 +-{
2267 +- return -1;
2268 +-}
2269 +-
2270 +-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
2271 +- unsigned int part, size_t size, struct pstore_info *psi)
2272 +-{
2273 +- return 0;
2274 +-}
2275 +-
2276 +-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
2277 +- struct pstore_info *psi)
2278 +-{
2279 +- return 0;
2280 +-}
2281 +-#endif
2282 +
2283 + static struct pstore_info efi_pstore_info = {
2284 + .owner = THIS_MODULE,
2285 +@@ -888,6 +870,24 @@ static struct pstore_info efi_pstore_info = {
2286 + .erase = efi_pstore_erase,
2287 + };
2288 +
2289 ++static void efivar_pstore_register(struct efivars *efivars)
2290 ++{
2291 ++ efivars->efi_pstore_info = efi_pstore_info;
2292 ++ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
2293 ++ if (efivars->efi_pstore_info.buf) {
2294 ++ efivars->efi_pstore_info.bufsize = 1024;
2295 ++ efivars->efi_pstore_info.data = efivars;
2296 ++ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
2297 ++ pstore_register(&efivars->efi_pstore_info);
2298 ++ }
2299 ++}
2300 ++#else
2301 ++static void efivar_pstore_register(struct efivars *efivars)
2302 ++{
2303 ++ return;
2304 ++}
2305 ++#endif
2306 ++
2307 + static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
2308 + struct bin_attribute *bin_attr,
2309 + char *buf, loff_t pos, size_t count)
2310 +@@ -1025,6 +1025,103 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
2311 + return count;
2312 + }
2313 +
2314 ++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
2315 ++{
2316 ++ struct efivar_entry *entry, *n;
2317 ++ struct efivars *efivars = &__efivars;
2318 ++ unsigned long strsize1, strsize2;
2319 ++ bool found = false;
2320 ++
2321 ++ strsize1 = utf16_strsize(variable_name, 1024);
2322 ++ list_for_each_entry_safe(entry, n, &efivars->list, list) {
2323 ++ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
2324 ++ if (strsize1 == strsize2 &&
2325 ++ !memcmp(variable_name, &(entry->var.VariableName),
2326 ++ strsize2) &&
2327 ++ !efi_guidcmp(entry->var.VendorGuid,
2328 ++ *vendor)) {
2329 ++ found = true;
2330 ++ break;
2331 ++ }
2332 ++ }
2333 ++ return found;
2334 ++}
2335 ++
2336 ++/*
2337 ++ * Returns the size of variable_name, in bytes, including the
2338 ++ * terminating NULL character, or variable_name_size if no NULL
2339 ++ * character is found among the first variable_name_size bytes.
2340 ++ */
2341 ++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
2342 ++ unsigned long variable_name_size)
2343 ++{
2344 ++ unsigned long len;
2345 ++ efi_char16_t c;
2346 ++
2347 ++ /*
2348 ++ * The variable name is, by definition, a NULL-terminated
2349 ++ * string, so make absolutely sure that variable_name_size is
2350 ++ * the value we expect it to be. If not, return the real size.
2351 ++ */
2352 ++ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
2353 ++ c = variable_name[(len / sizeof(c)) - 1];
2354 ++ if (!c)
2355 ++ break;
2356 ++ }
2357 ++
2358 ++ return min(len, variable_name_size);
2359 ++}
2360 ++
2361 ++static void efivar_update_sysfs_entries(struct work_struct *work)
2362 ++{
2363 ++ struct efivars *efivars = &__efivars;
2364 ++ efi_guid_t vendor;
2365 ++ efi_char16_t *variable_name;
2366 ++ unsigned long variable_name_size = 1024;
2367 ++ efi_status_t status = EFI_NOT_FOUND;
2368 ++ bool found;
2369 ++
2370 ++ /* Add new sysfs entries */
2371 ++ while (1) {
2372 ++ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
2373 ++ if (!variable_name) {
2374 ++ pr_err("efivars: Memory allocation failed.\n");
2375 ++ return;
2376 ++ }
2377 ++
2378 ++ spin_lock_irq(&efivars->lock);
2379 ++ found = false;
2380 ++ while (1) {
2381 ++ variable_name_size = 1024;
2382 ++ status = efivars->ops->get_next_variable(
2383 ++ &variable_name_size,
2384 ++ variable_name,
2385 ++ &vendor);
2386 ++ if (status != EFI_SUCCESS) {
2387 ++ break;
2388 ++ } else {
2389 ++ if (!variable_is_present(variable_name,
2390 ++ &vendor)) {
2391 ++ found = true;
2392 ++ break;
2393 ++ }
2394 ++ }
2395 ++ }
2396 ++ spin_unlock_irq(&efivars->lock);
2397 ++
2398 ++ if (!found) {
2399 ++ kfree(variable_name);
2400 ++ break;
2401 ++ } else {
2402 ++ variable_name_size = var_name_strnsize(variable_name,
2403 ++ variable_name_size);
2404 ++ efivar_create_sysfs_entry(efivars,
2405 ++ variable_name_size,
2406 ++ variable_name, &vendor);
2407 ++ }
2408 ++ }
2409 ++}
2410 ++
2411 + /*
2412 + * Let's not leave out systab information that snuck into
2413 + * the efivars driver
2414 +@@ -1212,6 +1309,35 @@ void unregister_efivars(struct efivars *efivars)
2415 + }
2416 + EXPORT_SYMBOL_GPL(unregister_efivars);
2417 +
2418 ++/*
2419 ++ * Print a warning when duplicate EFI variables are encountered and
2420 ++ * disable the sysfs workqueue since the firmware is buggy.
2421 ++ */
2422 ++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
2423 ++ unsigned long len16)
2424 ++{
2425 ++ size_t i, len8 = len16 / sizeof(efi_char16_t);
2426 ++ char *s8;
2427 ++
2428 ++ /*
2429 ++ * Disable the workqueue since the algorithm it uses for
2430 ++ * detecting new variables won't work with this buggy
2431 ++ * implementation of GetNextVariableName().
2432 ++ */
2433 ++ efivar_wq_enabled = false;
2434 ++
2435 ++ s8 = kzalloc(len8, GFP_KERNEL);
2436 ++ if (!s8)
2437 ++ return;
2438 ++
2439 ++ for (i = 0; i < len8; i++)
2440 ++ s8[i] = s16[i];
2441 ++
2442 ++ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
2443 ++ s8, vendor_guid);
2444 ++ kfree(s8);
2445 ++}
2446 ++
2447 + int register_efivars(struct efivars *efivars,
2448 + const struct efivar_operations *ops,
2449 + struct kobject *parent_kobj)
2450 +@@ -1252,6 +1378,24 @@ int register_efivars(struct efivars *efivars,
2451 + &vendor_guid);
2452 + switch (status) {
2453 + case EFI_SUCCESS:
2454 ++ variable_name_size = var_name_strnsize(variable_name,
2455 ++ variable_name_size);
2456 ++
2457 ++ /*
2458 ++ * Some firmware implementations return the
2459 ++ * same variable name on multiple calls to
2460 ++ * get_next_variable(). Terminate the loop
2461 ++ * immediately as there is no guarantee that
2462 ++ * we'll ever see a different variable name,
2463 ++ * and may end up looping here forever.
2464 ++ */
2465 ++ if (variable_is_present(variable_name, &vendor_guid)) {
2466 ++ dup_variable_bug(variable_name, &vendor_guid,
2467 ++ variable_name_size);
2468 ++ status = EFI_NOT_FOUND;
2469 ++ break;
2470 ++ }
2471 ++
2472 + efivar_create_sysfs_entry(efivars,
2473 + variable_name_size,
2474 + variable_name,
2475 +@@ -1271,15 +1415,8 @@ int register_efivars(struct efivars *efivars,
2476 + if (error)
2477 + unregister_efivars(efivars);
2478 +
2479 +- efivars->efi_pstore_info = efi_pstore_info;
2480 +-
2481 +- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
2482 +- if (efivars->efi_pstore_info.buf) {
2483 +- efivars->efi_pstore_info.bufsize = 1024;
2484 +- efivars->efi_pstore_info.data = efivars;
2485 +- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
2486 +- pstore_register(&efivars->efi_pstore_info);
2487 +- }
2488 ++ if (!efivars_pstore_disable)
2489 ++ efivar_pstore_register(efivars);
2490 +
2491 + out:
2492 + kfree(variable_name);
2493 +@@ -1288,7 +1425,6 @@ out:
2494 + }
2495 + EXPORT_SYMBOL_GPL(register_efivars);
2496 +
2497 +-static struct efivars __efivars;
2498 + static struct efivar_operations ops;
2499 +
2500 + /*
2501 +@@ -1346,6 +1482,8 @@ err_put:
2502 + static void __exit
2503 + efivars_exit(void)
2504 + {
2505 ++ cancel_work_sync(&efivar_work);
2506 ++
2507 + if (efi_enabled(EFI_RUNTIME_SERVICES)) {
2508 + unregister_efivars(&__efivars);
2509 + kobject_put(efi_kobj);
2510 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2511 +index 9080eb7..7211f67 100644
2512 +--- a/drivers/gpu/drm/drm_edid.c
2513 ++++ b/drivers/gpu/drm/drm_edid.c
2514 +@@ -852,7 +852,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
2515 + unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
2516 + unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
2517 + unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
2518 +- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
2519 ++ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
2520 + unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
2521 +
2522 + /* ignore tiny modes */
2523 +@@ -933,6 +933,7 @@ set_size:
2524 + }
2525 +
2526 + mode->type = DRM_MODE_TYPE_DRIVER;
2527 ++ mode->vrefresh = drm_mode_vrefresh(mode);
2528 + drm_mode_set_name(mode);
2529 +
2530 + return mode;
2531 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2532 +index 5620192..9b4e5c6 100644
2533 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
2534 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
2535 +@@ -122,7 +122,7 @@ static const char *cache_level_str(int type)
2536 + static void
2537 + describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
2538 + {
2539 +- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
2540 ++ seq_printf(m, "%pK: %s%s %8zd %04x %04x %d %d%s%s%s",
2541 + &obj->base,
2542 + get_pin_flag(obj),
2543 + get_tiling_flag(obj),
2544 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2545 +index 878b989..b1bb734 100644
2546 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2547 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
2548 +@@ -907,15 +907,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
2549 + int count)
2550 + {
2551 + int i;
2552 ++ int relocs_total = 0;
2553 ++ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
2554 +
2555 + for (i = 0; i < count; i++) {
2556 + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
2557 + int length; /* limited by fault_in_pages_readable() */
2558 +
2559 +- /* First check for malicious input causing overflow */