Gentoo Archives: gentoo-commits

From: "Mike Pagano (mpagano)" <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2380 - genpatches-2.6/trunk/3.4
Date: Sun, 19 May 2013 19:38:11
Message-Id: 20130519193804.2BCB42171D@flycatcher.gentoo.org
1 Author: mpagano
2 Date: 2013-05-19 19:38:03 +0000 (Sun, 19 May 2013)
3 New Revision: 2380
4
5 Added:
6 genpatches-2.6/trunk/3.4/1045_linux-3.4.46.patch
7 Modified:
8 genpatches-2.6/trunk/3.4/0000_README
9 Log:
10 Linux patch 3.4.46
11
12 Modified: genpatches-2.6/trunk/3.4/0000_README
13 ===================================================================
14 --- genpatches-2.6/trunk/3.4/0000_README 2013-05-19 19:19:44 UTC (rev 2379)
15 +++ genpatches-2.6/trunk/3.4/0000_README 2013-05-19 19:38:03 UTC (rev 2380)
16 @@ -219,6 +219,10 @@
17 From: http://www.kernel.org
18 Desc: Linux 3.4.45
19
20 +Patch: 1045_linux-3.4.46.patch
21 +From: http://www.kernel.org
22 +Desc: Linux 3.4.46
23 +
24 Patch: 1700_correct-bnx2-firware-ver-mips.patch
25 From: https://bugs.gentoo.org/show_bug.cgi?id=424609
26 Desc: Correct firmware version for bnx2 on mips
27
28 Added: genpatches-2.6/trunk/3.4/1045_linux-3.4.46.patch
29 ===================================================================
30 --- genpatches-2.6/trunk/3.4/1045_linux-3.4.46.patch (rev 0)
31 +++ genpatches-2.6/trunk/3.4/1045_linux-3.4.46.patch 2013-05-19 19:38:03 UTC (rev 2380)
32 @@ -0,0 +1,1875 @@
33 +diff --git a/Makefile b/Makefile
34 +index 0ec4a35..3d88eb8 100644
35 +--- a/Makefile
36 ++++ b/Makefile
37 +@@ -1,6 +1,6 @@
38 + VERSION = 3
39 + PATCHLEVEL = 4
40 +-SUBLEVEL = 45
41 ++SUBLEVEL = 46
42 + EXTRAVERSION =
43 + NAME = Saber-toothed Squirrel
44 +
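
Note: the hunk above only bumps SUBLEVEL from 45 to 46. These three Makefile variables are what the kernel folds into its numeric version code; the sketch below uses the conventional ((a)<<16)+((b)<<8)+(c) encoding and is an illustrative userspace snippet, not part of the patch.

#include <stdio.h>

/* Conventional encoding used by the kernel's KERNEL_VERSION() macro. */
#define KVER(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    /* VERSION = 3, PATCHLEVEL = 4, SUBLEVEL = 46 from the Makefile above */
    printf("3.4.45 -> %#x, 3.4.46 -> %#x\n", KVER(3, 4, 45), KVER(3, 4, 46));
    return 0;
}
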
45 +diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
46 +index d87ee06..283a8d0 100644
47 +--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
48 ++++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
49 +@@ -63,11 +63,11 @@
50 + #define RX51_TSC2005_RESET_GPIO 104
51 + #define RX51_TSC2005_IRQ_GPIO 100
52 +
53 +-/* list all spi devices here */
54 ++/* List all SPI devices here. Note that the list/probe order seems to matter! */
55 + enum {
56 + RX51_SPI_WL1251,
57 +- RX51_SPI_MIPID, /* LCD panel */
58 + RX51_SPI_TSC2005, /* Touch Controller */
59 ++ RX51_SPI_MIPID, /* LCD panel */
60 + };
61 +
62 + static struct wl12xx_platform_data wl1251_pdata;
63 +diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
64 +index 557cff8..5e7e008 100644
65 +--- a/arch/powerpc/include/asm/rtas.h
66 ++++ b/arch/powerpc/include/asm/rtas.h
67 +@@ -262,6 +262,8 @@ extern void rtas_progress(char *s, unsigned short hex);
68 + extern void rtas_initialize(void);
69 + extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
70 + extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
71 ++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
72 ++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
73 + extern int rtas_ibm_suspend_me(struct rtas_args *);
74 +
75 + struct rtc_time;
76 +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
77 +index fcec382..225e9f2 100644
78 +--- a/arch/powerpc/kernel/rtas.c
79 ++++ b/arch/powerpc/kernel/rtas.c
80 +@@ -19,6 +19,7 @@
81 + #include <linux/init.h>
82 + #include <linux/capability.h>
83 + #include <linux/delay.h>
84 ++#include <linux/cpu.h>
85 + #include <linux/smp.h>
86 + #include <linux/completion.h>
87 + #include <linux/cpumask.h>
88 +@@ -808,6 +809,95 @@ static void rtas_percpu_suspend_me(void *info)
89 + __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
90 + }
91 +
92 ++enum rtas_cpu_state {
93 ++ DOWN,
94 ++ UP,
95 ++};
96 ++
97 ++#ifndef CONFIG_SMP
98 ++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
99 ++ cpumask_var_t cpus)
100 ++{
101 ++ if (!cpumask_empty(cpus)) {
102 ++ cpumask_clear(cpus);
103 ++ return -EINVAL;
104 ++ } else
105 ++ return 0;
106 ++}
107 ++#else
108 ++/* On return cpumask will be altered to indicate CPUs changed.
109 ++ * CPUs with states changed will be set in the mask,
110 ++ * CPUs with status unchanged will be unset in the mask. */
111 ++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
112 ++ cpumask_var_t cpus)
113 ++{
114 ++ int cpu;
115 ++ int cpuret = 0;
116 ++ int ret = 0;
117 ++
118 ++ if (cpumask_empty(cpus))
119 ++ return 0;
120 ++
121 ++ for_each_cpu(cpu, cpus) {
122 ++ switch (state) {
123 ++ case DOWN:
124 ++ cpuret = cpu_down(cpu);
125 ++ break;
126 ++ case UP:
127 ++ cpuret = cpu_up(cpu);
128 ++ break;
129 ++ }
130 ++ if (cpuret) {
131 ++ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
132 ++ __func__,
133 ++ ((state == UP) ? "up" : "down"),
134 ++ cpu, cpuret);
135 ++ if (!ret)
136 ++ ret = cpuret;
137 ++ if (state == UP) {
138 ++ /* clear bits for unchanged cpus, return */
139 ++ cpumask_shift_right(cpus, cpus, cpu);
140 ++ cpumask_shift_left(cpus, cpus, cpu);
141 ++ break;
142 ++ } else {
143 ++ /* clear bit for unchanged cpu, continue */
144 ++ cpumask_clear_cpu(cpu, cpus);
145 ++ }
146 ++ }
147 ++ }
148 ++
149 ++ return ret;
150 ++}
151 ++#endif
152 ++
153 ++int rtas_online_cpus_mask(cpumask_var_t cpus)
154 ++{
155 ++ int ret;
156 ++
157 ++ ret = rtas_cpu_state_change_mask(UP, cpus);
158 ++
159 ++ if (ret) {
160 ++ cpumask_var_t tmp_mask;
161 ++
162 ++ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
163 ++ return ret;
164 ++
165 ++ /* Use tmp_mask to preserve cpus mask from first failure */
166 ++ cpumask_copy(tmp_mask, cpus);
167 ++ rtas_offline_cpus_mask(tmp_mask);
168 ++ free_cpumask_var(tmp_mask);
169 ++ }
170 ++
171 ++ return ret;
172 ++}
173 ++EXPORT_SYMBOL(rtas_online_cpus_mask);
174 ++
175 ++int rtas_offline_cpus_mask(cpumask_var_t cpus)
176 ++{
177 ++ return rtas_cpu_state_change_mask(DOWN, cpus);
178 ++}
179 ++EXPORT_SYMBOL(rtas_offline_cpus_mask);
180 ++
181 + int rtas_ibm_suspend_me(struct rtas_args *args)
182 + {
183 + long state;
184 +@@ -815,6 +905,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
185 + unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
186 + struct rtas_suspend_me_data data;
187 + DECLARE_COMPLETION_ONSTACK(done);
188 ++ cpumask_var_t offline_mask;
189 ++ int cpuret;
190 +
191 + if (!rtas_service_present("ibm,suspend-me"))
192 + return -ENOSYS;
193 +@@ -838,11 +930,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
194 + return 0;
195 + }
196 +
197 ++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
198 ++ return -ENOMEM;
199 ++
200 + atomic_set(&data.working, 0);
201 + atomic_set(&data.done, 0);
202 + atomic_set(&data.error, 0);
203 + data.token = rtas_token("ibm,suspend-me");
204 + data.complete = &done;
205 ++
206 ++ /* All present CPUs must be online */
207 ++ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
208 ++ cpuret = rtas_online_cpus_mask(offline_mask);
209 ++ if (cpuret) {
210 ++ pr_err("%s: Could not bring present CPUs online.\n", __func__);
211 ++ atomic_set(&data.error, cpuret);
212 ++ goto out;
213 ++ }
214 ++
215 + stop_topology_update();
216 +
217 + /* Call function on all CPUs. One of us will make the
218 +@@ -858,6 +963,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
219 +
220 + start_topology_update();
221 +
222 ++ /* Take down CPUs not online prior to suspend */
223 ++ cpuret = rtas_offline_cpus_mask(offline_mask);
224 ++ if (cpuret)
225 ++ pr_warn("%s: Could not restore CPUs to offline state.\n",
226 ++ __func__);
227 ++
228 ++out:
229 ++ free_cpumask_var(offline_mask);
230 + return atomic_read(&data.error);
231 + }
232 + #else /* CONFIG_PPC_PSERIES */
233 +diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
234 +index 47226e0..5f997e7 100644
235 +--- a/arch/powerpc/platforms/pseries/suspend.c
236 ++++ b/arch/powerpc/platforms/pseries/suspend.c
237 +@@ -16,6 +16,7 @@
238 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
239 + */
240 +
241 ++#include <linux/cpu.h>
242 + #include <linux/delay.h>
243 + #include <linux/suspend.h>
244 + #include <linux/stat.h>
245 +@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
246 + struct device_attribute *attr,
247 + const char *buf, size_t count)
248 + {
249 ++ cpumask_var_t offline_mask;
250 + int rc;
251 +
252 + if (!capable(CAP_SYS_ADMIN))
253 + return -EPERM;
254 +
255 ++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
256 ++ return -ENOMEM;
257 ++
258 + stream_id = simple_strtoul(buf, NULL, 16);
259 +
260 + do {
261 +@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
262 + } while (rc == -EAGAIN);
263 +
264 + if (!rc) {
265 ++ /* All present CPUs must be online */
266 ++ cpumask_andnot(offline_mask, cpu_present_mask,
267 ++ cpu_online_mask);
268 ++ rc = rtas_online_cpus_mask(offline_mask);
269 ++ if (rc) {
270 ++ pr_err("%s: Could not bring present CPUs online.\n",
271 ++ __func__);
272 ++ goto out;
273 ++ }
274 ++
275 + stop_topology_update();
276 + rc = pm_suspend(PM_SUSPEND_MEM);
277 + start_topology_update();
278 ++
279 ++ /* Take down CPUs not online prior to suspend */
280 ++ if (!rtas_offline_cpus_mask(offline_mask))
281 ++ pr_warn("%s: Could not restore CPUs to offline "
282 ++ "state.\n", __func__);
283 + }
284 +
285 + stream_id = 0;
286 +
287 + if (!rc)
288 + rc = count;
289 ++out:
290 ++ free_cpumask_var(offline_mask);
291 + return rc;
292 + }
293 +
294 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
295 +index 4ff0ab9..90f5c0e 100644
296 +--- a/arch/x86/kvm/vmx.c
297 ++++ b/arch/x86/kvm/vmx.c
298 +@@ -4889,6 +4889,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
299 + if (err != EMULATE_DONE)
300 + return 0;
301 +
302 ++ if (vcpu->arch.halt_request) {
303 ++ vcpu->arch.halt_request = 0;
304 ++ ret = kvm_emulate_halt(vcpu);
305 ++ goto out;
306 ++ }
307 ++
308 + if (signal_pending(current))
309 + goto out;
310 + if (need_resched())
311 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
312 +index 2992678..a7678fa 100644
313 +--- a/arch/x86/xen/enlighten.c
314 ++++ b/arch/x86/xen/enlighten.c
315 +@@ -139,6 +139,21 @@ static void xen_vcpu_setup(int cpu)
316 +
317 + BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
318 +
319 ++ /*
320 ++ * This path is called twice on PVHVM - first during bootup via
321 ++ * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
322 ++ * hotplugged: cpu_up -> xen_hvm_cpu_notify.
323 ++ * As we can only do the VCPUOP_register_vcpu_info once lets
324 ++ * not over-write its result.
325 ++ *
326 ++ * For PV it is called during restore (xen_vcpu_restore) and bootup
327 ++ * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
328 ++ * use this function.
329 ++ */
330 ++ if (xen_hvm_domain()) {
331 ++ if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
332 ++ return;
333 ++ }
334 + if (cpu < MAX_VIRT_CPUS)
335 + per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
336 +
337 +diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
338 +index 149de45..af9f1f6 100644
339 +--- a/drivers/acpi/acpica/exfldio.c
340 ++++ b/drivers/acpi/acpica/exfldio.c
341 +@@ -722,7 +722,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
342 +
343 + if ((obj_desc->common_field.start_field_bit_offset == 0) &&
344 + (obj_desc->common_field.bit_length == access_bit_width)) {
345 +- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
346 ++ if (buffer_length >= sizeof(u64)) {
347 ++ status =
348 ++ acpi_ex_field_datum_io(obj_desc, 0, buffer,
349 ++ ACPI_READ);
350 ++ } else {
351 ++ /* Use raw_datum (u64) to handle buffers < 64 bits */
352 ++
353 ++ status =
354 ++ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
355 ++ ACPI_READ);
356 ++ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
357 ++ }
358 ++
359 + return_ACPI_STATUS(status);
360 + }
361 +
362 +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
363 +index a51df96..f9914e5 100644
364 +--- a/drivers/acpi/ec.c
365 ++++ b/drivers/acpi/ec.c
366 +@@ -217,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
367 + static int ec_poll(struct acpi_ec *ec)
368 + {
369 + unsigned long flags;
370 +- int repeat = 2; /* number of command restarts */
371 ++ int repeat = 5; /* number of command restarts */
372 + while (repeat--) {
373 + unsigned long delay = jiffies +
374 + msecs_to_jiffies(ec_delay);
375 +@@ -235,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec)
376 + }
377 + advance_transaction(ec, acpi_ec_read_status(ec));
378 + } while (time_before(jiffies, delay));
379 +- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
380 +- break;
381 + pr_debug(PREFIX "controller reset, restart transaction\n");
382 + spin_lock_irqsave(&ec->curr_lock, flags);
383 + start_transaction(ec);
384 +diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
385 +index 43beaca..13cbdd3 100644
386 +--- a/drivers/block/drbd/drbd_receiver.c
387 ++++ b/drivers/block/drbd/drbd_receiver.c
388 +@@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
389 + if (hg == -1 && mdev->state.role == R_PRIMARY) {
390 + enum drbd_state_rv rv2;
391 +
392 +- drbd_set_role(mdev, R_SECONDARY, 0);
393 + /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
394 + * we might be here in C_WF_REPORT_PARAMS which is transient.
395 + * we do not need to wait for the after state change work either. */
396 +diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
397 +index cdd4c09f..a22a7a5 100644
398 +--- a/drivers/char/ipmi/ipmi_bt_sm.c
399 ++++ b/drivers/char/ipmi/ipmi_bt_sm.c
400 +@@ -95,9 +95,9 @@ struct si_sm_data {
401 + enum bt_states state;
402 + unsigned char seq; /* BT sequence number */
403 + struct si_sm_io *io;
404 +- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
405 ++ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
406 + int write_count;
407 +- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
408 ++ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
409 + int read_count;
410 + int truncated;
411 + long timeout; /* microseconds countdown */
412 +diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
413 +index 9eb360f..d5a5f02 100644
414 +--- a/drivers/char/ipmi/ipmi_devintf.c
415 ++++ b/drivers/char/ipmi/ipmi_devintf.c
416 +@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
417 + return ipmi_ioctl(filep, cmd, arg);
418 + }
419 + }
420 ++
421 ++static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
422 ++ unsigned long arg)
423 ++{
424 ++ int ret;
425 ++
426 ++ mutex_lock(&ipmi_mutex);
427 ++ ret = compat_ipmi_ioctl(filep, cmd, arg);
428 ++ mutex_unlock(&ipmi_mutex);
429 ++
430 ++ return ret;
431 ++}
432 + #endif
433 +
434 + static const struct file_operations ipmi_fops = {
435 + .owner = THIS_MODULE,
436 + .unlocked_ioctl = ipmi_unlocked_ioctl,
437 + #ifdef CONFIG_COMPAT
438 +- .compat_ioctl = compat_ipmi_ioctl,
439 ++ .compat_ioctl = unlocked_compat_ipmi_ioctl,
440 + #endif
441 + .open = ipmi_open,
442 + .release = ipmi_release,
443 +diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
444 +index 65c0495..d05669b 100644
445 +--- a/drivers/dma/pch_dma.c
446 ++++ b/drivers/dma/pch_dma.c
447 +@@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
448 + dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
449 +
450 + if (!ret) {
451 +- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
452 ++ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
453 + if (ret) {
454 + spin_lock(&pd_chan->lock);
455 + pd_chan->descs_allocated++;
456 +diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
457 +index 961fb54..7f88de6 100644
458 +--- a/drivers/gpu/drm/drm_mm.c
459 ++++ b/drivers/gpu/drm/drm_mm.c
460 +@@ -680,33 +680,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
461 + EXPORT_SYMBOL(drm_mm_debug_table);
462 +
463 + #if defined(CONFIG_DEBUG_FS)
464 +-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
465 ++static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
466 + {
467 +- struct drm_mm_node *entry;
468 +- unsigned long total_used = 0, total_free = 0, total = 0;
469 + unsigned long hole_start, hole_end, hole_size;
470 +
471 +- hole_start = drm_mm_hole_node_start(&mm->head_node);
472 +- hole_end = drm_mm_hole_node_end(&mm->head_node);
473 +- hole_size = hole_end - hole_start;
474 +- if (hole_size)
475 ++ if (entry->hole_follows) {
476 ++ hole_start = drm_mm_hole_node_start(entry);
477 ++ hole_end = drm_mm_hole_node_end(entry);
478 ++ hole_size = hole_end - hole_start;
479 + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
480 + hole_start, hole_end, hole_size);
481 +- total_free += hole_size;
482 ++ return hole_size;
483 ++ }
484 ++
485 ++ return 0;
486 ++}
487 ++
488 ++int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
489 ++{
490 ++ struct drm_mm_node *entry;
491 ++ unsigned long total_used = 0, total_free = 0, total = 0;
492 ++
493 ++ total_free += drm_mm_dump_hole(m, &mm->head_node);
494 +
495 + drm_mm_for_each_node(entry, mm) {
496 + seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
497 + entry->start, entry->start + entry->size,
498 + entry->size);
499 + total_used += entry->size;
500 +- if (entry->hole_follows) {
501 +- hole_start = drm_mm_hole_node_start(entry);
502 +- hole_end = drm_mm_hole_node_end(entry);
503 +- hole_size = hole_end - hole_start;
504 +- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
505 +- hole_start, hole_end, hole_size);
506 +- total_free += hole_size;
507 +- }
508 ++ total_free += drm_mm_dump_hole(m, entry);
509 + }
510 + total = total_free + total_used;
511 +
512 +diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
513 +index 1fe98b4..9aa02be 100644
514 +--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
515 ++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
516 +@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
517 + OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
518 +
519 + for (i = 0; i < nr; ++i) {
520 +- if (DRM_COPY_FROM_USER_UNCHECKED
521 ++ if (DRM_COPY_FROM_USER
522 + (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
523 + DRM_ERROR("copy cliprect faulted\n");
524 + return -EFAULT;
525 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
526 +index 6f75887..ff62ddc 100644
527 +--- a/drivers/md/dm-snap.c
528 ++++ b/drivers/md/dm-snap.c
529 +@@ -1117,6 +1117,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
530 + s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
531 + if (!s->pending_pool) {
532 + ti->error = "Could not allocate mempool for pending exceptions";
533 ++ r = -ENOMEM;
534 + goto bad_pending_pool;
535 + }
536 +
537 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
538 +index 1555f0b..7c3ab8f 100644
539 +--- a/drivers/md/dm-thin.c
540 ++++ b/drivers/md/dm-thin.c
541 +@@ -2027,6 +2027,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
542 + * thin devices' discard limits consistent).
543 + */
544 + ti->discards_supported = 1;
545 ++ ti->discard_zeroes_data_unsupported = 1;
546 + }
547 + ti->private = pt;
548 +
549 +@@ -2443,7 +2444,6 @@ static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
550 + * bios that overlap 2 blocks.
551 + */
552 + limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
553 +- limits->discard_zeroes_data = pool->pf.zero_new_blocks;
554 + }
555 +
556 + static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
557 +diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
558 +index 41719da..9040a62 100644
559 +--- a/drivers/net/ethernet/3com/3c509.c
560 ++++ b/drivers/net/ethernet/3com/3c509.c
561 +@@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev,
562 + if (!dev)
563 + return -ENOMEM;
564 +
565 ++ SET_NETDEV_DEV(dev, pdev);
566 + netdev_boot_setup_check(dev);
567 +
568 + if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
569 +@@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device)
570 + return -ENOMEM;
571 + }
572 +
573 ++ SET_NETDEV_DEV(dev, device);
574 + netdev_boot_setup_check(dev);
575 +
576 + el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
577 +diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
578 +index e463d10..5673d6e 100644
579 +--- a/drivers/net/ethernet/3com/3c59x.c
580 ++++ b/drivers/net/ethernet/3com/3c59x.c
581 +@@ -632,7 +632,6 @@ struct vortex_private {
582 + pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
583 + open:1,
584 + medialock:1,
585 +- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
586 + large_frames:1, /* accept large frames */
587 + handling_irq:1; /* private in_irq indicator */
588 + /* {get|set}_wol operations are already serialized by rtnl.
589 +@@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device)
590 +
591 + unregister_netdev(dev);
592 + iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
593 +- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
594 ++ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
595 +
596 + free_netdev(dev);
597 + return 0;
598 +@@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
599 + if (rc < 0)
600 + goto out;
601 +
602 ++ rc = pci_request_regions(pdev, DRV_NAME);
603 ++ if (rc < 0) {
604 ++ pci_disable_device(pdev);
605 ++ goto out;
606 ++ }
607 ++
608 + unit = vortex_cards_found;
609 +
610 + if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
611 +@@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
612 + if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
613 + ioaddr = pci_iomap(pdev, 0, 0);
614 + if (!ioaddr) {
615 ++ pci_release_regions(pdev);
616 + pci_disable_device(pdev);
617 + rc = -ENOMEM;
618 + goto out;
619 +@@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
620 + ent->driver_data, unit);
621 + if (rc < 0) {
622 + pci_iounmap(pdev, ioaddr);
623 ++ pci_release_regions(pdev);
624 + pci_disable_device(pdev);
625 + goto out;
626 + }
627 +@@ -1179,11 +1186,6 @@ static int __devinit vortex_probe1(struct device *gendev,
628 +
629 + /* PCI-only startup logic */
630 + if (pdev) {
631 +- /* EISA resources already marked, so only PCI needs to do this here */
632 +- /* Ignore return value, because Cardbus drivers already allocate for us */
633 +- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
634 +- vp->must_free_region = 1;
635 +-
636 + /* enable bus-mastering if necessary */
637 + if (vci->flags & PCI_USES_MASTER)
638 + pci_set_master(pdev);
639 +@@ -1221,7 +1223,7 @@ static int __devinit vortex_probe1(struct device *gendev,
640 + &vp->rx_ring_dma);
641 + retval = -ENOMEM;
642 + if (!vp->rx_ring)
643 +- goto free_region;
644 ++ goto free_device;
645 +
646 + vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
647 + vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
648 +@@ -1486,9 +1488,7 @@ free_ring:
649 + + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
650 + vp->rx_ring,
651 + vp->rx_ring_dma);
652 +-free_region:
653 +- if (vp->must_free_region)
654 +- release_region(dev->base_addr, vci->io_size);
655 ++free_device:
656 + free_netdev(dev);
657 + pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
658 + out:
659 +@@ -3256,8 +3256,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
660 + + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
661 + vp->rx_ring,
662 + vp->rx_ring_dma);
663 +- if (vp->must_free_region)
664 +- release_region(dev->base_addr, vp->io_size);
665 ++
666 ++ pci_release_regions(pdev);
667 ++
668 + free_netdev(dev);
669 + }
670 +
671 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
672 +index dd037dd..cf20388 100644
673 +--- a/drivers/net/ethernet/realtek/r8169.c
674 ++++ b/drivers/net/ethernet/realtek/r8169.c
675 +@@ -1690,8 +1690,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
676 +
677 + if (opts2 & RxVlanTag)
678 + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
679 +-
680 +- desc->opts2 = 0;
681 + }
682 +
683 + static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
684 +@@ -5434,8 +5432,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
685 + !(status & (RxRWT | RxFOVF)) &&
686 + (dev->features & NETIF_F_RXALL))
687 + goto process_pkt;
688 +-
689 +- rtl8169_mark_to_asic(desc, rx_buf_sz);
690 + } else {
691 + struct sk_buff *skb;
692 + dma_addr_t addr;
693 +@@ -5456,16 +5452,14 @@ process_pkt:
694 + if (unlikely(rtl8169_fragmented_frame(status))) {
695 + dev->stats.rx_dropped++;
696 + dev->stats.rx_length_errors++;
697 +- rtl8169_mark_to_asic(desc, rx_buf_sz);
698 +- continue;
699 ++ goto release_descriptor;
700 + }
701 +
702 + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
703 + tp, pkt_size, addr);
704 +- rtl8169_mark_to_asic(desc, rx_buf_sz);
705 + if (!skb) {
706 + dev->stats.rx_dropped++;
707 +- continue;
708 ++ goto release_descriptor;
709 + }
710 +
711 + rtl8169_rx_csum(skb, status);
712 +@@ -5481,6 +5475,10 @@ process_pkt:
713 + tp->rx_stats.bytes += pkt_size;
714 + u64_stats_update_end(&tp->rx_stats.syncp);
715 + }
716 ++release_descriptor:
717 ++ desc->opts2 = 0;
718 ++ wmb();
719 ++ rtl8169_mark_to_asic(desc, rx_buf_sz);
720 + }
721 +
722 + count = cur_rx - tp->cur_rx;
723 +diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
724 +index eb85217..192026f 100644
725 +--- a/drivers/net/ethernet/sfc/mcdi.c
726 ++++ b/drivers/net/ethernet/sfc/mcdi.c
727 +@@ -640,7 +640,7 @@ fail:
728 + int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
729 + u16 *fw_subtype_list, u32 *capabilities)
730 + {
731 +- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
732 ++ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
733 + size_t outlen, offset, i;
734 + int port_num = efx_port_num(efx);
735 + int rc;
736 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
737 +index 956a5ed..7160523 100644
738 +--- a/drivers/net/macvlan.c
739 ++++ b/drivers/net/macvlan.c
740 +@@ -205,7 +205,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
741 + }
742 +
743 + if (port->passthru)
744 +- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
745 ++ vlan = list_first_or_null_rcu(&port->vlans,
746 ++ struct macvlan_dev, list);
747 + else
748 + vlan = macvlan_hash_lookup(port, eth->h_dest);
749 + if (vlan == NULL)
750 +@@ -724,7 +725,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
751 + if (err < 0)
752 + goto destroy_port;
753 +
754 +- list_add_tail(&vlan->list, &port->vlans);
755 ++ list_add_tail_rcu(&vlan->list, &port->vlans);
756 + netif_stacked_transfer_operstate(lowerdev, dev);
757 +
758 + return 0;
759 +@@ -750,7 +751,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
760 + {
761 + struct macvlan_dev *vlan = netdev_priv(dev);
762 +
763 +- list_del(&vlan->list);
764 ++ list_del_rcu(&vlan->list);
765 + unregister_netdevice_queue(dev, head);
766 + }
767 + EXPORT_SYMBOL_GPL(macvlan_dellink);
768 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
769 +index 8669c77..3b6e932 100644
770 +--- a/drivers/net/usb/qmi_wwan.c
771 ++++ b/drivers/net/usb/qmi_wwan.c
772 +@@ -9,6 +9,7 @@
773 + #include <linux/module.h>
774 + #include <linux/netdevice.h>
775 + #include <linux/ethtool.h>
776 ++#include <linux/etherdevice.h>
777 + #include <linux/mii.h>
778 + #include <linux/usb.h>
779 + #include <linux/usb/cdc.h>
780 +@@ -174,6 +175,93 @@ err:
781 + return status;
782 + }
783 +
784 ++/* default ethernet address used by the modem */
785 ++static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
786 ++
787 ++/* Make up an ethernet header if the packet doesn't have one.
788 ++ *
789 ++ * A firmware bug common among several devices cause them to send raw
790 ++ * IP packets under some circumstances. There is no way for the
791 ++ * driver/host to know when this will happen. And even when the bug
792 ++ * hits, some packets will still arrive with an intact header.
793 ++ *
794 ++ * The supported devices are only capably of sending IPv4, IPv6 and
795 ++ * ARP packets on a point-to-point link. Any packet with an ethernet
796 ++ * header will have either our address or a broadcast/multicast
797 ++ * address as destination. ARP packets will always have a header.
798 ++ *
799 ++ * This means that this function will reliably add the appropriate
800 ++ * header iff necessary, provided our hardware address does not start
801 ++ * with 4 or 6.
802 ++ *
803 ++ * Another common firmware bug results in all packets being addressed
804 ++ * to 00:a0:c6:00:00:00 despite the host address being different.
805 ++ * This function will also fixup such packets.
806 ++ */
807 ++static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
808 ++{
809 ++ __be16 proto;
810 ++
811 ++ /* usbnet rx_complete guarantees that skb->len is at least
812 ++ * hard_header_len, so we can inspect the dest address without
813 ++ * checking skb->len
814 ++ */
815 ++ switch (skb->data[0] & 0xf0) {
816 ++ case 0x40:
817 ++ proto = htons(ETH_P_IP);
818 ++ break;
819 ++ case 0x60:
820 ++ proto = htons(ETH_P_IPV6);
821 ++ break;
822 ++ case 0x00:
823 ++ if (is_multicast_ether_addr(skb->data))
824 ++ return 1;
825 ++ /* possibly bogus destination - rewrite just in case */
826 ++ skb_reset_mac_header(skb);
827 ++ goto fix_dest;
828 ++ default:
829 ++ /* pass along other packets without modifications */
830 ++ return 1;
831 ++ }
832 ++ if (skb_headroom(skb) < ETH_HLEN)
833 ++ return 0;
834 ++ skb_push(skb, ETH_HLEN);
835 ++ skb_reset_mac_header(skb);
836 ++ eth_hdr(skb)->h_proto = proto;
837 ++ memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
838 ++fix_dest:
839 ++ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
840 ++ return 1;
841 ++}
842 ++
843 ++/* very simplistic detection of IPv4 or IPv6 headers */
844 ++static bool possibly_iphdr(const char *data)
845 ++{
846 ++ return (data[0] & 0xd0) == 0x40;
847 ++}
848 ++
849 ++/* disallow addresses which may be confused with IP headers */
850 ++static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
851 ++{
852 ++ struct sockaddr *addr = p;
853 ++
854 ++ if (!is_valid_ether_addr(addr->sa_data) ||
855 ++ possibly_iphdr(addr->sa_data))
856 ++ return -EADDRNOTAVAIL;
857 ++ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
858 ++ return 0;
859 ++}
860 ++
861 ++static const struct net_device_ops qmi_wwan_netdev_ops = {
862 ++ .ndo_open = usbnet_open,
863 ++ .ndo_stop = usbnet_stop,
864 ++ .ndo_start_xmit = usbnet_start_xmit,
865 ++ .ndo_tx_timeout = usbnet_tx_timeout,
866 ++ .ndo_change_mtu = usbnet_change_mtu,
867 ++ .ndo_set_mac_address = qmi_wwan_mac_addr,
868 ++ .ndo_validate_addr = eth_validate_addr,
869 ++};
870 ++
871 + /* using a counter to merge subdriver requests with our own into a combined state */
872 + static int qmi_wwan_manage_power(struct usbnet *dev, int on)
873 + {
874 +@@ -257,6 +345,18 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
875 + /* save subdriver struct for suspend/resume wrappers */
876 + dev->data[0] = (unsigned long)subdriver;
877 +
878 ++ /* Never use the same address on both ends of the link, even
879 ++ * if the buggy firmware told us to.
880 ++ */
881 ++ if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
882 ++ eth_hw_addr_random(dev->net);
883 ++
884 ++ /* make MAC addr easily distinguishable from an IP header */
885 ++ if (possibly_iphdr(dev->net->dev_addr)) {
886 ++ dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
887 ++ dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
888 ++ }
889 ++ dev->net->netdev_ops = &qmi_wwan_netdev_ops;
890 + err:
891 + return rv;
892 + }
893 +@@ -326,6 +426,7 @@ static const struct driver_info qmi_wwan_shared = {
894 + .bind = qmi_wwan_bind_shared,
895 + .unbind = qmi_wwan_unbind_shared,
896 + .manage_power = qmi_wwan_manage_power,
897 ++ .rx_fixup = qmi_wwan_rx_fixup,
898 + };
899 +
900 + static const struct driver_info qmi_wwan_force_int0 = {
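
Note: the rx_fixup logic above keys off the first nibble of the payload to decide whether the modem sent a raw IP packet instead of an ethernet frame. A minimal userspace sketch of the same nibble check (illustrative only; the function name below is made up, not the driver's):

#include <stdio.h>

/* IPv4 packets start with version nibble 4, IPv6 with 6, so a frame whose
 * first byte has 0x4 or 0x6 in the high nibble is likely a raw IP packet. */
static int looks_like_ip(unsigned char first_byte)
{
    return (first_byte & 0xd0) == 0x40;   /* matches 0x4x and 0x6x only */
}

int main(void)
{
    unsigned char ipv4 = 0x45;   /* version 4, IHL 5 */
    unsigned char ipv6 = 0x60;   /* version 6 */
    unsigned char eth  = 0x02;   /* first byte of a typical ethernet dest addr */

    printf("%d %d %d\n", looks_like_ip(ipv4), looks_like_ip(ipv6), looks_like_ip(eth));
    return 0;
}
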
901 +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
902 +index 91e2c4f..f6a095f 100644
903 +--- a/drivers/net/wireless/ath/ath9k/main.c
904 ++++ b/drivers/net/wireless/ath/ath9k/main.c
905 +@@ -1711,6 +1711,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
906 + struct ath_common *common = ath9k_hw_common(sc->sc_ah);
907 + struct ath_node *an = (struct ath_node *) sta->drv_priv;
908 + struct ieee80211_key_conf ps_key = { };
909 ++ int key;
910 +
911 + ath_node_attach(sc, sta, vif);
912 +
913 +@@ -1718,7 +1719,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
914 + vif->type != NL80211_IFTYPE_AP_VLAN)
915 + return 0;
916 +
917 +- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
918 ++ key = ath_key_config(common, vif, sta, &ps_key);
919 ++ if (key > 0)
920 ++ an->ps_key = key;
921 +
922 + return 0;
923 + }
924 +@@ -1735,6 +1738,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
925 + return;
926 +
927 + ath_key_delete(common, &ps_key);
928 ++ an->ps_key = 0;
929 + }
930 +
931 + static int ath9k_sta_remove(struct ieee80211_hw *hw,
932 +diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
933 +index bb2848a..448f545 100644
934 +--- a/drivers/net/wireless/b43/dma.c
935 ++++ b/drivers/net/wireless/b43/dma.c
936 +@@ -1733,6 +1733,25 @@ drop_recycle_buffer:
937 + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
938 + }
939 +
940 ++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
941 ++{
942 ++ int current_slot, previous_slot;
943 ++
944 ++ B43_WARN_ON(ring->tx);
945 ++
946 ++ /* Device has filled all buffers, drop all packets and let TCP
947 ++ * decrease speed.
948 ++ * Decrement RX index by one will let the device to see all slots
949 ++ * as free again
950 ++ */
951 ++ /*
952 ++ *TODO: How to increase rx_drop in mac80211?
953 ++ */
954 ++ current_slot = ring->ops->get_current_rxslot(ring);
955 ++ previous_slot = prev_slot(ring, current_slot);
956 ++ ring->ops->set_current_rxslot(ring, previous_slot);
957 ++}
958 ++
959 + void b43_dma_rx(struct b43_dmaring *ring)
960 + {
961 + const struct b43_dma_ops *ops = ring->ops;
962 +diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
963 +index 9fdd198..df8c8cd 100644
964 +--- a/drivers/net/wireless/b43/dma.h
965 ++++ b/drivers/net/wireless/b43/dma.h
966 +@@ -9,7 +9,7 @@
967 + /* DMA-Interrupt reasons. */
968 + #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
969 + | (1 << 14) | (1 << 15))
970 +-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
971 ++#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
972 + #define B43_DMAIRQ_RX_DONE (1 << 16)
973 +
974 + /*** 32-bit DMA Engine. ***/
975 +@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
976 + void b43_dma_handle_txstatus(struct b43_wldev *dev,
977 + const struct b43_txstatus *status);
978 +
979 ++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
980 ++
981 + void b43_dma_rx(struct b43_dmaring *ring);
982 +
983 + void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
984 +diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
985 +index 14fd2ca..b54c750 100644
986 +--- a/drivers/net/wireless/b43/main.c
987 ++++ b/drivers/net/wireless/b43/main.c
988 +@@ -1895,30 +1895,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
989 + }
990 + }
991 +
992 +- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
993 +- B43_DMAIRQ_NONFATALMASK))) {
994 +- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
995 +- b43err(dev->wl, "Fatal DMA error: "
996 +- "0x%08X, 0x%08X, 0x%08X, "
997 +- "0x%08X, 0x%08X, 0x%08X\n",
998 +- dma_reason[0], dma_reason[1],
999 +- dma_reason[2], dma_reason[3],
1000 +- dma_reason[4], dma_reason[5]);
1001 +- b43err(dev->wl, "This device does not support DMA "
1002 ++ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
1003 ++ b43err(dev->wl,
1004 ++ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
1005 ++ dma_reason[0], dma_reason[1],
1006 ++ dma_reason[2], dma_reason[3],
1007 ++ dma_reason[4], dma_reason[5]);
1008 ++ b43err(dev->wl, "This device does not support DMA "
1009 + "on your system. It will now be switched to PIO.\n");
1010 +- /* Fall back to PIO transfers if we get fatal DMA errors! */
1011 +- dev->use_pio = true;
1012 +- b43_controller_restart(dev, "DMA error");
1013 +- return;
1014 +- }
1015 +- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
1016 +- b43err(dev->wl, "DMA error: "
1017 +- "0x%08X, 0x%08X, 0x%08X, "
1018 +- "0x%08X, 0x%08X, 0x%08X\n",
1019 +- dma_reason[0], dma_reason[1],
1020 +- dma_reason[2], dma_reason[3],
1021 +- dma_reason[4], dma_reason[5]);
1022 +- }
1023 ++ /* Fall back to PIO transfers if we get fatal DMA errors! */
1024 ++ dev->use_pio = true;
1025 ++ b43_controller_restart(dev, "DMA error");
1026 ++ return;
1027 + }
1028 +
1029 + if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
1030 +@@ -1937,6 +1925,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1031 + handle_irq_noise(dev);
1032 +
1033 + /* Check the DMA reason registers for received data. */
1034 ++ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
1035 ++ if (B43_DEBUG)
1036 ++ b43warn(dev->wl, "RX descriptor underrun\n");
1037 ++ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
1038 ++ }
1039 + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
1040 + if (b43_using_pio_transfers(dev))
1041 + b43_pio_rx(dev->pio.rx_queue);
1042 +@@ -1994,7 +1987,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
1043 + return IRQ_NONE;
1044 +
1045 + dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
1046 +- & 0x0001DC00;
1047 ++ & 0x0001FC00;
1048 + dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
1049 + & 0x0000DC00;
1050 + dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
1051 +@@ -3122,7 +3115,7 @@ static int b43_chip_init(struct b43_wldev *dev)
1052 + b43_write32(dev, 0x018C, 0x02000000);
1053 + }
1054 + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
1055 +- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
1056 ++ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
1057 + b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
1058 + b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
1059 + b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
1060 +diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
1061 +index 2977a12..3050b6a 100644
1062 +--- a/drivers/net/wireless/mwifiex/cmdevt.c
1063 ++++ b/drivers/net/wireless/mwifiex/cmdevt.c
1064 +@@ -1084,6 +1084,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1065 + adapter->if_ops.wakeup(adapter);
1066 + adapter->hs_activated = false;
1067 + adapter->is_hs_configured = false;
1068 ++ adapter->is_suspended = false;
1069 + mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1070 + MWIFIEX_BSS_ROLE_ANY),
1071 + false);
1072 +diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
1073 +index 4fb146a..dc70c0f 100644
1074 +--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
1075 ++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
1076 +@@ -105,7 +105,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
1077 + } else {
1078 + /* Multicast */
1079 + priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
1080 +- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
1081 ++ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
1082 + dev_dbg(priv->adapter->dev,
1083 + "info: Enabling All Multicast!\n");
1084 + priv->curr_pkt_filter |=
1085 +@@ -117,20 +117,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
1086 + dev_dbg(priv->adapter->dev,
1087 + "info: Set multicast list=%d\n",
1088 + mcast_list->num_multicast_addr);
1089 +- /* Set multicast addresses to firmware */
1090 +- if (old_pkt_filter == priv->curr_pkt_filter) {
1091 +- /* Send request to firmware */
1092 +- ret = mwifiex_send_cmd_async(priv,
1093 +- HostCmd_CMD_MAC_MULTICAST_ADR,
1094 +- HostCmd_ACT_GEN_SET, 0,
1095 +- mcast_list);
1096 +- } else {
1097 +- /* Send request to firmware */
1098 +- ret = mwifiex_send_cmd_async(priv,
1099 +- HostCmd_CMD_MAC_MULTICAST_ADR,
1100 +- HostCmd_ACT_GEN_SET, 0,
1101 +- mcast_list);
1102 +- }
1103 ++ /* Send multicast addresses to firmware */
1104 ++ ret = mwifiex_send_cmd_async(priv,
1105 ++ HostCmd_CMD_MAC_MULTICAST_ADR,
1106 ++ HostCmd_ACT_GEN_SET, 0,
1107 ++ mcast_list);
1108 + }
1109 + }
1110 + }
1111 +diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
1112 +index 22b2dfa..fdacfce 100644
1113 +--- a/drivers/platform/x86/hp_accel.c
1114 ++++ b/drivers/platform/x86/hp_accel.c
1115 +@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
1116 +
1117 + static int lis3lv02d_resume(struct acpi_device *device)
1118 + {
1119 +- return lis3lv02d_poweron(&lis3_dev);
1120 ++ lis3lv02d_poweron(&lis3_dev);
1121 ++ return 0;
1122 + }
1123 + #else
1124 + #define lis3lv02d_suspend NULL
1125 +diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
1126 +index 8361187..9ea2555 100644
1127 +--- a/drivers/rtc/rtc-pcf2123.c
1128 ++++ b/drivers/rtc/rtc-pcf2123.c
1129 +@@ -264,6 +264,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
1130 +
1131 + if (!(rxbuf[0] & 0x20)) {
1132 + dev_err(&spi->dev, "chip not found\n");
1133 ++ ret = -ENODEV;
1134 + goto kfree_exit;
1135 + }
1136 +
1137 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1138 +index 5b3cadb..2bbb845 100644
1139 +--- a/drivers/scsi/sd.c
1140 ++++ b/drivers/scsi/sd.c
1141 +@@ -140,6 +140,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
1142 + char *buffer_data;
1143 + struct scsi_mode_data data;
1144 + struct scsi_sense_hdr sshdr;
1145 ++ const char *temp = "temporary ";
1146 + int len;
1147 +
1148 + if (sdp->type != TYPE_DISK)
1149 +@@ -148,6 +149,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
1150 + * it's not worth the risk */
1151 + return -EINVAL;
1152 +
1153 ++ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
1154 ++ buf += sizeof(temp) - 1;
1155 ++ sdkp->cache_override = 1;
1156 ++ } else {
1157 ++ sdkp->cache_override = 0;
1158 ++ }
1159 ++
1160 + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
1161 + len = strlen(sd_cache_types[i]);
1162 + if (strncmp(sd_cache_types[i], buf, len) == 0 &&
1163 +@@ -160,6 +168,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
1164 + return -EINVAL;
1165 + rcd = ct & 0x01 ? 1 : 0;
1166 + wce = ct & 0x02 ? 1 : 0;
1167 ++
1168 ++ if (sdkp->cache_override) {
1169 ++ sdkp->WCE = wce;
1170 ++ sdkp->RCD = rcd;
1171 ++ return count;
1172 ++ }
1173 ++
1174 + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
1175 + SD_MAX_RETRIES, &data, NULL))
1176 + return -EINVAL;
1177 +@@ -2126,6 +2141,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
1178 + int old_rcd = sdkp->RCD;
1179 + int old_dpofua = sdkp->DPOFUA;
1180 +
1181 ++
1182 ++ if (sdkp->cache_override)
1183 ++ return;
1184 ++
1185 + first_len = 4;
1186 + if (sdp->skip_ms_page_8) {
1187 + if (sdp->type == TYPE_RBC)
1188 +@@ -2607,6 +2626,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1189 + sdkp->capacity = 0;
1190 + sdkp->media_present = 1;
1191 + sdkp->write_prot = 0;
1192 ++ sdkp->cache_override = 0;
1193 + sdkp->WCE = 0;
1194 + sdkp->RCD = 0;
1195 + sdkp->ATO = 0;
1196 +diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
1197 +index f703f48..e2b2956 100644
1198 +--- a/drivers/scsi/sd.h
1199 ++++ b/drivers/scsi/sd.h
1200 +@@ -67,6 +67,7 @@ struct scsi_disk {
1201 + u8 protection_type;/* Data Integrity Field */
1202 + u8 provisioning_mode;
1203 + unsigned ATO : 1; /* state of disk ATO bit */
1204 ++ unsigned cache_override : 1; /* temp override of WCE,RCD */
1205 + unsigned WCE : 1; /* state of disk WCE bit */
1206 + unsigned RCD : 1; /* state of disk RCD bit, unused */
1207 + unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
1208 +diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
1209 +index 006f605..fd669b5 100644
1210 +--- a/drivers/target/iscsi/iscsi_target_erl1.c
1211 ++++ b/drivers/target/iscsi/iscsi_target_erl1.c
1212 +@@ -824,7 +824,7 @@ static int iscsit_attach_ooo_cmdsn(
1213 + /*
1214 + * CmdSN is greater than the tail of the list.
1215 + */
1216 +- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
1217 ++ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
1218 + list_add_tail(&ooo_cmdsn->ooo_list,
1219 + &sess->sess_ooo_cmdsn_list);
1220 + else {
1221 +@@ -834,11 +834,12 @@ static int iscsit_attach_ooo_cmdsn(
1222 + */
1223 + list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
1224 + ooo_list) {
1225 +- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
1226 ++ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
1227 + continue;
1228 +
1229 ++ /* Insert before this entry */
1230 + list_add(&ooo_cmdsn->ooo_list,
1231 +- &ooo_tmp->ooo_list);
1232 ++ ooo_tmp->ooo_list.prev);
1233 + break;
1234 + }
1235 + }
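
Note: the change above swaps a plain "<" on CmdSN for iscsi_sna_lt(), which compares 32-bit sequence numbers modulo wraparound. A hedged userspace illustration of that kind of ordering (the formula below follows RFC 1982-style serial arithmetic and is not copied from the target code):

#include <stdio.h>
#include <stdint.h>

/* "s1 precedes s2" for 32-bit sequence numbers that are allowed to wrap. */
static int sna_lt(uint32_t s1, uint32_t s2)
{
    return s1 != s2 &&
           ((s1 < s2 && (s2 - s1) < 0x80000000u) ||
            (s1 > s2 && (s1 - s2) > 0x80000000u));
}

int main(void)
{
    uint32_t a = 0xfffffff0u, b = 0x10u;   /* b is 32 steps after a, across the wrap */

    printf("plain < : %d\n", a < b);        /* 0 - wraparound breaks the naive test */
    printf("sna_lt  : %d\n", sna_lt(a, b)); /* 1 - a still precedes b */
    return 0;
}
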
1236 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1237 +index ec970cb..19e4518 100644
1238 +--- a/fs/ext4/mballoc.c
1239 ++++ b/fs/ext4/mballoc.c
1240 +@@ -1980,7 +1980,11 @@ repeat:
1241 + group = ac->ac_g_ex.fe_group;
1242 +
1243 + for (i = 0; i < ngroups; group++, i++) {
1244 +- if (group == ngroups)
1245 ++ /*
1246 ++ * Artificially restricted ngroups for non-extent
1247 ++ * files makes group > ngroups possible on first loop.
1248 ++ */
1249 ++ if (group >= ngroups)
1250 + group = 0;
1251 +
1252 + /* This now checks without needing the buddy page */
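
Note: the one-character change from == to >= matters because, as the new comment says, the starting group can already be past the artificially restricted ngroups. A small standalone model of the loop shape (illustrative names, not ext4 code):

#include <stdio.h>

/* Visit ngroups groups starting at 'start', wrapping to 0 at the end.
 * If start can already be >= ngroups, only the ">=" form ever wraps;
 * "==" walks off into nonexistent groups. */
static int valid_visits(int start, int ngroups, int use_ge)
{
    int group = start, count = 0;

    for (int i = 0; i < ngroups; group++, i++) {
        if (use_ge ? group >= ngroups : group == ngroups)
            group = 0;
        if (group < ngroups)
            count++;
    }
    return count;
}

int main(void)
{
    printf("with ==: %d of 8 groups visited\n", valid_visits(10, 8, 0)); /* 0 */
    printf("with >=: %d of 8 groups visited\n", valid_visits(10, 8, 1)); /* 8 */
    return 0;
}
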
1253 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1254 +index 001ef01..36ad5b4 100644
1255 +--- a/fs/hugetlbfs/inode.c
1256 ++++ b/fs/hugetlbfs/inode.c
1257 +@@ -927,9 +927,13 @@ static int can_do_hugetlb_shm(void)
1258 + return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
1259 + }
1260 +
1261 +-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
1262 +- size_t size, vm_flags_t acctflag,
1263 +- struct user_struct **user, int creat_flags)
1264 ++/*
1265 ++ * Note that size should be aligned to proper hugepage size in caller side,
1266 ++ * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
1267 ++ */
1268 ++struct file *hugetlb_file_setup(const char *name, size_t size,
1269 ++ vm_flags_t acctflag, struct user_struct **user,
1270 ++ int creat_flags)
1271 + {
1272 + int error = -ENOMEM;
1273 + struct file *file;
1274 +@@ -937,8 +941,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
1275 + struct path path;
1276 + struct dentry *root;
1277 + struct qstr quick_string;
1278 +- struct hstate *hstate;
1279 +- unsigned long num_pages;
1280 +
1281 + *user = NULL;
1282 + if (!hugetlbfs_vfsmount)
1283 +@@ -972,12 +974,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
1284 + if (!inode)
1285 + goto out_dentry;
1286 +
1287 +- hstate = hstate_inode(inode);
1288 +- size += addr & ~huge_page_mask(hstate);
1289 +- num_pages = ALIGN(size, huge_page_size(hstate)) >>
1290 +- huge_page_shift(hstate);
1291 + error = -ENOMEM;
1292 +- if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
1293 ++ if (hugetlb_reserve_pages(inode, 0,
1294 ++ size >> huge_page_shift(hstate_inode(inode)), NULL,
1295 ++ acctflag))
1296 + goto out_inode;
1297 +
1298 + d_instantiate(path.dentry, inode);
1299 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1300 +index dd0308d..64198ed 100644
1301 +--- a/fs/nfsd/nfs4proc.c
1302 ++++ b/fs/nfsd/nfs4proc.c
1303 +@@ -270,6 +270,7 @@ static __be32
1304 + do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
1305 + {
1306 + __be32 status;
1307 ++ int accmode = 0;
1308 +
1309 + /* We don't know the target directory, and therefore can not
1310 + * set the change info
1311 +@@ -283,9 +284,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
1312 +
1313 + open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
1314 + (open->op_iattr.ia_size == 0);
1315 ++ /*
1316 ++ * In the delegation case, the client is telling us about an
1317 ++ * open that it *already* performed locally, some time ago. We
1318 ++ * should let it succeed now if possible.
1319 ++ *
1320 ++ * In the case of a CLAIM_FH open, on the other hand, the client
1321 ++ * may be counting on us to enforce permissions (the Linux 4.1
1322 ++ * client uses this for normal opens, for example).
1323 ++ */
1324 ++ if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
1325 ++ accmode = NFSD_MAY_OWNER_OVERRIDE;
1326 +
1327 +- status = do_open_permission(rqstp, current_fh, open,
1328 +- NFSD_MAY_OWNER_OVERRIDE);
1329 ++ status = do_open_permission(rqstp, current_fh, open, accmode);
1330 +
1331 + return status;
1332 + }
1333 +diff --git a/include/linux/audit.h b/include/linux/audit.h
1334 +index ed3ef19..4f334d5 100644
1335 +--- a/include/linux/audit.h
1336 ++++ b/include/linux/audit.h
1337 +@@ -480,7 +480,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
1338 + unsigned long a1, unsigned long a2,
1339 + unsigned long a3)
1340 + {
1341 +- if (unlikely(!audit_dummy_context()))
1342 ++ if (unlikely(current->audit_context))
1343 + __audit_syscall_entry(arch, major, a0, a1, a2, a3);
1344 + }
1345 + static inline void audit_syscall_exit(void *pt_regs)
1346 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1347 +index 000837e..2c36a71 100644
1348 +--- a/include/linux/hugetlb.h
1349 ++++ b/include/linux/hugetlb.h
1350 +@@ -152,8 +152,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
1351 +
1352 + extern const struct file_operations hugetlbfs_file_operations;
1353 + extern const struct vm_operations_struct hugetlb_vm_ops;
1354 +-struct file *hugetlb_file_setup(const char *name, unsigned long addr,
1355 +- size_t size, vm_flags_t acct,
1356 ++struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
1357 + struct user_struct **user, int creat_flags);
1358 +
1359 + static inline int is_file_hugepages(struct file *file)
1360 +@@ -170,8 +169,8 @@ static inline int is_file_hugepages(struct file *file)
1361 +
1362 + #define is_file_hugepages(file) 0
1363 + static inline struct file *
1364 +-hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
1365 +- vm_flags_t acctflag, struct user_struct **user, int creat_flags)
1366 ++hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
1367 ++ struct user_struct **user, int creat_flags)
1368 + {
1369 + return ERR_PTR(-ENOSYS);
1370 + }
1371 +@@ -294,7 +293,7 @@ static inline unsigned hstate_index_to_shift(unsigned index)
1372 + return hstates[index].order + PAGE_SHIFT;
1373 + }
1374 +
1375 +-#else
1376 ++#else /* CONFIG_HUGETLB_PAGE */
1377 + struct hstate {};
1378 + #define alloc_huge_page_node(h, nid) NULL
1379 + #define alloc_bootmem_huge_page(h) NULL
1380 +diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h
1381 +index 9ca1007..ee6b3c4 100644
1382 +--- a/include/linux/if_cablemodem.h
1383 ++++ b/include/linux/if_cablemodem.h
1384 +@@ -12,11 +12,11 @@
1385 + */
1386 +
1387 + /* some useful defines for sb1000.c e cmconfig.c - fv */
1388 +-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
1389 +-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
1390 +-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
1391 +-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
1392 +-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
1393 +-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
1394 ++#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
1395 ++#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
1396 ++#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
1397 ++#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
1398 ++#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
1399 ++#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
1400 +
1401 + #endif
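
Note: the if_cablemodem.h hunk just adds parentheses, but that is what keeps these ioctl numbers from mis-expanding inside larger expressions. A tiny demonstration with a stand-in base value (SIOCDEVPRIVATE is commonly 0x89F0, used here only for illustration):

#include <stdio.h>

#define PRIV_BASE 0x89F0            /* stand-in for SIOCDEVPRIVATE */
#define CMD_BAD   PRIV_BASE+2       /* unparenthesized, as in the old header */
#define CMD_GOOD  (PRIV_BASE+2)     /* parenthesized, as in the fixed header */

int main(void)
{
    /* Used inside a larger expression, only the parenthesized form is right: */
    printf("CMD_BAD  * 2 = %#x\n", CMD_BAD * 2);   /* expands to 0x89F0 + 2*2 */
    printf("CMD_GOOD * 2 = %#x\n", CMD_GOOD * 2);  /* expands to (0x89F0 + 2)*2 */
    return 0;
}
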
1402 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
1403 +index d079290..6f95e24 100644
1404 +--- a/include/linux/rculist.h
1405 ++++ b/include/linux/rculist.h
1406 +@@ -242,6 +242,23 @@ static inline void list_splice_init_rcu(struct list_head *list,
1407 + list_entry_rcu((ptr)->next, type, member)
1408 +
1409 + /**
1410 ++ * list_first_or_null_rcu - get the first element from a list
1411 ++ * @ptr: the list head to take the element from.
1412 ++ * @type: the type of the struct this is embedded in.
1413 ++ * @member: the name of the list_struct within the struct.
1414 ++ *
1415 ++ * Note that if the list is empty, it returns NULL.
1416 ++ *
1417 ++ * This primitive may safely run concurrently with the _rcu list-mutation
1418 ++ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
1419 ++ */
1420 ++#define list_first_or_null_rcu(ptr, type, member) \
1421 ++ ({struct list_head *__ptr = (ptr); \
1422 ++ struct list_head __rcu *__next = list_next_rcu(__ptr); \
1423 ++ likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
1424 ++ })
1425 ++
1426 ++/**
1427 + * list_for_each_entry_rcu - iterate over rcu list of given type
1428 + * @pos: the type * to use as a loop cursor.
1429 + * @head: the head for your list.
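
Note: list_first_or_null_rcu() exists so a caller (macvlan, earlier in this patch) gets NULL from an empty list instead of a pointer computed from the list head itself. The non-RCU userspace sketch below shows the difference; the helper names are invented for the example:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };
struct item { int value; struct list_head list; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define first_entry(head, type, member) \
    container_of((head)->next, type, member)
#define first_or_null(head, type, member) \
    ((head)->next != (head) ? first_entry(head, type, member) : NULL)

int main(void)
{
    struct list_head empty = { &empty, &empty };   /* an empty list head */

    struct item *bogus = first_entry(&empty, struct item, list);
    struct item *safe  = first_or_null(&empty, struct item, list);

    /* 'bogus' points into the list head itself and must not be dereferenced;
     * 'safe' is NULL, which the caller can test for. */
    printf("bogus=%p safe=%p\n", (void *)bogus, (void *)safe);
    return 0;
}
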
1430 +diff --git a/include/net/sock.h b/include/net/sock.h
1431 +index 59a8947..f673ba5 100644
1432 +--- a/include/net/sock.h
1433 ++++ b/include/net/sock.h
1434 +@@ -793,6 +793,18 @@ struct inet_hashinfo;
1435 + struct raw_hashinfo;
1436 + struct module;
1437 +
1438 ++/*
1439 ++ * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
1440 ++ * un-modified. Special care is taken when initializing object to zero.
1441 ++ */
1442 ++static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1443 ++{
1444 ++ if (offsetof(struct sock, sk_node.next) != 0)
1445 ++ memset(sk, 0, offsetof(struct sock, sk_node.next));
1446 ++ memset(&sk->sk_node.pprev, 0,
1447 ++ size - offsetof(struct sock, sk_node.pprev));
1448 ++}
1449 ++
1450 + /* Networking protocol blocks we attach to sockets.
1451 + * socket layer -> transport layer interface
1452 + * transport -> network interface is defined by struct inet_proto
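
Note: sk_prot_clear_nulls() above zeroes a socket while leaving sk_node.next untouched, which is what SLAB_DESTROY_BY_RCU callers need. The same offsetof() trick in plain userspace form (the struct and field names here are made up for illustration):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct obj {
    int a;
    void *keep;   /* must survive the clear, like sk_node.next above */
    int b;
};

/* Zero everything except 'keep', in the same spirit as sk_prot_clear_nulls(). */
static void clear_preserving_keep(struct obj *o)
{
    memset(o, 0, offsetof(struct obj, keep));
    memset((char *)o + offsetof(struct obj, b), 0,
           sizeof(*o) - offsetof(struct obj, b));
}

int main(void)
{
    struct obj o = { .a = 1, .keep = &o, .b = 2 };
    clear_preserving_keep(&o);
    printf("a=%d keep=%p b=%d\n", o.a, o.keep, o.b);  /* a=0, keep preserved, b=0 */
    return 0;
}
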
1453 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1454 +index 2757a11..8376a6a 100644
1455 +--- a/include/net/tcp.h
1456 ++++ b/include/net/tcp.h
1457 +@@ -948,6 +948,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1458 + if (sysctl_tcp_low_latency || !tp->ucopy.task)
1459 + return 0;
1460 +
1461 ++ skb_dst_force(skb);
1462 + __skb_queue_tail(&tp->ucopy.prequeue, skb);
1463 + tp->ucopy.memory += skb->truesize;
1464 + if (tp->ucopy.memory > sk->sk_rcvbuf) {
1465 +diff --git a/ipc/shm.c b/ipc/shm.c
1466 +index 85d81b4..a02ef57 100644
1467 +--- a/ipc/shm.c
1468 ++++ b/ipc/shm.c
1469 +@@ -479,10 +479,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
1470 +
1471 + sprintf (name, "SYSV%08x", key);
1472 + if (shmflg & SHM_HUGETLB) {
1473 ++ size_t hugesize = ALIGN(size, huge_page_size(&default_hstate));
1474 ++
1475 + /* hugetlb_file_setup applies strict accounting */
1476 + if (shmflg & SHM_NORESERVE)
1477 + acctflag = VM_NORESERVE;
1478 +- file = hugetlb_file_setup(name, 0, size, acctflag,
1479 ++ file = hugetlb_file_setup(name, hugesize, acctflag,
1480 + &shp->mlock_user, HUGETLB_SHMFS_INODE);
1481 + } else {
1482 + /*
1483 +diff --git a/kernel/kmod.c b/kernel/kmod.c
1484 +index 05698a7..f2490e1 100644
1485 +--- a/kernel/kmod.c
1486 ++++ b/kernel/kmod.c
1487 +@@ -541,6 +541,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
1488 + int retval = 0;
1489 +
1490 + helper_lock();
1491 ++ if (!sub_info->path) {
1492 ++ retval = -EINVAL;
1493 ++ goto out;
1494 ++ }
1495 ++
1496 + if (sub_info->path[0] == '\0')
1497 + goto out;
1498 +
1499 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1500 +index 01d8e62..6570548 100644
1501 +--- a/kernel/time/tick-sched.c
1502 ++++ b/kernel/time/tick-sched.c
1503 +@@ -866,7 +866,7 @@ void tick_cancel_sched_timer(int cpu)
1504 + hrtimer_cancel(&ts->sched_timer);
1505 + # endif
1506 +
1507 +- ts->nohz_mode = NOHZ_MODE_INACTIVE;
1508 ++ memset(ts, 0, sizeof(*ts));
1509 + }
1510 + #endif
1511 +
1512 +diff --git a/kernel/timer.c b/kernel/timer.c
1513 +index 6dfdb72..dd93d90 100644
1514 +--- a/kernel/timer.c
1515 ++++ b/kernel/timer.c
1516 +@@ -1680,12 +1680,12 @@ static int __cpuinit init_timers_cpu(int cpu)
1517 + boot_done = 1;
1518 + base = &boot_tvec_bases;
1519 + }
1520 ++ spin_lock_init(&base->lock);
1521 + tvec_base_done[cpu] = 1;
1522 + } else {
1523 + base = per_cpu(tvec_bases, cpu);
1524 + }
1525 +
1526 +- spin_lock_init(&base->lock);
1527 +
1528 + for (j = 0; j < TVN_SIZE; j++) {
1529 + INIT_LIST_HEAD(base->tv5.vec + j);
1530 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
1531 +index 431dba8..289197a 100644
1532 +--- a/kernel/trace/trace_events_filter.c
1533 ++++ b/kernel/trace/trace_events_filter.c
1534 +@@ -777,7 +777,11 @@ static int filter_set_pred(struct event_filter *filter,
1535 +
1536 + static void __free_preds(struct event_filter *filter)
1537 + {
1538 ++ int i;
1539 ++
1540 + if (filter->preds) {
1541 ++ for (i = 0; i < filter->n_preds; i++)
1542 ++ kfree(filter->preds[i].ops);
1543 + kfree(filter->preds);
1544 + filter->preds = NULL;
1545 + }
1546 +diff --git a/mm/mmap.c b/mm/mmap.c
1547 +index 3635d47..ed884dd 100644
1548 +--- a/mm/mmap.c
1549 ++++ b/mm/mmap.c
1550 +@@ -1130,15 +1130,19 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1551 + file = fget(fd);
1552 + if (!file)
1553 + goto out;
1554 ++ if (is_file_hugepages(file))
1555 ++ len = ALIGN(len, huge_page_size(hstate_file(file)));
1556 + } else if (flags & MAP_HUGETLB) {
1557 + struct user_struct *user = NULL;
1558 ++
1559 ++ len = ALIGN(len, huge_page_size(&default_hstate));
1560 + /*
1561 + * VM_NORESERVE is used because the reservations will be
1562 + * taken when vm_ops->mmap() is called
1563 + * A dummy user value is used because we are not locking
1564 + * memory so no accounting is necessary
1565 + */
1566 +- file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
1567 ++ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1568 + VM_NORESERVE, &user,
1569 + HUGETLB_ANONHUGE_INODE);
1570 + if (IS_ERR(file))
1571 +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
1572 +index 9757c19..daeb19d 100644
1573 +--- a/net/8021q/vlan_dev.c
1574 ++++ b/net/8021q/vlan_dev.c
1575 +@@ -598,7 +598,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
1576 + netdev_features_t features)
1577 + {
1578 + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
1579 +- u32 old_features = features;
1580 ++ netdev_features_t old_features = features;
1581 +
1582 + features &= real_dev->vlan_features;
1583 + features |= NETIF_F_RXCSUM;
1584 +diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
1585 +index 58de2a0..c83ee79 100644
1586 +--- a/net/bridge/br_stp_timer.c
1587 ++++ b/net/bridge/br_stp_timer.c
1588 +@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
1589 +
1590 + br_debug(br, "tcn timer expired\n");
1591 + spin_lock(&br->lock);
1592 +- if (br->dev->flags & IFF_UP) {
1593 ++ if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
1594 + br_transmit_tcn(br);
1595 +
1596 + mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
1597 +diff --git a/net/core/dev.c b/net/core/dev.c
1598 +index dd12421..7db83d6 100644
1599 +--- a/net/core/dev.c
1600 ++++ b/net/core/dev.c
1601 +@@ -2170,7 +2170,7 @@ EXPORT_SYMBOL(netif_skb_features);
1602 + * support DMA from it.
1603 + */
1604 + static inline int skb_needs_linearize(struct sk_buff *skb,
1605 +- int features)
1606 ++ netdev_features_t features)
1607 + {
1608 + return skb_is_nonlinear(skb) &&
1609 + ((skb_has_frag_list(skb) &&
1610 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1611 +index 6d6d7d2..7becb3f 100644
1612 +--- a/net/core/ethtool.c
1613 ++++ b/net/core/ethtool.c
1614 +@@ -1286,7 +1286,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1615 + void __user *useraddr = ifr->ifr_data;
1616 + u32 ethcmd;
1617 + int rc;
1618 +- u32 old_features;
1619 ++ netdev_features_t old_features;
1620 +
1621 + if (!dev || !netif_device_present(dev))
1622 + return -ENODEV;
1623 +diff --git a/net/core/sock.c b/net/core/sock.c
1624 +index f8b5030..561eb57 100644
1625 +--- a/net/core/sock.c
1626 ++++ b/net/core/sock.c
1627 +@@ -1093,18 +1093,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
1628 + #endif
1629 + }
1630 +
1631 +-/*
1632 +- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
1633 +- * un-modified. Special care is taken when initializing object to zero.
1634 +- */
1635 +-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1636 +-{
1637 +- if (offsetof(struct sock, sk_node.next) != 0)
1638 +- memset(sk, 0, offsetof(struct sock, sk_node.next));
1639 +- memset(&sk->sk_node.pprev, 0,
1640 +- size - offsetof(struct sock, sk_node.pprev));
1641 +-}
1642 +-
1643 + void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1644 + {
1645 + unsigned long nulls1, nulls2;
1646 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1647 +index 3889e02..7ee7121 100644
1648 +--- a/net/ipv6/tcp_ipv6.c
1649 ++++ b/net/ipv6/tcp_ipv6.c
1650 +@@ -2079,6 +2079,17 @@ void tcp6_proc_exit(struct net *net)
1651 + }
1652 + #endif
1653 +
1654 ++static void tcp_v6_clear_sk(struct sock *sk, int size)
1655 ++{
1656 ++ struct inet_sock *inet = inet_sk(sk);
1657 ++
1658 ++ /* we do not want to clear pinet6 field, because of RCU lookups */
1659 ++ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1660 ++
1661 ++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1662 ++ memset(&inet->pinet6 + 1, 0, size);
1663 ++}
1664 ++
1665 + struct proto tcpv6_prot = {
1666 + .name = "TCPv6",
1667 + .owner = THIS_MODULE,
1668 +@@ -2120,6 +2131,7 @@ struct proto tcpv6_prot = {
1669 + #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
1670 + .proto_cgroup = tcp_proto_cgroup,
1671 + #endif
1672 ++ .clear_sk = tcp_v6_clear_sk,
1673 + };
1674 +
1675 + static const struct inet6_protocol tcpv6_protocol = {
1676 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1677 +index 37b0699..aa2f18b 100644
1678 +--- a/net/ipv6/udp.c
1679 ++++ b/net/ipv6/udp.c
1680 +@@ -1457,6 +1457,17 @@ void udp6_proc_exit(struct net *net) {
1681 + }
1682 + #endif /* CONFIG_PROC_FS */
1683 +
1684 ++void udp_v6_clear_sk(struct sock *sk, int size)
1685 ++{
1686 ++ struct inet_sock *inet = inet_sk(sk);
1687 ++
1688 ++ /* we do not want to clear pinet6 field, because of RCU lookups */
1689 ++ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
1690 ++
1691 ++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1692 ++ memset(&inet->pinet6 + 1, 0, size);
1693 ++}
1694 ++
1695 + /* ------------------------------------------------------------------------ */
1696 +
1697 + struct proto udpv6_prot = {
1698 +@@ -1487,7 +1498,7 @@ struct proto udpv6_prot = {
1699 + .compat_setsockopt = compat_udpv6_setsockopt,
1700 + .compat_getsockopt = compat_udpv6_getsockopt,
1701 + #endif
1702 +- .clear_sk = sk_prot_clear_portaddr_nulls,
1703 ++ .clear_sk = udp_v6_clear_sk,
1704 + };
1705 +
1706 + static struct inet_protosw udpv6_protosw = {
1707 +diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
1708 +index d757104..4691ed5 100644
1709 +--- a/net/ipv6/udp_impl.h
1710 ++++ b/net/ipv6/udp_impl.h
1711 +@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
1712 + extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
1713 + extern void udpv6_destroy_sock(struct sock *sk);
1714 +
1715 ++extern void udp_v6_clear_sk(struct sock *sk, int size);
1716 ++
1717 + #ifdef CONFIG_PROC_FS
1718 + extern int udp6_seq_show(struct seq_file *seq, void *v);
1719 + #endif
1720 +diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
1721 +index 1d08e21..dfcc4be 100644
1722 +--- a/net/ipv6/udplite.c
1723 ++++ b/net/ipv6/udplite.c
1724 +@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
1725 + .compat_setsockopt = compat_udpv6_setsockopt,
1726 + .compat_getsockopt = compat_udpv6_getsockopt,
1727 + #endif
1728 +- .clear_sk = sk_prot_clear_portaddr_nulls,
1729 ++ .clear_sk = udp_v6_clear_sk,
1730 + };
1731 +
1732 + static struct inet_protosw udplite6_protosw = {
1733 +diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
1734 +index 8ea65e0..808fd08 100644
1735 +--- a/net/ipv6/xfrm6_policy.c
1736 ++++ b/net/ipv6/xfrm6_policy.c
1737 +@@ -96,8 +96,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1738 + dev_hold(dev);
1739 +
1740 + xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
1741 +- if (!xdst->u.rt6.rt6i_idev)
1742 ++ if (!xdst->u.rt6.rt6i_idev) {
1743 ++ dev_put(dev);
1744 + return -ENODEV;
1745 ++ }
1746 +
1747 + xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
1748 + if (rt->rt6i_peer)
1749 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1750 +index 38ca5e0..cfcd783 100644
1751 +--- a/net/packet/af_packet.c
1752 ++++ b/net/packet/af_packet.c
1753 +@@ -812,37 +812,27 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
1754 +
1755 + smp_rmb();
1756 +
1757 +- if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
1758 +-
1759 +- /* We could have just memset this but we will lose the
1760 +- * flexibility of making the priv area sticky
1761 +- */
1762 +- BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
1763 +- BLOCK_NUM_PKTS(pbd1) = 0;
1764 +- BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
1765 +- getnstimeofday(&ts);
1766 +- h1->ts_first_pkt.ts_sec = ts.tv_sec;
1767 +- h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
1768 +- pkc1->pkblk_start = (char *)pbd1;
1769 +- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
1770 +- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
1771 +- BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
1772 +- BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
1773 +- pbd1->version = pkc1->version;
1774 +- pkc1->prev = pkc1->nxt_offset;
1775 +- pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
1776 +- prb_thaw_queue(pkc1);
1777 +- _prb_refresh_rx_retire_blk_timer(pkc1);
1778 +-
1779 +- smp_wmb();
1780 +-
1781 +- return;
1782 +- }
1783 ++ /* We could have just memset this but we will lose the
1784 ++ * flexibility of making the priv area sticky
1785 ++ */
1786 ++ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
1787 ++ BLOCK_NUM_PKTS(pbd1) = 0;
1788 ++ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
1789 ++ getnstimeofday(&ts);
1790 ++ h1->ts_first_pkt.ts_sec = ts.tv_sec;
1791 ++ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
1792 ++ pkc1->pkblk_start = (char *)pbd1;
1793 ++ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
1794 ++ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
1795 ++ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
1796 ++ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
1797 ++ pbd1->version = pkc1->version;
1798 ++ pkc1->prev = pkc1->nxt_offset;
1799 ++ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
1800 ++ prb_thaw_queue(pkc1);
1801 ++ _prb_refresh_rx_retire_blk_timer(pkc1);
1802 +
1803 +- WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
1804 +- pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
1805 +- dump_stack();
1806 +- BUG();
1807 ++ smp_wmb();
1808 + }
1809 +
1810 + /*
1811 +@@ -933,10 +923,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
1812 + prb_close_block(pkc, pbd, po, status);
1813 + return;
1814 + }
1815 +-
1816 +- WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
1817 +- dump_stack();
1818 +- BUG();
1819 + }
1820 +
1821 + static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
1822 +diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
1823 +index 60f8f61..57827bf 100644
1824 +--- a/net/sched/act_ipt.c
1825 ++++ b/net/sched/act_ipt.c
1826 +@@ -8,7 +8,7 @@
1827 + * as published by the Free Software Foundation; either version
1828 + * 2 of the License, or (at your option) any later version.
1829 + *
1830 +- * Copyright: Jamal Hadi Salim (2002-4)
1831 ++ * Copyright: Jamal Hadi Salim (2002-13)
1832 + */
1833 +
1834 + #include <linux/types.h>
1835 +@@ -299,17 +299,44 @@ static struct tc_action_ops act_ipt_ops = {
1836 + .walk = tcf_generic_walker
1837 + };
1838 +
1839 +-MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
1840 ++static struct tc_action_ops act_xt_ops = {
1841 ++ .kind = "xt",
1842 ++ .hinfo = &ipt_hash_info,
1843 ++ .type = TCA_ACT_IPT,
1844 ++ .capab = TCA_CAP_NONE,
1845 ++ .owner = THIS_MODULE,
1846 ++ .act = tcf_ipt,
1847 ++ .dump = tcf_ipt_dump,
1848 ++ .cleanup = tcf_ipt_cleanup,
1849 ++ .lookup = tcf_hash_search,
1850 ++ .init = tcf_ipt_init,
1851 ++ .walk = tcf_generic_walker
1852 ++};
1853 ++
1854 ++MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
1855 + MODULE_DESCRIPTION("Iptables target actions");
1856 + MODULE_LICENSE("GPL");
1857 ++MODULE_ALIAS("act_xt");
1858 +
1859 + static int __init ipt_init_module(void)
1860 + {
1861 +- return tcf_register_action(&act_ipt_ops);
1862 ++ int ret1, ret2;
1863 ++ ret1 = tcf_register_action(&act_xt_ops);
1864 ++ if (ret1 < 0)
1865 ++ printk("Failed to load xt action\n");
1866 ++ ret2 = tcf_register_action(&act_ipt_ops);
1867 ++ if (ret2 < 0)
1868 ++ printk("Failed to load ipt action\n");
1869 ++
1870 ++ if (ret1 < 0 && ret2 < 0)
1871 ++ return ret1;
1872 ++ else
1873 ++ return 0;
1874 + }
1875 +
1876 + static void __exit ipt_cleanup_module(void)
1877 + {
1878 ++ tcf_unregister_action(&act_xt_ops);
1879 + tcf_unregister_action(&act_ipt_ops);
1880 + }
1881 +
1882 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
1883 +index c72dce0..c74a044 100644
1884 +--- a/sound/pci/hda/hda_codec.c
1885 ++++ b/sound/pci/hda/hda_codec.c
1886 +@@ -617,6 +617,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
1887 + struct hda_bus_unsolicited *unsol;
1888 + unsigned int wp;
1889 +
1890 ++ if (!bus || !bus->workq)
1891 ++ return 0;
1892 ++
1893 + trace_hda_unsol_event(bus, res, res_ex);
1894 + unsol = bus->unsol;
1895 + if (!unsol)
1896 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
1897 +index 4c471a5..d9924d7 100644
1898 +--- a/sound/soc/codecs/wm8994.c
1899 ++++ b/sound/soc/codecs/wm8994.c
1900 +@@ -2824,6 +2824,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
1901 + default:
1902 + return 0;
1903 + }
1904 ++ break;
1905 + default:
1906 + return 0;
1907 + }