Author: tomwij
Date: 2013-02-28 19:13:50 +0000 (Thu, 28 Feb 2013)
New Revision: 2291

Added:
  genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch
  genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch
  genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch
  genpatches-2.6/trunk/3.4/1032_linux-3.4.33.patch
  genpatches-2.6/trunk/3.4/1033_linux-3.4.34.patch
Modified:
  genpatches-2.6/trunk/3.0/0000_README
  genpatches-2.6/trunk/3.2/0000_README
  genpatches-2.6/trunk/3.4/0000_README
Log:
Linux patches 3.0.66 to 3.0.67, 3.2.39 and 3.4.33 to 3.4.34.

Modified: genpatches-2.6/trunk/3.0/0000_README
===================================================================
--- genpatches-2.6/trunk/3.0/0000_README 2013-02-23 19:07:21 UTC (rev 2290)
+++ genpatches-2.6/trunk/3.0/0000_README 2013-02-28 19:13:50 UTC (rev 2291)
@@ -295,6 +295,14 @@
From: http://www.kernel.org
Desc: Linux 3.0.65

+Patch: 1065_linux-3.0.66.patch
+From: http://www.kernel.org
+Desc: Linux 3.0.66
+
+Patch: 1066_linux-3.0.67.patch
+From: http://www.kernel.org
+Desc: Linux 3.0.67
+
Patch: 1800_fix-zcache-build.patch
From: http://bugs.gentoo.org/show_bug.cgi?id=376325
Desc: Fix zcache build error

Added: genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch (rev 0)
+++ genpatches-2.6/trunk/3.0/1065_linux-3.0.66.patch 2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,54 @@
+diff --git a/Makefile b/Makefile
+index cdba5c1..da3ff21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 0
+-SUBLEVEL = 65
++SUBLEVEL = 66
+ EXTRAVERSION =
+ NAME = Sneaky Weasel
+
+diff --git a/include/linux/syslog.h b/include/linux/syslog.h
+index 3891139..ce4c665 100644
+--- a/include/linux/syslog.h
++++ b/include/linux/syslog.h
+@@ -47,6 +47,12 @@
+ #define SYSLOG_FROM_CALL 0
+ #define SYSLOG_FROM_FILE 1
+
++/*
++ * Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>'
++ * See RFC5424 for details
++*/
++#define SYSLOG_PRI_MAX_LENGTH 5
++
+ int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+ #endif /* _LINUX_SYSLOG_H */
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 3fc4708..6edc4e89 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -633,8 +633,19 @@ static void call_console_drivers(unsigned start, unsigned end)
+ start_print = start;
+ while (cur_index != end) {
+ if (msg_level < 0 && ((end - cur_index) > 2)) {
++ /*
++ * prepare buf_prefix, as a contiguous array,
++ * to be processed by log_prefix function
++ */
++ char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
++ unsigned i;
++ for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) {
++ buf_prefix[i] = LOG_BUF(cur_index + i);
++ }
++ buf_prefix[i] = '\0'; /* force '\0' as last string character */
++
+ /* strip log prefix */
+- cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
++ cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL);
+ start_print = cur_index;
+ }
+ while (cur_index != end) {

Added: genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch
===================================================================
--- genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch (rev 0)
+++ genpatches-2.6/trunk/3.0/1066_linux-3.0.67.patch 2013-02-28 19:13:50 UTC (rev 2291)
@@ -0,0 +1,2195 @@
103 |
+diff --git a/Makefile b/Makefile |
104 |
+index da3ff21..7d4347a 100644 |
105 |
+--- a/Makefile |
106 |
++++ b/Makefile |
107 |
+@@ -1,6 +1,6 @@ |
108 |
+ VERSION = 3 |
109 |
+ PATCHLEVEL = 0 |
110 |
+-SUBLEVEL = 66 |
111 |
++SUBLEVEL = 67 |
112 |
+ EXTRAVERSION = |
113 |
+ NAME = Sneaky Weasel |
114 |
+ |
115 |
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h |
116 |
+index 654adc9..301bf0e 100644 |
117 |
+--- a/arch/arm/mach-pxa/include/mach/smemc.h |
118 |
++++ b/arch/arm/mach-pxa/include/mach/smemc.h |
119 |
+@@ -37,6 +37,7 @@ |
120 |
+ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */ |
121 |
+ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */ |
122 |
+ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */ |
123 |
++#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */ |
124 |
+ |
125 |
+ /* |
126 |
+ * More handy macros for PCMCIA |
127 |
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c |
128 |
+index 7992305..f38aa89 100644 |
129 |
+--- a/arch/arm/mach-pxa/smemc.c |
130 |
++++ b/arch/arm/mach-pxa/smemc.c |
131 |
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void) |
132 |
+ __raw_writel(csadrcfg[1], CSADRCFG1); |
133 |
+ __raw_writel(csadrcfg[2], CSADRCFG2); |
134 |
+ __raw_writel(csadrcfg[3], CSADRCFG3); |
135 |
++ /* CSMSADRCFG wakes up in its default state (0), so we need to set it */ |
136 |
++ __raw_writel(0x2, CSMSADRCFG); |
137 |
+ } |
138 |
+ |
139 |
+ static struct syscore_ops smemc_syscore_ops = { |
140 |
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = { |
141 |
+ |
142 |
+ static int __init smemc_init(void) |
143 |
+ { |
144 |
+- if (cpu_is_pxa3xx()) |
145 |
++ if (cpu_is_pxa3xx()) { |
146 |
++ /* |
147 |
++ * The only documentation we have on the |
148 |
++ * Chip Select Configuration Register (CSMSADRCFG) is that |
149 |
++ * it must be programmed to 0x2. |
150 |
++ * Moreover, in the bit definitions, the second bit |
151 |
++ * (CSMSADRCFG[1]) is called "SETALWAYS". |
152 |
++ * Other bits are reserved in this register. |
153 |
++ */ |
154 |
++ __raw_writel(0x2, CSMSADRCFG); |
155 |
++ |
156 |
+ register_syscore_ops(&smemc_syscore_ops); |
157 |
++ } |
158 |
+ |
159 |
+ return 0; |
160 |
+ } |
161 |
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h |
162 |
+index 22dadeb..9d35a3e 100644 |
163 |
+--- a/arch/parisc/include/asm/pgtable.h |
164 |
++++ b/arch/parisc/include/asm/pgtable.h |
165 |
+@@ -12,11 +12,10 @@ |
166 |
+ |
167 |
+ #include <linux/bitops.h> |
168 |
+ #include <linux/spinlock.h> |
169 |
++#include <linux/mm_types.h> |
170 |
+ #include <asm/processor.h> |
171 |
+ #include <asm/cache.h> |
172 |
+ |
173 |
+-struct vm_area_struct; |
174 |
+- |
175 |
+ /* |
176 |
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel |
177 |
+ * memory. For the return value to be meaningful, ADDR must be >= |
178 |
+@@ -40,7 +39,14 @@ struct vm_area_struct; |
179 |
+ do{ \ |
180 |
+ *(pteptr) = (pteval); \ |
181 |
+ } while(0) |
182 |
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
183 |
++ |
184 |
++extern void purge_tlb_entries(struct mm_struct *, unsigned long); |
185 |
++ |
186 |
++#define set_pte_at(mm, addr, ptep, pteval) \ |
187 |
++ do { \ |
188 |
++ set_pte(ptep, pteval); \ |
189 |
++ purge_tlb_entries(mm, addr); \ |
190 |
++ } while (0) |
191 |
+ |
192 |
+ #endif /* !__ASSEMBLY__ */ |
193 |
+ |
194 |
+@@ -464,6 +470,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
195 |
+ old = pte_val(*ptep); |
196 |
+ new = pte_val(pte_wrprotect(__pte (old))); |
197 |
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old); |
198 |
++ purge_tlb_entries(mm, addr); |
199 |
+ #else |
200 |
+ pte_t old_pte = *ptep; |
201 |
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); |
202 |
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
203 |
+index 83335f3..5241698 100644 |
204 |
+--- a/arch/parisc/kernel/cache.c |
205 |
++++ b/arch/parisc/kernel/cache.c |
206 |
+@@ -421,6 +421,24 @@ void kunmap_parisc(void *addr) |
207 |
+ EXPORT_SYMBOL(kunmap_parisc); |
208 |
+ #endif |
209 |
+ |
210 |
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) |
211 |
++{ |
212 |
++ unsigned long flags; |
213 |
++ |
214 |
++ /* Note: purge_tlb_entries can be called at startup with |
215 |
++ no context. */ |
216 |
++ |
217 |
++ /* Disable preemption while we play with %sr1. */ |
218 |
++ preempt_disable(); |
219 |
++ mtsp(mm->context, 1); |
220 |
++ purge_tlb_start(flags); |
221 |
++ pdtlb(addr); |
222 |
++ pitlb(addr); |
223 |
++ purge_tlb_end(flags); |
224 |
++ preempt_enable(); |
225 |
++} |
226 |
++EXPORT_SYMBOL(purge_tlb_entries); |
227 |
++ |
228 |
+ void __flush_tlb_range(unsigned long sid, unsigned long start, |
229 |
+ unsigned long end) |
230 |
+ { |
231 |
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c |
232 |
+index 583af70..cac9d2c 100644 |
233 |
+--- a/arch/powerpc/kernel/machine_kexec_64.c |
234 |
++++ b/arch/powerpc/kernel/machine_kexec_64.c |
235 |
+@@ -163,6 +163,8 @@ static int kexec_all_irq_disabled = 0; |
236 |
+ static void kexec_smp_down(void *arg) |
237 |
+ { |
238 |
+ local_irq_disable(); |
239 |
++ hard_irq_disable(); |
240 |
++ |
241 |
+ mb(); /* make sure our irqs are disabled before we say they are */ |
242 |
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; |
243 |
+ while(kexec_all_irq_disabled == 0) |
244 |
+@@ -245,6 +247,8 @@ static void kexec_prepare_cpus(void) |
245 |
+ wake_offline_cpus(); |
246 |
+ smp_call_function(kexec_smp_down, NULL, /* wait */0); |
247 |
+ local_irq_disable(); |
248 |
++ hard_irq_disable(); |
249 |
++ |
250 |
+ mb(); /* make sure IRQs are disabled before we say they are */ |
251 |
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF; |
252 |
+ |
253 |
+@@ -282,6 +286,7 @@ static void kexec_prepare_cpus(void) |
254 |
+ if (ppc_md.kexec_cpu_down) |
255 |
+ ppc_md.kexec_cpu_down(0, 0); |
256 |
+ local_irq_disable(); |
257 |
++ hard_irq_disable(); |
258 |
+ } |
259 |
+ |
260 |
+ #endif /* SMP */ |
261 |
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c |
262 |
+index 2ada634..25ab200 100644 |
263 |
+--- a/arch/s390/kvm/kvm-s390.c |
264 |
++++ b/arch/s390/kvm/kvm-s390.c |
265 |
+@@ -584,6 +584,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) |
266 |
+ } else |
267 |
+ prefix = 0; |
268 |
+ |
269 |
++ /* |
270 |
++ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy |
271 |
++ * copying in vcpu load/put. Lets update our copies before we save |
272 |
++ * it into the save area |
273 |
++ */ |
274 |
++ save_fp_regs(&vcpu->arch.guest_fpregs); |
275 |
++ save_access_regs(vcpu->run->s.regs.acrs); |
276 |
++ |
277 |
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), |
278 |
+ vcpu->arch.guest_fpregs.fprs, 128, prefix)) |
279 |
+ return -EFAULT; |
280 |
+diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h |
281 |
+index ffa037f..a6a6414 100644 |
282 |
+--- a/arch/x86/include/asm/mmzone_32.h |
283 |
++++ b/arch/x86/include/asm/mmzone_32.h |
284 |
+@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[]; |
285 |
+ |
286 |
+ #include <asm/numaq.h> |
287 |
+ |
288 |
+-extern void resume_map_numa_kva(pgd_t *pgd); |
289 |
+- |
290 |
+-#else /* !CONFIG_NUMA */ |
291 |
+- |
292 |
+-static inline void resume_map_numa_kva(pgd_t *pgd) {} |
293 |
+- |
294 |
+ #endif /* CONFIG_NUMA */ |
295 |
+ |
296 |
+ #ifdef CONFIG_DISCONTIGMEM |
297 |
+diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c |
298 |
+index 3769079..a09ecb9 100644 |
299 |
+--- a/arch/x86/power/hibernate_32.c |
300 |
++++ b/arch/x86/power/hibernate_32.c |
301 |
+@@ -130,8 +130,6 @@ static int resume_physical_mapping_init(pgd_t *pgd_base) |
302 |
+ } |
303 |
+ } |
304 |
+ |
305 |
+- resume_map_numa_kva(pgd_base); |
306 |
+- |
307 |
+ return 0; |
308 |
+ } |
309 |
+ |
310 |
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c |
311 |
+index cc9b1e1..d99537f 100644 |
312 |
+--- a/arch/x86/xen/spinlock.c |
313 |
++++ b/arch/x86/xen/spinlock.c |
314 |
+@@ -313,7 +313,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) |
315 |
+ if (per_cpu(lock_spinners, cpu) == xl) { |
316 |
+ ADD_STATS(released_slow_kicked, 1); |
317 |
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); |
318 |
+- break; |
319 |
+ } |
320 |
+ } |
321 |
+ } |
322 |
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c |
323 |
+index 000e7b2..8b8e8c0 100644 |
324 |
+--- a/drivers/base/bus.c |
325 |
++++ b/drivers/base/bus.c |
326 |
+@@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, |
327 |
+ struct device *dev; |
328 |
+ int error = 0; |
329 |
+ |
330 |
+- if (!bus) |
331 |
++ if (!bus || !bus->p) |
332 |
+ return -EINVAL; |
333 |
+ |
334 |
+ klist_iter_init_node(&bus->p->klist_devices, &i, |
335 |
+@@ -323,7 +323,7 @@ struct device *bus_find_device(struct bus_type *bus, |
336 |
+ struct klist_iter i; |
337 |
+ struct device *dev; |
338 |
+ |
339 |
+- if (!bus) |
340 |
++ if (!bus || !bus->p) |
341 |
+ return NULL; |
342 |
+ |
343 |
+ klist_iter_init_node(&bus->p->klist_devices, &i, |
344 |
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c |
345 |
+index 48e8fee..94f6ae2 100644 |
346 |
+--- a/drivers/block/sunvdc.c |
347 |
++++ b/drivers/block/sunvdc.c |
348 |
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len) |
349 |
+ int op_len, err; |
350 |
+ void *req_buf; |
351 |
+ |
352 |
+- if (!(((u64)1 << ((u64)op - 1)) & port->operations)) |
353 |
++ if (!(((u64)1 << (u64)op) & port->operations)) |
354 |
+ return -EOPNOTSUPP; |
355 |
+ |
356 |
+ switch (op) { |
357 |
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c |
358 |
+index 4abd089..7065851 100644 |
359 |
+--- a/drivers/dca/dca-core.c |
360 |
++++ b/drivers/dca/dca-core.c |
361 |
+@@ -409,6 +409,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev) |
362 |
+ |
363 |
+ spin_lock_irqsave(&dca_lock, flags); |
364 |
+ |
365 |
++ if (list_empty(&dca_domains)) { |
366 |
++ raw_spin_unlock_irqrestore(&dca_lock, flags); |
367 |
++ return; |
368 |
++ } |
369 |
++ |
370 |
+ list_del(&dca->node); |
371 |
+ |
372 |
+ pci_rc = dca_pci_rc_from_dev(dev); |
373 |
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c |
374 |
+index 206d230..0c853f5 100644 |
375 |
+--- a/drivers/gpu/drm/drm_usb.c |
376 |
++++ b/drivers/gpu/drm/drm_usb.c |
377 |
+@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface, |
378 |
+ |
379 |
+ usbdev = interface_to_usbdev(interface); |
380 |
+ dev->usbdev = usbdev; |
381 |
+- dev->dev = &usbdev->dev; |
382 |
++ dev->dev = &interface->dev; |
383 |
+ |
384 |
+ mutex_lock(&drm_global_mutex); |
385 |
+ |
386 |
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
387 |
+index b4f4d12..11ecb0c 100644 |
388 |
+--- a/drivers/gpu/drm/i915/intel_display.c |
389 |
++++ b/drivers/gpu/drm/i915/intel_display.c |
390 |
+@@ -2898,6 +2898,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) |
391 |
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
392 |
+ int pipe = intel_crtc->pipe; |
393 |
+ int plane = intel_crtc->plane; |
394 |
++ u32 pctl; |
395 |
+ |
396 |
+ if (!intel_crtc->active) |
397 |
+ return; |
398 |
+@@ -2914,6 +2915,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) |
399 |
+ |
400 |
+ intel_disable_plane(dev_priv, plane, pipe); |
401 |
+ intel_disable_pipe(dev_priv, pipe); |
402 |
++ |
403 |
++ /* Disable pannel fitter if it is on this pipe. */ |
404 |
++ pctl = I915_READ(PFIT_CONTROL); |
405 |
++ if ((pctl & PFIT_ENABLE) && |
406 |
++ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) |
407 |
++ I915_WRITE(PFIT_CONTROL, 0); |
408 |
++ |
409 |
+ intel_disable_pll(dev_priv, pipe); |
410 |
+ |
411 |
+ intel_crtc->active = false; |
412 |
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c |
413 |
+index f1fa763..99e7e7f 100644 |
414 |
+--- a/drivers/net/wireless/p54/p54usb.c |
415 |
++++ b/drivers/net/wireless/p54/p54usb.c |
416 |
+@@ -83,8 +83,8 @@ static struct usb_device_id p54u_table[] = { |
417 |
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ |
418 |
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ |
419 |
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ |
420 |
+- {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */ |
421 |
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ |
422 |
++ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ |
423 |
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ |
424 |
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ |
425 |
+ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ |
426 |
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c |
427 |
+index 5925e0b..8eaf0e2 100644 |
428 |
+--- a/drivers/net/xen-netback/interface.c |
429 |
++++ b/drivers/net/xen-netback/interface.c |
430 |
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif) |
431 |
+ static void xenvif_down(struct xenvif *vif) |
432 |
+ { |
433 |
+ disable_irq(vif->irq); |
434 |
++ del_timer_sync(&vif->credit_timeout); |
435 |
+ xen_netbk_deschedule_xenvif(vif); |
436 |
+ xen_netbk_remove_xenvif(vif); |
437 |
+ } |
438 |
+@@ -362,8 +363,6 @@ void xenvif_disconnect(struct xenvif *vif) |
439 |
+ atomic_dec(&vif->refcnt); |
440 |
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); |
441 |
+ |
442 |
+- del_timer_sync(&vif->credit_timeout); |
443 |
+- |
444 |
+ if (vif->irq) |
445 |
+ unbind_from_irqhandler(vif->irq, vif); |
446 |
+ |
447 |
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c |
448 |
+index 9068d32..1260bf0 100644 |
449 |
+--- a/drivers/net/xen-netback/netback.c |
450 |
++++ b/drivers/net/xen-netback/netback.c |
451 |
+@@ -870,13 +870,13 @@ static int netbk_count_requests(struct xenvif *vif, |
452 |
+ if (frags >= work_to_do) { |
453 |
+ netdev_err(vif->dev, "Need more frags\n"); |
454 |
+ netbk_fatal_tx_err(vif); |
455 |
+- return -frags; |
456 |
++ return -ENODATA; |
457 |
+ } |
458 |
+ |
459 |
+ if (unlikely(frags >= MAX_SKB_FRAGS)) { |
460 |
+ netdev_err(vif->dev, "Too many frags\n"); |
461 |
+ netbk_fatal_tx_err(vif); |
462 |
+- return -frags; |
463 |
++ return -E2BIG; |
464 |
+ } |
465 |
+ |
466 |
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), |
467 |
+@@ -884,7 +884,7 @@ static int netbk_count_requests(struct xenvif *vif, |
468 |
+ if (txp->size > first->size) { |
469 |
+ netdev_err(vif->dev, "Frag is bigger than frame.\n"); |
470 |
+ netbk_fatal_tx_err(vif); |
471 |
+- return -frags; |
472 |
++ return -EIO; |
473 |
+ } |
474 |
+ |
475 |
+ first->size -= txp->size; |
476 |
+@@ -894,7 +894,7 @@ static int netbk_count_requests(struct xenvif *vif, |
477 |
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n", |
478 |
+ txp->offset, txp->size); |
479 |
+ netbk_fatal_tx_err(vif); |
480 |
+- return -frags; |
481 |
++ return -EINVAL; |
482 |
+ } |
483 |
+ } while ((txp++)->flags & XEN_NETTXF_more_data); |
484 |
+ return frags; |
485 |
+@@ -990,7 +990,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, |
486 |
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); |
487 |
+ |
488 |
+ /* Skip first skb fragment if it is on same page as header fragment. */ |
489 |
+- start = ((unsigned long)shinfo->frags[i].page == pending_idx); |
490 |
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx); |
491 |
+ |
492 |
+ for (i = start; i < nr_frags; i++) { |
493 |
+ int j, newerr; |
494 |
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c |
495 |
+index 86e4a1a..6bb02ab 100644 |
496 |
+--- a/drivers/pcmcia/vrc4171_card.c |
497 |
++++ b/drivers/pcmcia/vrc4171_card.c |
498 |
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock) |
499 |
+ socket = &vrc4171_sockets[slot]; |
500 |
+ socket->csc_irq = search_nonuse_irq(); |
501 |
+ socket->io_irq = search_nonuse_irq(); |
502 |
++ spin_lock_init(&socket->lock); |
503 |
+ |
504 |
+ return 0; |
505 |
+ } |
506 |
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c |
507 |
+index aec60d5..481037d 100644 |
508 |
+--- a/drivers/s390/kvm/kvm_virtio.c |
509 |
++++ b/drivers/s390/kvm/kvm_virtio.c |
510 |
+@@ -413,6 +413,26 @@ static void kvm_extint_handler(unsigned int ext_int_code, |
511 |
+ } |
512 |
+ |
513 |
+ /* |
514 |
++ * For s390-virtio, we expect a page above main storage containing |
515 |
++ * the virtio configuration. Try to actually load from this area |
516 |
++ * in order to figure out if the host provides this page. |
517 |
++ */ |
518 |
++static int __init test_devices_support(unsigned long addr) |
519 |
++{ |
520 |
++ int ret = -EIO; |
521 |
++ |
522 |
++ asm volatile( |
523 |
++ "0: lura 0,%1\n" |
524 |
++ "1: xgr %0,%0\n" |
525 |
++ "2:\n" |
526 |
++ EX_TABLE(0b,2b) |
527 |
++ EX_TABLE(1b,2b) |
528 |
++ : "+d" (ret) |
529 |
++ : "a" (addr) |
530 |
++ : "0", "cc"); |
531 |
++ return ret; |
532 |
++} |
533 |
++/* |
534 |
+ * Init function for virtio |
535 |
+ * devices are in a single page above top of "normal" mem |
536 |
+ */ |
537 |
+@@ -423,21 +443,23 @@ static int __init kvm_devices_init(void) |
538 |
+ if (!MACHINE_IS_KVM) |
539 |
+ return -ENODEV; |
540 |
+ |
541 |
++ if (test_devices_support(real_memory_size) < 0) |
542 |
++ return -ENODEV; |
543 |
++ |
544 |
++ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); |
545 |
++ if (rc) |
546 |
++ return rc; |
547 |
++ |
548 |
++ kvm_devices = (void *) real_memory_size; |
549 |
++ |
550 |
+ kvm_root = root_device_register("kvm_s390"); |
551 |
+ if (IS_ERR(kvm_root)) { |
552 |
+ rc = PTR_ERR(kvm_root); |
553 |
+ printk(KERN_ERR "Could not register kvm_s390 root device"); |
554 |
++ vmem_remove_mapping(real_memory_size, PAGE_SIZE); |
555 |
+ return rc; |
556 |
+ } |
557 |
+ |
558 |
+- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE); |
559 |
+- if (rc) { |
560 |
+- root_device_unregister(kvm_root); |
561 |
+- return rc; |
562 |
+- } |
563 |
+- |
564 |
+- kvm_devices = (void *) real_memory_size; |
565 |
+- |
566 |
+ INIT_WORK(&hotplug_work, hotplug_devices); |
567 |
+ |
568 |
+ service_subclass_irq_register(); |
569 |
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c |
570 |
+index 39be673..4b9d8f0 100644 |
571 |
+--- a/drivers/staging/comedi/comedi_fops.c |
572 |
++++ b/drivers/staging/comedi/comedi_fops.c |
573 |
+@@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, |
574 |
+ /* Device config is special, because it must work on |
575 |
+ * an unconfigured device. */ |
576 |
+ if (cmd == COMEDI_DEVCONFIG) { |
577 |
++ if (minor >= COMEDI_NUM_BOARD_MINORS) { |
578 |
++ /* Device config not appropriate on non-board minors. */ |
579 |
++ rc = -ENOTTY; |
580 |
++ goto done; |
581 |
++ } |
582 |
+ rc = do_devconfig_ioctl(dev, |
583 |
+ (struct comedi_devconfig __user *)arg); |
584 |
+ if (rc == 0) |
585 |
+diff --git a/drivers/staging/hv/hv_kvp.c b/drivers/staging/hv/hv_kvp.c |
586 |
+index 13b0ecf..9f8efd4 100644 |
587 |
+--- a/drivers/staging/hv/hv_kvp.c |
588 |
++++ b/drivers/staging/hv/hv_kvp.c |
589 |
+@@ -201,11 +201,13 @@ kvp_respond_to_host(char *key, char *value, int error) |
590 |
+ * The windows host expects the key/value pair to be encoded |
591 |
+ * in utf16. |
592 |
+ */ |
593 |
+- keylen = utf8s_to_utf16s(key_name, strlen(key_name), |
594 |
+- (wchar_t *)kvp_data->data.key); |
595 |
++ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN, |
596 |
++ (wchar_t *) kvp_data->data.key, |
597 |
++ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2); |
598 |
+ kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */ |
599 |
+- valuelen = utf8s_to_utf16s(value, strlen(value), |
600 |
+- (wchar_t *)kvp_data->data.value); |
601 |
++ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN, |
602 |
++ (wchar_t *) kvp_data->data.value, |
603 |
++ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2); |
604 |
+ kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */ |
605 |
+ |
606 |
+ kvp_data->data.value_type = REG_SZ; /* all our values are strings */ |
607 |
+diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c |
608 |
+index c612ab5..f759352 100644 |
609 |
+--- a/drivers/staging/vt6656/usbpipe.c |
610 |
++++ b/drivers/staging/vt6656/usbpipe.c |
611 |
+@@ -168,6 +168,11 @@ int PIPEnsControlOut( |
612 |
+ if (pDevice->Flags & fMP_CONTROL_WRITES) |
613 |
+ return STATUS_FAILURE; |
614 |
+ |
615 |
++ if (pDevice->Flags & fMP_CONTROL_READS) |
616 |
++ return STATUS_FAILURE; |
617 |
++ |
618 |
++ MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); |
619 |
++ |
620 |
+ pDevice->sUsbCtlRequest.bRequestType = 0x40; |
621 |
+ pDevice->sUsbCtlRequest.bRequest = byRequest; |
622 |
+ pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); |
623 |
+@@ -182,12 +187,13 @@ int PIPEnsControlOut( |
624 |
+ |
625 |
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); |
626 |
+ if (ntStatus != 0) { |
627 |
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus); |
628 |
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO |
629 |
++ "control send request submission failed: %d\n", |
630 |
++ ntStatus); |
631 |
++ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); |
632 |
+ return STATUS_FAILURE; |
633 |
+ } |
634 |
+- else { |
635 |
+- MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES); |
636 |
+- } |
637 |
++ |
638 |
+ spin_unlock_irq(&pDevice->lock); |
639 |
+ for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { |
640 |
+ |
641 |
+@@ -227,6 +233,11 @@ int PIPEnsControlIn( |
642 |
+ if (pDevice->Flags & fMP_CONTROL_READS) |
643 |
+ return STATUS_FAILURE; |
644 |
+ |
645 |
++ if (pDevice->Flags & fMP_CONTROL_WRITES) |
646 |
++ return STATUS_FAILURE; |
647 |
++ |
648 |
++ MP_SET_FLAG(pDevice, fMP_CONTROL_READS); |
649 |
++ |
650 |
+ pDevice->sUsbCtlRequest.bRequestType = 0xC0; |
651 |
+ pDevice->sUsbCtlRequest.bRequest = byRequest; |
652 |
+ pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue); |
653 |
+@@ -240,10 +251,11 @@ int PIPEnsControlIn( |
654 |
+ |
655 |
+ ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC); |
656 |
+ if (ntStatus != 0) { |
657 |
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus); |
658 |
+- }else { |
659 |
+- MP_SET_FLAG(pDevice, fMP_CONTROL_READS); |
660 |
+- } |
661 |
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO |
662 |
++ "control request submission failed: %d\n", ntStatus); |
663 |
++ MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS); |
664 |
++ return STATUS_FAILURE; |
665 |
++ } |
666 |
+ |
667 |
+ spin_unlock_irq(&pDevice->lock); |
668 |
+ for (ii = 0; ii <= USB_CTL_WAIT; ii ++) { |
669 |
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c |
670 |
+index 53f2442..3047873 100644 |
671 |
+--- a/drivers/tty/tty_ioctl.c |
672 |
++++ b/drivers/tty/tty_ioctl.c |
673 |
+@@ -617,7 +617,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) |
674 |
+ if (opt & TERMIOS_WAIT) { |
675 |
+ tty_wait_until_sent(tty, 0); |
676 |
+ if (signal_pending(current)) |
677 |
+- return -EINTR; |
678 |
++ return -ERESTARTSYS; |
679 |
+ } |
680 |
+ |
681 |
+ tty_set_termios(tty, &tmp_termios); |
682 |
+@@ -684,7 +684,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt) |
683 |
+ if (opt & TERMIOS_WAIT) { |
684 |
+ tty_wait_until_sent(tty, 0); |
685 |
+ if (signal_pending(current)) |
686 |
+- return -EINTR; |
687 |
++ return -ERESTARTSYS; |
688 |
+ } |
689 |
+ |
690 |
+ mutex_lock(&tty->termios_mutex); |
691 |
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c |
692 |
+index b3915b7..e41288a 100644 |
693 |
+--- a/drivers/tty/vt/vt.c |
694 |
++++ b/drivers/tty/vt/vt.c |
695 |
+@@ -3016,7 +3016,7 @@ int __init vty_init(const struct file_operations *console_fops) |
696 |
+ |
697 |
+ static struct class *vtconsole_class; |
698 |
+ |
699 |
+-static int bind_con_driver(const struct consw *csw, int first, int last, |
700 |
++static int do_bind_con_driver(const struct consw *csw, int first, int last, |
701 |
+ int deflt) |
702 |
+ { |
703 |
+ struct module *owner = csw->owner; |
704 |
+@@ -3027,7 +3027,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last, |
705 |
+ if (!try_module_get(owner)) |
706 |
+ return -ENODEV; |
707 |
+ |
708 |
+- console_lock(); |
709 |
++ WARN_CONSOLE_UNLOCKED(); |
710 |
+ |
711 |
+ /* check if driver is registered */ |
712 |
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { |
713 |
+@@ -3112,11 +3112,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last, |
714 |
+ |
715 |
+ retval = 0; |
716 |
+ err: |
717 |
+- console_unlock(); |
718 |
+ module_put(owner); |
719 |
+ return retval; |
720 |
+ }; |
721 |
+ |
722 |
++ |
723 |
++static int bind_con_driver(const struct consw *csw, int first, int last, |
724 |
++ int deflt) |
725 |
++{ |
726 |
++ int ret; |
727 |
++ |
728 |
++ console_lock(); |
729 |
++ ret = do_bind_con_driver(csw, first, last, deflt); |
730 |
++ console_unlock(); |
731 |
++ return ret; |
732 |
++} |
733 |
++ |
734 |
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING |
735 |
+ static int con_is_graphics(const struct consw *csw, int first, int last) |
736 |
+ { |
737 |
+@@ -3153,6 +3164,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last) |
738 |
+ */ |
739 |
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
740 |
+ { |
741 |
++ int retval; |
742 |
++ |
743 |
++ console_lock(); |
744 |
++ retval = do_unbind_con_driver(csw, first, last, deflt); |
745 |
++ console_unlock(); |
746 |
++ return retval; |
747 |
++} |
748 |
++EXPORT_SYMBOL(unbind_con_driver); |
749 |
++ |
750 |
++/* unlocked version of unbind_con_driver() */ |
751 |
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
752 |
++{ |
753 |
+ struct module *owner = csw->owner; |
754 |
+ const struct consw *defcsw = NULL; |
755 |
+ struct con_driver *con_driver = NULL, *con_back = NULL; |
756 |
+@@ -3161,7 +3184,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
757 |
+ if (!try_module_get(owner)) |
758 |
+ return -ENODEV; |
759 |
+ |
760 |
+- console_lock(); |
761 |
++ WARN_CONSOLE_UNLOCKED(); |
762 |
+ |
763 |
+ /* check if driver is registered and if it is unbindable */ |
764 |
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { |
765 |
+@@ -3174,10 +3197,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
766 |
+ } |
767 |
+ } |
768 |
+ |
769 |
+- if (retval) { |
770 |
+- console_unlock(); |
771 |
++ if (retval) |
772 |
+ goto err; |
773 |
+- } |
774 |
+ |
775 |
+ retval = -ENODEV; |
776 |
+ |
777 |
+@@ -3193,15 +3214,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
778 |
+ } |
779 |
+ } |
780 |
+ |
781 |
+- if (retval) { |
782 |
+- console_unlock(); |
783 |
++ if (retval) |
784 |
+ goto err; |
785 |
+- } |
786 |
+ |
787 |
+- if (!con_is_bound(csw)) { |
788 |
+- console_unlock(); |
789 |
++ if (!con_is_bound(csw)) |
790 |
+ goto err; |
791 |
+- } |
792 |
+ |
793 |
+ first = max(first, con_driver->first); |
794 |
+ last = min(last, con_driver->last); |
795 |
+@@ -3228,15 +3245,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) |
796 |
+ if (!con_is_bound(csw)) |
797 |
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT; |
798 |
+ |
799 |
+- console_unlock(); |
800 |
+ /* ignore return value, binding should not fail */ |
801 |
+- bind_con_driver(defcsw, first, last, deflt); |
802 |
++ do_bind_con_driver(defcsw, first, last, deflt); |
803 |
+ err: |
804 |
+ module_put(owner); |
805 |
+ return retval; |
806 |
+ |
807 |
+ } |
808 |
+-EXPORT_SYMBOL(unbind_con_driver); |
809 |
++EXPORT_SYMBOL_GPL(do_unbind_con_driver); |
810 |
+ |
811 |
+ static int vt_bind(struct con_driver *con) |
812 |
+ { |
813 |
+@@ -3508,28 +3524,18 @@ int con_debug_leave(void) |
814 |
+ } |
815 |
+ EXPORT_SYMBOL_GPL(con_debug_leave); |
816 |
+ |
817 |
+-/** |
818 |
+- * register_con_driver - register console driver to console layer |
819 |
+- * @csw: console driver |
820 |
+- * @first: the first console to take over, minimum value is 0 |
821 |
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 |
822 |
+- * |
823 |
+- * DESCRIPTION: This function registers a console driver which can later |
824 |
+- * bind to a range of consoles specified by @first and @last. It will |
825 |
+- * also initialize the console driver by calling con_startup(). |
826 |
+- */ |
827 |
+-int register_con_driver(const struct consw *csw, int first, int last) |
828 |
++static int do_register_con_driver(const struct consw *csw, int first, int last) |
829 |
+ { |
830 |
+ struct module *owner = csw->owner; |
831 |
+ struct con_driver *con_driver; |
832 |
+ const char *desc; |
833 |
+ int i, retval = 0; |
834 |
+ |
835 |
++ WARN_CONSOLE_UNLOCKED(); |
836 |
++ |
837 |
+ if (!try_module_get(owner)) |
838 |
+ return -ENODEV; |
839 |
+ |
840 |
+- console_lock(); |
841 |
+- |
842 |
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { |
843 |
+ con_driver = &registered_con_driver[i]; |
844 |
+ |
845 |
+@@ -3582,10 +3588,29 @@ int register_con_driver(const struct consw *csw, int first, int last) |
846 |
+ } |
847 |
+ |
848 |
+ err: |
849 |
+- console_unlock(); |
850 |
+ module_put(owner); |
851 |
+ return retval; |
852 |
+ } |
853 |
++ |
854 |
++/** |
855 |
++ * register_con_driver - register console driver to console layer |
856 |
++ * @csw: console driver |
857 |
++ * @first: the first console to take over, minimum value is 0 |
858 |
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1 |
859 |
++ * |
860 |
++ * DESCRIPTION: This function registers a console driver which can later |
861 |
++ * bind to a range of consoles specified by @first and @last. It will |
862 |
++ * also initialize the console driver by calling con_startup(). |
863 |
++ */ |
864 |
++int register_con_driver(const struct consw *csw, int first, int last) |
865 |
++{ |
866 |
++ int retval; |
867 |
++ |
868 |
++ console_lock(); |
869 |
++ retval = do_register_con_driver(csw, first, last); |
870 |
++ console_unlock(); |
871 |
++ return retval; |
872 |
++} |
873 |
+ EXPORT_SYMBOL(register_con_driver); |
874 |
+ |
875 |
+ /** |
876 |
+@@ -3601,9 +3626,18 @@ EXPORT_SYMBOL(register_con_driver); |
877 |
+ */ |
878 |
+ int unregister_con_driver(const struct consw *csw) |
879 |
+ { |
880 |
+- int i, retval = -ENODEV; |
881 |
++ int retval; |
882 |
+ |
883 |
+ console_lock(); |
884 |
++ retval = do_unregister_con_driver(csw); |
885 |
++ console_unlock(); |
886 |
++ return retval; |
887 |
++} |
888 |
++EXPORT_SYMBOL(unregister_con_driver); |
889 |
++ |
890 |
++int do_unregister_con_driver(const struct consw *csw) |
891 |
++{ |
892 |
++ int i, retval = -ENODEV; |
893 |
+ |
894 |
+ /* cannot unregister a bound driver */ |
895 |
+ if (con_is_bound(csw)) |
896 |
+@@ -3629,27 +3663,53 @@ int unregister_con_driver(const struct consw *csw) |
897 |
+ } |
898 |
+ } |
899 |
+ err: |
900 |
+- console_unlock(); |
901 |
+ return retval; |
902 |
+ } |
903 |
+-EXPORT_SYMBOL(unregister_con_driver); |
904 |
++EXPORT_SYMBOL_GPL(do_unregister_con_driver); |
905 |
+ |
906 |
+ /* |
907 |
+ * If we support more console drivers, this function is used |
908 |
+ * when a driver wants to take over some existing consoles |
909 |
+ * and become default driver for newly opened ones. |
910 |
+ * |
911 |
+- * take_over_console is basically a register followed by unbind |
912 |
++ * take_over_console is basically a register followed by unbind |
913 |
++ */ |
914 |
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt) |
915 |
++{ |
916 |
++ int err; |
917 |
++ |
918 |
++ err = do_register_con_driver(csw, first, last); |
919 |
++ /* |
920 |
++ * If we get an busy error we still want to bind the console driver |
921 |
++ * and return success, as we may have unbound the console driver |
922 |
++ * but not unregistered it. |
923 |
++ */ |
924 |
++ if (err == -EBUSY) |
925 |
++ err = 0; |
926 |
++ if (!err) |
927 |
++ do_bind_con_driver(csw, first, last, deflt); |
928 |
++ |
929 |
++ return err; |
930 |
++} |
931 |
++EXPORT_SYMBOL_GPL(do_take_over_console); |
932 |
++ |
933 |
++/* |
934 |
++ * If we support more console drivers, this function is used |
935 |
++ * when a driver wants to take over some existing consoles |
936 |
++ * and become default driver for newly opened ones. |
937 |
++ * |
938 |
++ * take_over_console is basically a register followed by unbind |
939 |
+ */ |
940 |
+ int take_over_console(const struct consw *csw, int first, int last, int deflt) |
941 |
+ { |
942 |
+ int err; |
943 |
+ |
944 |
+ err = register_con_driver(csw, first, last); |
945 |
+- /* if we get an busy error we still want to bind the console driver |
946 |
++ /* |
947 |
++ * If we get an busy error we still want to bind the console driver |
948 |
+ * and return success, as we may have unbound the console driver |
949 |
+- Â * but not unregistered it. |
950 |
+- */ |
951 |
++ * but not unregistered it. |
952 |
++ */ |
953 |
+ if (err == -EBUSY) |
954 |
+ err = 0; |
955 |
+ if (!err) |
956 |
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c |
957 |
+index 55a57c2..028c572 100644 |
958 |
+--- a/drivers/usb/host/ehci-omap.c |
959 |
++++ b/drivers/usb/host/ehci-omap.c |
960 |
+@@ -321,7 +321,7 @@ static const struct hc_driver ehci_omap_hc_driver = { |
961 |
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, |
962 |
+ }; |
963 |
+ |
964 |
+-MODULE_ALIAS("platform:omap-ehci"); |
965 |
++MODULE_ALIAS("platform:ehci-omap"); |
966 |
+ MODULE_AUTHOR("Texas Instruments, Inc."); |
967 |
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@×××××.com>"); |
968 |
+ |
969 |
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
970 |
+index 52cd814..24a3ea6 100644 |
971 |
+--- a/drivers/usb/serial/option.c |
972 |
++++ b/drivers/usb/serial/option.c |
973 |
+@@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = { |
974 |
+ |
975 |
+ static const struct option_blacklist_info alcatel_x200_blacklist = { |
976 |
+ .sendsetup = BIT(0) | BIT(1), |
977 |
++ .reserved = BIT(4), |
978 |
+ }; |
979 |
+ |
980 |
+ static const struct option_blacklist_info zte_0037_blacklist = { |
981 |
+@@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = { |
982 |
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, |
983 |
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, |
984 |
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, |
985 |
++ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42), |
986 |
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
987 |
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) }, |
988 |
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, |
989 |
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), |
990 |
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, |
991 |
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, |
992 |
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, |
993 |
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), |
994 |
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, |
995 |
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), |
996 |
+@@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = { |
997 |
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), |
998 |
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist |
999 |
+ }, |
1000 |
+- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, |
1001 |
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), |
1002 |
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, |
1003 |
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052), |
1004 |
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, |
1005 |
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6), |
1006 |
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, |
1007 |
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7), |
1008 |
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, |
1009 |
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), |
1010 |
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1011 |
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, |
1012 |
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c |
1013 |
+index 16b0bf0..7ab9046 100644 |
1014 |
+--- a/drivers/usb/storage/initializers.c |
1015 |
++++ b/drivers/usb/storage/initializers.c |
1016 |
+@@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us) |
1017 |
+ int idProduct; |
1018 |
+ |
1019 |
+ idesc = &us->pusb_intf->cur_altsetting->desc; |
1020 |
+- idProduct = us->pusb_dev->descriptor.idProduct; |
1021 |
++ idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct); |
1022 |
+ /* The first port is CDROM, |
1023 |
+ * means the dongle in the single port mode, |
1024 |
+ * and a switch command is required to be sent. */ |
1025 |
+@@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us) |
1026 |
+ int result = 0; |
1027 |
+ |
1028 |
+ if (usb_stor_huawei_dongles_pid(us)) { |
1029 |
+- if (us->pusb_dev->descriptor.idProduct >= 0x1446) |
1030 |
++ if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446) |
1031 |
+ result = usb_stor_huawei_scsi_init(us); |
1032 |
+ else |
1033 |
+ result = usb_stor_huawei_feature_init(us); |
1034 |
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h |
1035 |
+index 2c85530..65a6a75 100644 |
1036 |
+--- a/drivers/usb/storage/unusual_cypress.h |
1037 |
++++ b/drivers/usb/storage/unusual_cypress.h |
1038 |
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, |
1039 |
+ "Cypress ISD-300LP", |
1040 |
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
1041 |
+ |
1042 |
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, |
1043 |
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, |
1044 |
+ "Super Top", |
1045 |
+ "USB 2.0 SATA BRIDGE", |
1046 |
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), |
1047 |
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c |
1048 |
+index d2a96a4..ee0f001 100644 |
1049 |
+--- a/drivers/video/backlight/adp8860_bl.c |
1050 |
++++ b/drivers/video/backlight/adp8860_bl.c |
1051 |
+@@ -793,7 +793,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) |
1052 |
+ |
1053 |
+ static int adp8860_i2c_resume(struct i2c_client *client) |
1054 |
+ { |
1055 |
+- adp8860_set_bits(client, ADP8860_MDCR, NSTBY); |
1056 |
++ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN); |
1057 |
+ |
1058 |
+ return 0; |
1059 |
+ } |
1060 |
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c |
1061 |
+index 05a8832..bc0503a 100644 |
1062 |
+--- a/drivers/video/backlight/adp8870_bl.c |
1063 |
++++ b/drivers/video/backlight/adp8870_bl.c |
1064 |
+@@ -968,7 +968,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message) |
1065 |
+ |
1066 |
+ static int adp8870_i2c_resume(struct i2c_client *client) |
1067 |
+ { |
1068 |
+- adp8870_set_bits(client, ADP8870_MDCR, NSTBY); |
1069 |
++ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN); |
1070 |
+ |
1071 |
+ return 0; |
1072 |
+ } |
1073 |
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c |
1074 |
+index bf9a9b7..9b8bcab 100644 |
1075 |
+--- a/drivers/video/console/fbcon.c |
1076 |
++++ b/drivers/video/console/fbcon.c |
1077 |
+@@ -530,6 +530,33 @@ static int search_for_mapped_con(void) |
1078 |
+ return retval; |
1079 |
+ } |
1080 |
+ |
1081 |
++static int do_fbcon_takeover(int show_logo) |
1082 |
++{ |
1083 |
++ int err, i; |
1084 |
++ |
1085 |
++ if (!num_registered_fb) |
1086 |
++ return -ENODEV; |
1087 |
++ |
1088 |
++ if (!show_logo) |
1089 |
++ logo_shown = FBCON_LOGO_DONTSHOW; |
1090 |
++ |
1091 |
++ for (i = first_fb_vc; i <= last_fb_vc; i++) |
1092 |
++ con2fb_map[i] = info_idx; |
1093 |
++ |
1094 |
++ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc, |
1095 |
++ fbcon_is_default); |
1096 |
++ |
1097 |
++ if (err) { |
1098 |
++ for (i = first_fb_vc; i <= last_fb_vc; i++) |
1099 |
++ con2fb_map[i] = -1; |
1100 |
++ info_idx = -1; |
1101 |
++ } else { |
1102 |
++ fbcon_has_console_bind = 1; |
1103 |
++ } |
1104 |
++ |
1105 |
++ return err; |
1106 |
++} |
1107 |
++ |
1108 |
+ static int fbcon_takeover(int show_logo) |
1109 |
+ { |
1110 |
+ int err, i; |
1111 |
+@@ -991,7 +1018,7 @@ static const char *fbcon_startup(void) |
1112 |
+ } |
1113 |
+ |
1114 |
+ /* Setup default font */ |
1115 |
+- if (!p->fontdata) { |
1116 |
++ if (!p->fontdata && !vc->vc_font.data) { |
1117 |
+ if (!fontname[0] || !(font = find_font(fontname))) |
1118 |
+ font = get_default_font(info->var.xres, |
1119 |
+ info->var.yres, |
1120 |
+@@ -1001,6 +1028,8 @@ static const char *fbcon_startup(void) |
1121 |
+ vc->vc_font.height = font->height; |
1122 |
+ vc->vc_font.data = (void *)(p->fontdata = font->data); |
1123 |
+ vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ |
1124 |
++ } else { |
1125 |
++ p->fontdata = vc->vc_font.data; |
1126 |
+ } |
1127 |
+ |
1128 |
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); |
1129 |
+@@ -1160,9 +1189,9 @@ static void fbcon_init(struct vc_data *vc, int init) |
1130 |
+ ops->p = &fb_display[fg_console]; |
1131 |
+ } |
1132 |
+ |
1133 |
+-static void fbcon_free_font(struct display *p) |
1134 |
++static void fbcon_free_font(struct display *p, bool freefont) |
1135 |
+ { |
1136 |
+- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) |
1137 |
++ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) |
1138 |
+ kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); |
1139 |
+ p->fontdata = NULL; |
1140 |
+ p->userfont = 0; |
1141 |
+@@ -1174,8 +1203,8 @@ static void fbcon_deinit(struct vc_data *vc) |
1142 |
+ struct fb_info *info; |
1143 |
+ struct fbcon_ops *ops; |
1144 |
+ int idx; |
1145 |
++ bool free_font = true; |
1146 |
+ |
1147 |
+- fbcon_free_font(p); |
1148 |
+ idx = con2fb_map[vc->vc_num]; |
1149 |
+ |
1150 |
+ if (idx == -1) |
1151 |
+@@ -1186,6 +1215,8 @@ static void fbcon_deinit(struct vc_data *vc) |
1152 |
+ if (!info) |
1153 |
+ goto finished; |
1154 |
+ |
1155 |
++ if (info->flags & FBINFO_MISC_FIRMWARE) |
1156 |
++ free_font = false; |
1157 |
+ ops = info->fbcon_par; |
1158 |
+ |
1159 |
+ if (!ops) |
1160 |
+@@ -1197,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc) |
1161 |
+ ops->flags &= ~FBCON_FLAGS_INIT; |
1162 |
+ finished: |
1163 |
+ |
1164 |
++ fbcon_free_font(p, free_font); |
1165 |
++ |
1166 |
+ if (!con_is_bound(&fb_con)) |
1167 |
+ fbcon_exit(); |
1168 |
+ |
1169 |
+@@ -2978,7 +3011,7 @@ static int fbcon_unbind(void) |
1170 |
+ { |
1171 |
+ int ret; |
1172 |
+ |
1173 |
+- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, |
1174 |
++ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, |
1175 |
+ fbcon_is_default); |
1176 |
+ |
1177 |
+ if (!ret) |
1178 |
+@@ -3051,7 +3084,7 @@ static int fbcon_fb_unregistered(struct fb_info *info) |
1179 |
+ primary_device = -1; |
1180 |
+ |
1181 |
+ if (!num_registered_fb) |
1182 |
+- unregister_con_driver(&fb_con); |
1183 |
++ do_unregister_con_driver(&fb_con); |
1184 |
+ |
1185 |
+ return 0; |
1186 |
+ } |
1187 |
+@@ -3116,7 +3149,7 @@ static int fbcon_fb_registered(struct fb_info *info) |
1188 |
+ } |
1189 |
+ |
1190 |
+ if (info_idx != -1) |
1191 |
+- ret = fbcon_takeover(1); |
1192 |
++ ret = do_fbcon_takeover(1); |
1193 |
+ } else { |
1194 |
+ for (i = first_fb_vc; i <= last_fb_vc; i++) { |
1195 |
+ if (con2fb_map_boot[i] == idx) |
1196 |
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c |
1197 |
+index 7a41220..c133dde 100644 |
1198 |
+--- a/drivers/video/fbmem.c |
1199 |
++++ b/drivers/video/fbmem.c |
1200 |
+@@ -1628,7 +1628,9 @@ static int do_register_framebuffer(struct fb_info *fb_info) |
1201 |
+ event.info = fb_info; |
1202 |
+ if (!lock_fb_info(fb_info)) |
1203 |
+ return -ENODEV; |
1204 |
++ console_lock(); |
1205 |
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); |
1206 |
++ console_unlock(); |
1207 |
+ unlock_fb_info(fb_info); |
1208 |
+ return 0; |
1209 |
+ } |
1210 |
+@@ -1644,8 +1646,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
1211 |
+ |
1212 |
+ if (!lock_fb_info(fb_info)) |
1213 |
+ return -ENODEV; |
1214 |
++ console_lock(); |
1215 |
+ event.info = fb_info; |
1216 |
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); |
1217 |
++ console_unlock(); |
1218 |
+ unlock_fb_info(fb_info); |
1219 |
+ |
1220 |
+ if (ret) |
1221 |
+@@ -1660,7 +1664,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
1222 |
+ num_registered_fb--; |
1223 |
+ fb_cleanup_device(fb_info); |
1224 |
+ event.info = fb_info; |
1225 |
++ console_lock(); |
1226 |
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); |
1227 |
++ console_unlock(); |
1228 |
+ |
1229 |
+ /* this may free fb info */ |
1230 |
+ put_fb_info(fb_info); |
1231 |
+@@ -1831,11 +1837,8 @@ int fb_new_modelist(struct fb_info *info) |
1232 |
+ err = 1; |
1233 |
+ |
1234 |
+ if (!list_empty(&info->modelist)) { |
1235 |
+- if (!lock_fb_info(info)) |
1236 |
+- return -ENODEV; |
1237 |
+ event.info = info; |
1238 |
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); |
1239 |
+- unlock_fb_info(info); |
1240 |
+ } |
1241 |
+ |
1242 |
+ return err; |
1243 |
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c |
1244 |
+index 67afa9c..303fb9f 100644 |
1245 |
+--- a/drivers/video/fbsysfs.c |
1246 |
++++ b/drivers/video/fbsysfs.c |
1247 |
+@@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device, |
1248 |
+ if (i * sizeof(struct fb_videomode) != count) |
1249 |
+ return -EINVAL; |
1250 |
+ |
1251 |
++ if (!lock_fb_info(fb_info)) |
1252 |
++ return -ENODEV; |
1253 |
+ console_lock(); |
1254 |
+ list_splice(&fb_info->modelist, &old_list); |
1255 |
+ fb_videomode_to_modelist((const struct fb_videomode *)buf, i, |
1256 |
+@@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device, |
1257 |
+ fb_destroy_modelist(&old_list); |
1258 |
+ |
1259 |
+ console_unlock(); |
1260 |
++ unlock_fb_info(fb_info); |
1261 |
+ |
1262 |
+ return 0; |
1263 |
+ } |
1264 |
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
1265 |
+index 80bbc9c..244100f 100644 |
1266 |
+--- a/fs/ext4/resize.c |
1267 |
++++ b/fs/ext4/resize.c |
1268 |
+@@ -499,6 +499,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, |
1269 |
+ return err; |
1270 |
+ |
1271 |
+ exit_inode: |
1272 |
++ kfree(n_group_desc); |
1273 |
+ /* ext4_handle_release_buffer(handle, iloc.bh); */ |
1274 |
+ brelse(iloc.bh); |
1275 |
+ exit_dindj: |
1276 |
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
1277 |
+index f1aa1a2..c6a3363 100644 |
1278 |
+--- a/fs/ext4/super.c |
1279 |
++++ b/fs/ext4/super.c |
1280 |
+@@ -3681,22 +3681,19 @@ no_journal: |
1281 |
+ if (err) { |
1282 |
+ ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", |
1283 |
+ err); |
1284 |
+- goto failed_mount4; |
1285 |
++ goto failed_mount5; |
1286 |
+ } |
1287 |
+ |
1288 |
+ err = ext4_register_li_request(sb, first_not_zeroed); |
1289 |
+ if (err) |
1290 |
+- goto failed_mount4; |
1291 |
++ goto failed_mount6; |
1292 |
+ |
1293 |
+ sbi->s_kobj.kset = ext4_kset; |
1294 |
+ init_completion(&sbi->s_kobj_unregister); |
1295 |
+ err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL, |
1296 |
+ "%s", sb->s_id); |
1297 |
+- if (err) { |
1298 |
+- ext4_mb_release(sb); |
1299 |
+- ext4_ext_release(sb); |
1300 |
+- goto failed_mount4; |
1301 |
+- }; |
1302 |
++ if (err) |
1303 |
++ goto failed_mount7; |
1304 |
+ |
1305 |
+ EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; |
1306 |
+ ext4_orphan_cleanup(sb, es); |
1307 |
+@@ -3730,13 +3727,19 @@ cantfind_ext4: |
1308 |
+ ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); |
1309 |
+ goto failed_mount; |
1310 |
+ |
1311 |
++failed_mount7: |
1312 |
++ ext4_unregister_li_request(sb); |
1313 |
++failed_mount6: |
1314 |
++ ext4_ext_release(sb); |
1315 |
++failed_mount5: |
1316 |
++ ext4_mb_release(sb); |
1317 |
++ ext4_release_system_zone(sb); |
1318 |
+ failed_mount4: |
1319 |
+ iput(root); |
1320 |
+ sb->s_root = NULL; |
1321 |
+ ext4_msg(sb, KERN_ERR, "mount failed"); |
1322 |
+ destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); |
1323 |
+ failed_mount_wq: |
1324 |
+- ext4_release_system_zone(sb); |
1325 |
+ if (sbi->s_journal) { |
1326 |
+ jbd2_journal_destroy(sbi->s_journal); |
1327 |
+ sbi->s_journal = NULL; |
1328 |
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c |
1329 |
+index 20b4ea5..6ee3c36 100644 |
1330 |
+--- a/fs/fat/namei_vfat.c |
1331 |
++++ b/fs/fat/namei_vfat.c |
1332 |
+@@ -514,7 +514,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, |
1333 |
+ int charlen; |
1334 |
+ |
1335 |
+ if (utf8) { |
1336 |
+- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname); |
1337 |
++ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN, |
1338 |
++ (wchar_t *) outname, FAT_LFN_LEN + 2); |
1339 |
+ if (*outlen < 0) |
1340 |
+ return *outlen; |
1341 |
+ else if (*outlen > FAT_LFN_LEN) |
1342 |
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c |
1343 |
+index e374050..5ee055e 100644 |
1344 |
+--- a/fs/lockd/clntproc.c |
1345 |
++++ b/fs/lockd/clntproc.c |
1346 |
+@@ -550,6 +550,9 @@ again: |
1347 |
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); |
1348 |
+ if (status < 0) |
1349 |
+ break; |
1350 |
++ /* Resend the blocking lock request after a server reboot */ |
1351 |
++ if (resp->status == nlm_lck_denied_grace_period) |
1352 |
++ continue; |
1353 |
+ if (resp->status != nlm_lck_blocked) |
1354 |
+ break; |
1355 |
+ } |
1356 |
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c |
1357 |
+index 44a88a9..0eb059e 100644 |
1358 |
+--- a/fs/nls/nls_base.c |
1359 |
++++ b/fs/nls/nls_base.c |
1360 |
+@@ -114,34 +114,57 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen) |
1361 |
+ } |
1362 |
+ EXPORT_SYMBOL(utf32_to_utf8); |
1363 |
+ |
1364 |
+-int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs) |
1365 |
++static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian) |
1366 |
++{ |
1367 |
++ switch (endian) { |
1368 |
++ default: |
1369 |
++ *s = (wchar_t) c; |
1370 |
++ break; |
1371 |
++ case UTF16_LITTLE_ENDIAN: |
1372 |
++ *s = __cpu_to_le16(c); |
1373 |
++ break; |
1374 |
++ case UTF16_BIG_ENDIAN: |
1375 |
++ *s = __cpu_to_be16(c); |
1376 |
++ break; |
1377 |
++ } |
1378 |
++} |
1379 |
++ |
1380 |
++int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian, |
1381 |
++ wchar_t *pwcs, int maxlen) |
1382 |
+ { |
1383 |
+ u16 *op; |
1384 |
+ int size; |
1385 |
+ unicode_t u; |
1386 |
+ |
1387 |
+ op = pwcs; |
1388 |
+- while (*s && len > 0) { |
1389 |
++ while (len > 0 && maxlen > 0 && *s) { |
1390 |
+ if (*s & 0x80) { |
1391 |
+ size = utf8_to_utf32(s, len, &u); |
1392 |
+ if (size < 0) |
1393 |
+ return -EINVAL; |
1394 |
++ s += size; |
1395 |
++ len -= size; |
1396 |
+ |
1397 |
+ if (u >= PLANE_SIZE) { |
1398 |
++ if (maxlen < 2) |
1399 |
++ break; |
1400 |
+ u -= PLANE_SIZE; |
1401 |
+- *op++ = (wchar_t) (SURROGATE_PAIR | |
1402 |
+- ((u >> 10) & SURROGATE_BITS)); |
1403 |
+- *op++ = (wchar_t) (SURROGATE_PAIR | |
1404 |
++ put_utf16(op++, SURROGATE_PAIR | |
1405 |
++ ((u >> 10) & SURROGATE_BITS), |
1406 |
++ endian); |
1407 |
++ put_utf16(op++, SURROGATE_PAIR | |
1408 |
+ SURROGATE_LOW | |
1409 |
+- (u & SURROGATE_BITS)); |
1410 |
++ (u & SURROGATE_BITS), |
1411 |
++ endian); |
1412 |
++ maxlen -= 2; |
1413 |
+ } else { |
1414 |
+- *op++ = (wchar_t) u; |
1415 |
++ put_utf16(op++, u, endian); |
1416 |
++ maxlen--; |
1417 |
+ } |
1418 |
+- s += size; |
1419 |
+- len -= size; |
1420 |
+ } else { |
1421 |
+- *op++ = *s++; |
1422 |
++ put_utf16(op++, *s++, endian); |
1423 |
+ len--; |
1424 |
++ maxlen--; |
1425 |
+ } |
1426 |
+ } |
1427 |
+ return op - pwcs; |
1428 |
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c |
1429 |
+index 8445fbc..6f292dd 100644 |
1430 |
+--- a/fs/notify/inotify/inotify_user.c |
1431 |
++++ b/fs/notify/inotify/inotify_user.c |
1432 |
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, |
1433 |
+ |
1434 |
+ /* don't allow invalid bits: we don't want flags set */ |
1435 |
+ mask = inotify_arg_to_mask(arg); |
1436 |
+- if (unlikely(!(mask & IN_ALL_EVENTS))) |
1437 |
+- return -EINVAL; |
1438 |
+ |
1439 |
+ fsn_mark = fsnotify_find_inode_mark(group, inode); |
1440 |
+ if (!fsn_mark) |
1441 |
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group, |
1442 |
+ |
1443 |
+ /* don't allow invalid bits: we don't want flags set */ |
1444 |
+ mask = inotify_arg_to_mask(arg); |
1445 |
+- if (unlikely(!(mask & IN_ALL_EVENTS))) |
1446 |
+- return -EINVAL; |
1447 |
+ |
1448 |
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); |
1449 |
+ if (unlikely(!tmp_i_mark)) |
1450 |
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c |
1451 |
+index 7642d7c..ab4046f 100644 |
1452 |
+--- a/fs/ocfs2/dlmglue.c |
1453 |
++++ b/fs/ocfs2/dlmglue.c |
1454 |
+@@ -2539,6 +2539,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb, |
1455 |
+ * everything is up to the caller :) */ |
1456 |
+ status = ocfs2_should_refresh_lock_res(lockres); |
1457 |
+ if (status < 0) { |
1458 |
++ ocfs2_cluster_unlock(osb, lockres, level); |
1459 |
+ mlog_errno(status); |
1460 |
+ goto bail; |
1461 |
+ } |
1462 |
+@@ -2547,8 +2548,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb, |
1463 |
+ |
1464 |
+ ocfs2_complete_lock_res_refresh(lockres, status); |
1465 |
+ |
1466 |
+- if (status < 0) |
1467 |
++ if (status < 0) { |
1468 |
++ ocfs2_cluster_unlock(osb, lockres, level); |
1469 |
+ mlog_errno(status); |
1470 |
++ } |
1471 |
+ ocfs2_track_lock_refresh(lockres); |
1472 |
+ } |
1473 |
+ bail: |
1474 |
+diff --git a/include/linux/console.h b/include/linux/console.h |
1475 |
+index 7453cfd..6ae6a15 100644 |
1476 |
+--- a/include/linux/console.h |
1477 |
++++ b/include/linux/console.h |
1478 |
+@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */ |
1479 |
+ int con_is_bound(const struct consw *csw); |
1480 |
+ int register_con_driver(const struct consw *csw, int first, int last); |
1481 |
+ int unregister_con_driver(const struct consw *csw); |
1482 |
++int do_unregister_con_driver(const struct consw *csw); |
1483 |
+ int take_over_console(const struct consw *sw, int first, int last, int deflt); |
1484 |
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt); |
1485 |
+ void give_up_console(const struct consw *sw); |
1486 |
+ #ifdef CONFIG_HW_CONSOLE |
1487 |
+ int con_debug_enter(struct vc_data *vc); |
1488 |
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h |
1489 |
+index 1d1b1e1..ee2baf0 100644 |
1490 |
+--- a/include/linux/mmu_notifier.h |
1491 |
++++ b/include/linux/mmu_notifier.h |
1492 |
+@@ -4,6 +4,7 @@ |
1493 |
+ #include <linux/list.h> |
1494 |
+ #include <linux/spinlock.h> |
1495 |
+ #include <linux/mm_types.h> |
1496 |
++#include <linux/srcu.h> |
1497 |
+ |
1498 |
+ struct mmu_notifier; |
1499 |
+ struct mmu_notifier_ops; |
1500 |
+diff --git a/include/linux/nls.h b/include/linux/nls.h |
1501 |
+index d47beef..5dc635f 100644 |
1502 |
+--- a/include/linux/nls.h |
1503 |
++++ b/include/linux/nls.h |
1504 |
+@@ -43,7 +43,7 @@ enum utf16_endian { |
1505 |
+ UTF16_BIG_ENDIAN |
1506 |
+ }; |
1507 |
+ |
1508 |
+-/* nls.c */ |
1509 |
++/* nls_base.c */ |
1510 |
+ extern int register_nls(struct nls_table *); |
1511 |
+ extern int unregister_nls(struct nls_table *); |
1512 |
+ extern struct nls_table *load_nls(char *); |
1513 |
+@@ -52,7 +52,8 @@ extern struct nls_table *load_nls_default(void); |
1514 |
+ |
1515 |
+ extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu); |
1516 |
+ extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen); |
1517 |
+-extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs); |
1518 |
++extern int utf8s_to_utf16s(const u8 *s, int len, |
1519 |
++ enum utf16_endian endian, wchar_t *pwcs, int maxlen); |
1520 |
+ extern int utf16s_to_utf8s(const wchar_t *pwcs, int len, |
1521 |
+ enum utf16_endian endian, u8 *s, int maxlen); |
1522 |
+ |
1523 |
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h |
1524 |
+index a54b825..6f8b026 100644 |
1525 |
+--- a/include/linux/usb/audio.h |
1526 |
++++ b/include/linux/usb/audio.h |
1527 |
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de |
1528 |
+ int protocol) |
1529 |
+ { |
1530 |
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); |
1531 |
+- return desc->baSourceID[desc->bNrInPins + control_size]; |
1532 |
++ return *(uac_processing_unit_bmControls(desc, protocol) |
1533 |
++ + control_size); |
1534 |
+ } |
1535 |
+ |
1536 |
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, |
1537 |
+ int protocol) |
1538 |
+ { |
1539 |
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); |
1540 |
+- return &desc->baSourceID[desc->bNrInPins + control_size + 1]; |
1541 |
++ return uac_processing_unit_bmControls(desc, protocol) |
1542 |
++ + control_size + 1; |
1543 |
+ } |
1544 |
+ |
1545 |
+ /* 4.5.2 Class-Specific AS Interface Descriptor */ |
1546 |
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h |
1547 |
+index 4d05e14..90538b4 100644 |
1548 |
+--- a/include/linux/vt_kern.h |
1549 |
++++ b/include/linux/vt_kern.h |
1550 |
+@@ -131,6 +131,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new); |
1551 |
+ int vt_waitactive(int n); |
1552 |
+ void change_console(struct vc_data *new_vc); |
1553 |
+ void reset_vc(struct vc_data *vc); |
1554 |
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last, |
1555 |
++ int deflt); |
1556 |
+ extern int unbind_con_driver(const struct consw *csw, int first, int last, |
1557 |
+ int deflt); |
1558 |
+ int vty_init(const struct file_operations *console_fops); |
1559 |
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h |
1560 |
+index e46674d..f9ce2fa 100644 |
1561 |
+--- a/include/net/inet6_hashtables.h |
1562 |
++++ b/include/net/inet6_hashtables.h |
1563 |
+@@ -28,16 +28,16 @@ |
1564 |
+ |
1565 |
+ struct inet_hashinfo; |
1566 |
+ |
1567 |
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */ |
1568 |
+ static inline unsigned int inet6_ehashfn(struct net *net, |
1569 |
+ const struct in6_addr *laddr, const u16 lport, |
1570 |
+ const struct in6_addr *faddr, const __be16 fport) |
1571 |
+ { |
1572 |
+- u32 ports = (lport ^ (__force u16)fport); |
1573 |
++ u32 ports = (((u32)lport) << 16) | (__force u32)fport; |
1574 |
+ |
1575 |
+ return jhash_3words((__force u32)laddr->s6_addr32[3], |
1576 |
+- (__force u32)faddr->s6_addr32[3], |
1577 |
+- ports, inet_ehash_secret + net_hash_mix(net)); |
1578 |
++ ipv6_addr_jhash(faddr), |
1579 |
++ ports, |
1580 |
++ inet_ehash_secret + net_hash_mix(net)); |
1581 |
+ } |
1582 |
+ |
1583 |
+ static inline int inet6_sk_ehashfn(const struct sock *sk) |
1584 |
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h |
1585 |
+index 14dd9c7..26490b3 100644 |
1586 |
+--- a/include/net/inet_sock.h |
1587 |
++++ b/include/net/inet_sock.h |
1588 |
+@@ -199,6 +199,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, |
1589 |
+ extern int inet_sk_rebuild_header(struct sock *sk); |
1590 |
+ |
1591 |
+ extern u32 inet_ehash_secret; |
1592 |
++extern u32 ipv6_hash_secret; |
1593 |
+ extern void build_ehash_secret(void); |
1594 |
+ |
1595 |
+ static inline unsigned int inet_ehashfn(struct net *net, |
1596 |
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h |
1597 |
+index c39121f..879aadf 100644 |
1598 |
+--- a/include/net/ipv6.h |
1599 |
++++ b/include/net/ipv6.h |
1600 |
+@@ -15,6 +15,7 @@ |
1601 |
+ |
1602 |
+ #include <linux/ipv6.h> |
1603 |
+ #include <linux/hardirq.h> |
1604 |
++#include <linux/jhash.h> |
1605 |
+ #include <net/if_inet6.h> |
1606 |
+ #include <net/ndisc.h> |
1607 |
+ #include <net/flow.h> |
1608 |
+@@ -386,6 +387,17 @@ struct ip6_create_arg { |
1609 |
+ void ip6_frag_init(struct inet_frag_queue *q, void *a); |
1610 |
+ int ip6_frag_match(struct inet_frag_queue *q, void *a); |
1611 |
+ |
1612 |
++/* more secured version of ipv6_addr_hash() */ |
1613 |
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a) |
1614 |
++{ |
1615 |
++ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; |
1616 |
++ |
1617 |
++ return jhash_3words(v, |
1618 |
++ (__force u32)a->s6_addr32[2], |
1619 |
++ (__force u32)a->s6_addr32[3], |
1620 |
++ ipv6_hash_secret); |
1621 |
++} |
1622 |
++ |
1623 |
+ static inline int ipv6_addr_any(const struct in6_addr *a) |
1624 |
+ { |
1625 |
+ return (a->s6_addr32[0] | a->s6_addr32[1] | |
1626 |
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c |
1627 |
+index 957869f..e079c3e 100644 |
1628 |
+--- a/kernel/hrtimer.c |
1629 |
++++ b/kernel/hrtimer.c |
1630 |
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) |
1631 |
+ * and expiry check is done in the hrtimer_interrupt or in the softirq. |
1632 |
+ */ |
1633 |
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
1634 |
+- struct hrtimer_clock_base *base, |
1635 |
+- int wakeup) |
1636 |
++ struct hrtimer_clock_base *base) |
1637 |
+ { |
1638 |
+- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { |
1639 |
+- if (wakeup) { |
1640 |
+- raw_spin_unlock(&base->cpu_base->lock); |
1641 |
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1642 |
+- raw_spin_lock(&base->cpu_base->lock); |
1643 |
+- } else |
1644 |
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1645 |
+- |
1646 |
+- return 1; |
1647 |
+- } |
1648 |
+- |
1649 |
+- return 0; |
1650 |
++ return base->cpu_base->hres_active && hrtimer_reprogram(timer, base); |
1651 |
+ } |
1652 |
+ |
1653 |
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) |
1654 |
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; } |
1655 |
+ static inline void |
1656 |
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } |
1657 |
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
1658 |
+- struct hrtimer_clock_base *base, |
1659 |
+- int wakeup) |
1660 |
++ struct hrtimer_clock_base *base) |
1661 |
+ { |
1662 |
+ return 0; |
1663 |
+ } |
1664 |
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
1665 |
+ * |
1666 |
+ * XXX send_remote_softirq() ? |
1667 |
+ */ |
1668 |
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) |
1669 |
+- hrtimer_enqueue_reprogram(timer, new_base, wakeup); |
1670 |
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases) |
1671 |
++ && hrtimer_enqueue_reprogram(timer, new_base)) { |
1672 |
++ if (wakeup) { |
1673 |
++ /* |
1674 |
++ * We need to drop cpu_base->lock to avoid a |
1675 |
++ * lock ordering issue vs. rq->lock. |
1676 |
++ */ |
1677 |
++ raw_spin_unlock(&new_base->cpu_base->lock); |
1678 |
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1679 |
++ local_irq_restore(flags); |
1680 |
++ return ret; |
1681 |
++ } else { |
1682 |
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
1683 |
++ } |
1684 |
++ } |
1685 |
+ |
1686 |
+ unlock_hrtimer_base(timer, &flags); |
1687 |
+ |
1688 |
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c |
1689 |
+index dc813a9..63633a3 100644 |
1690 |
+--- a/kernel/irq/spurious.c |
1691 |
++++ b/kernel/irq/spurious.c |
1692 |
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
1693 |
+ |
1694 |
+ /* |
1695 |
+ * All handlers must agree on IRQF_SHARED, so we test just the |
1696 |
+- * first. Check for action->next as well. |
1697 |
++ * first. |
1698 |
+ */ |
1699 |
+ action = desc->action; |
1700 |
+ if (!action || !(action->flags & IRQF_SHARED) || |
1701 |
+- (action->flags & __IRQF_TIMER) || |
1702 |
+- (action->handler(irq, action->dev_id) == IRQ_HANDLED) || |
1703 |
+- !action->next) |
1704 |
++ (action->flags & __IRQF_TIMER)) |
1705 |
+ goto out; |
1706 |
+ |
1707 |
+ /* Already running on another processor */ |
1708 |
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) |
1709 |
+ do { |
1710 |
+ if (handle_irq_event(desc) == IRQ_HANDLED) |
1711 |
+ ret = IRQ_HANDLED; |
1712 |
++ /* Make sure that there is still a valid action */ |
1713 |
+ action = desc->action; |
1714 |
+ } while ((desc->istate & IRQS_PENDING) && action); |
1715 |
+ desc->istate &= ~IRQS_POLL_INPROGRESS; |
1716 |
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c |
1717 |
+index 640ded8..93d5e4a 100644 |
1718 |
+--- a/kernel/posix-cpu-timers.c |
1719 |
++++ b/kernel/posix-cpu-timers.c |
1720 |
+@@ -1450,8 +1450,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, |
1721 |
+ while (!signal_pending(current)) { |
1722 |
+ if (timer.it.cpu.expires.sched == 0) { |
1723 |
+ /* |
1724 |
+- * Our timer fired and was reset. |
1725 |
++ * Our timer fired and was reset, below |
1726 |
++ * deletion can not fail. |
1727 |
+ */ |
1728 |
++ posix_cpu_timer_del(&timer); |
1729 |
+ spin_unlock_irq(&timer.it_lock); |
1730 |
+ return 0; |
1731 |
+ } |
1732 |
+@@ -1469,9 +1471,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, |
1733 |
+ * We were interrupted by a signal. |
1734 |
+ */ |
1735 |
+ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); |
1736 |
+- posix_cpu_timer_set(&timer, 0, &zero_it, it); |
1737 |
++ error = posix_cpu_timer_set(&timer, 0, &zero_it, it); |
1738 |
++ if (!error) { |
1739 |
++ /* |
1740 |
++ * Timer is now unarmed, deletion can not fail. |
1741 |
++ */ |
1742 |
++ posix_cpu_timer_del(&timer); |
1743 |
++ } |
1744 |
+ spin_unlock_irq(&timer.it_lock); |
1745 |
+ |
1746 |
++ while (error == TIMER_RETRY) { |
1747 |
++ /* |
1748 |
++ * We need to handle case when timer was or is in the |
1749 |
++ * middle of firing. In other cases we already freed |
1750 |
++ * resources. |
1751 |
++ */ |
1752 |
++ spin_lock_irq(&timer.it_lock); |
1753 |
++ error = posix_cpu_timer_del(&timer); |
1754 |
++ spin_unlock_irq(&timer.it_lock); |
1755 |
++ } |
1756 |
++ |
1757 |
+ if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { |
1758 |
+ /* |
1759 |
+ * It actually did fire already. |
1760 |
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl |
1761 |
+index eb51d76..3f42652 100644 |
1762 |
+--- a/kernel/timeconst.pl |
1763 |
++++ b/kernel/timeconst.pl |
1764 |
+@@ -369,10 +369,8 @@ if ($hz eq '--can') { |
1765 |
+ die "Usage: $0 HZ\n"; |
1766 |
+ } |
1767 |
+ |
1768 |
+- @val = @{$canned_values{$hz}}; |
1769 |
+- if (!defined(@val)) { |
1770 |
+- @val = compute_values($hz); |
1771 |
+- } |
1772 |
++ $cv = $canned_values{$hz}; |
1773 |
++ @val = defined($cv) ? @$cv : compute_values($hz); |
1774 |
+ output($hz, @val); |
1775 |
+ } |
1776 |
+ exit 0; |
1777 |
+diff --git a/mm/fadvise.c b/mm/fadvise.c |
1778 |
+index 8d723c9..35b2bb0 100644 |
1779 |
+--- a/mm/fadvise.c |
1780 |
++++ b/mm/fadvise.c |
1781 |
+@@ -17,6 +17,7 @@ |
1782 |
+ #include <linux/fadvise.h> |
1783 |
+ #include <linux/writeback.h> |
1784 |
+ #include <linux/syscalls.h> |
1785 |
++#include <linux/swap.h> |
1786 |
+ |
1787 |
+ #include <asm/unistd.h> |
1788 |
+ |
1789 |
+@@ -123,9 +124,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice) |
1790 |
+ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; |
1791 |
+ end_index = (endbyte >> PAGE_CACHE_SHIFT); |
1792 |
+ |
1793 |
+- if (end_index >= start_index) |
1794 |
+- invalidate_mapping_pages(mapping, start_index, |
1795 |
++ if (end_index >= start_index) { |
1796 |
++ unsigned long count = invalidate_mapping_pages(mapping, |
1797 |
++ start_index, end_index); |
1798 |
++ |
1799 |
++ /* |
1800 |
++ * If fewer pages were invalidated than expected then |
1801 |
++ * it is possible that some of the pages were on |
1802 |
++ * a per-cpu pagevec for a remote CPU. Drain all |
1803 |
++ * pagevecs and try again. |
1804 |
++ */ |
1805 |
++ if (count < (end_index - start_index + 1)) { |
1806 |
++ lru_add_drain_all(); |
1807 |
++ invalidate_mapping_pages(mapping, start_index, |
1808 |
+ end_index); |
1809 |
++ } |
1810 |
++ } |
1811 |
+ break; |
1812 |
+ default: |
1813 |
+ ret = -EINVAL; |
1814 |
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c |
1815 |
+index 71c7811..88fa54d 100644 |
1816 |
+--- a/mm/mmu_notifier.c |
1817 |
++++ b/mm/mmu_notifier.c |
1818 |
+@@ -14,10 +14,14 @@ |
1819 |
+ #include <linux/module.h> |
1820 |
+ #include <linux/mm.h> |
1821 |
+ #include <linux/err.h> |
1822 |
++#include <linux/srcu.h> |
1823 |
+ #include <linux/rcupdate.h> |
1824 |
+ #include <linux/sched.h> |
1825 |
+ #include <linux/slab.h> |
1826 |
+ |
1827 |
++/* global SRCU for all MMs */ |
1828 |
++static struct srcu_struct srcu; |
1829 |
++ |
1830 |
+ /* |
1831 |
+ * This function can't run concurrently against mmu_notifier_register |
1832 |
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap |
1833 |
+@@ -25,58 +29,61 @@ |
1834 |
+ * in parallel despite there being no task using this mm any more, |
1835 |
+ * through the vmas outside of the exit_mmap context, such as with |
1836 |
+ * vmtruncate. This serializes against mmu_notifier_unregister with |
1837 |
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes |
1838 |
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm |
1839 |
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes |
1840 |
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm |
1841 |
+ * can't go away from under us as exit_mmap holds an mm_count pin |
1842 |
+ * itself. |
1843 |
+ */ |
1844 |
+ void __mmu_notifier_release(struct mm_struct *mm) |
1845 |
+ { |
1846 |
+ struct mmu_notifier *mn; |
1847 |
+- struct hlist_node *n; |
1848 |
++ int id; |
1849 |
+ |
1850 |
+ /* |
1851 |
+- * RCU here will block mmu_notifier_unregister until |
1852 |
+- * ->release returns. |
1853 |
++ * srcu_read_lock() here will block synchronize_srcu() in |
1854 |
++ * mmu_notifier_unregister() until all registered |
1855 |
++ * ->release() callouts this function makes have |
1856 |
++ * returned. |
1857 |
+ */ |
1858 |
+- rcu_read_lock(); |
1859 |
+- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) |
1860 |
+- /* |
1861 |
+- * if ->release runs before mmu_notifier_unregister it |
1862 |
+- * must be handled as it's the only way for the driver |
1863 |
+- * to flush all existing sptes and stop the driver |
1864 |
+- * from establishing any more sptes before all the |
1865 |
+- * pages in the mm are freed. |
1866 |
+- */ |
1867 |
+- if (mn->ops->release) |
1868 |
+- mn->ops->release(mn, mm); |
1869 |
+- rcu_read_unlock(); |
1870 |
+- |
1871 |
++ id = srcu_read_lock(&srcu); |
1872 |
+ spin_lock(&mm->mmu_notifier_mm->lock); |
1873 |
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { |
1874 |
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first, |
1875 |
+ struct mmu_notifier, |
1876 |
+ hlist); |
1877 |
++ |
1878 |
+ /* |
1879 |
+- * We arrived before mmu_notifier_unregister so |
1880 |
+- * mmu_notifier_unregister will do nothing other than |
1881 |
+- * to wait ->release to finish and |
1882 |
+- * mmu_notifier_unregister to return. |
1883 |
++ * Unlink. This will prevent mmu_notifier_unregister() |
1884 |
++ * from also making the ->release() callout. |
1885 |
+ */ |
1886 |
+ hlist_del_init_rcu(&mn->hlist); |
1887 |
++ spin_unlock(&mm->mmu_notifier_mm->lock); |
1888 |
++ |
1889 |
++ /* |
1890 |
++ * Clear sptes. (see 'release' description in mmu_notifier.h) |
1891 |
++ */ |
1892 |
++ if (mn->ops->release) |
1893 |
++ mn->ops->release(mn, mm); |
1894 |
++ |
1895 |
++ spin_lock(&mm->mmu_notifier_mm->lock); |
1896 |
+ } |
1897 |
+ spin_unlock(&mm->mmu_notifier_mm->lock); |
1898 |
+ |
1899 |
+ /* |
1900 |
+- * synchronize_rcu here prevents mmu_notifier_release to |
1901 |
+- * return to exit_mmap (which would proceed freeing all pages |
1902 |
+- * in the mm) until the ->release method returns, if it was |
1903 |
+- * invoked by mmu_notifier_unregister. |
1904 |
+- * |
1905 |
+- * The mmu_notifier_mm can't go away from under us because one |
1906 |
+- * mm_count is hold by exit_mmap. |
1907 |
++ * All callouts to ->release() which we have done are complete. |
1908 |
++ * Allow synchronize_srcu() in mmu_notifier_unregister() to complete |
1909 |
++ */ |
1910 |
++ srcu_read_unlock(&srcu, id); |
1911 |
++ |
1912 |
++ /* |
1913 |
++ * mmu_notifier_unregister() may have unlinked a notifier and may |
1914 |
++ * still be calling out to it. Additionally, other notifiers |
1915 |
++ * may have been active via vmtruncate() et. al. Block here |
1916 |
++ * to ensure that all notifier callouts for this mm have been |
1917 |
++ * completed and the sptes are really cleaned up before returning |
1918 |
++ * to exit_mmap(). |
1919 |
+ */ |
1920 |
+- synchronize_rcu(); |
1921 |
++ synchronize_srcu(&srcu); |
1922 |
+ } |
1923 |
+ |
1924 |
+ /* |
1925 |
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, |
1926 |
+ { |
1927 |
+ struct mmu_notifier *mn; |
1928 |
+ struct hlist_node *n; |
1929 |
+- int young = 0; |
1930 |
++ int young = 0, id; |
1931 |
+ |
1932 |
+- rcu_read_lock(); |
1933 |
++ id = srcu_read_lock(&srcu); |
1934 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
1935 |
+ if (mn->ops->clear_flush_young) |
1936 |
+ young |= mn->ops->clear_flush_young(mn, mm, address); |
1937 |
+ } |
1938 |
+- rcu_read_unlock(); |
1939 |
++ srcu_read_unlock(&srcu, id); |
1940 |
+ |
1941 |
+ return young; |
1942 |
+ } |
1943 |
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm, |
1944 |
+ { |
1945 |
+ struct mmu_notifier *mn; |
1946 |
+ struct hlist_node *n; |
1947 |
+- int young = 0; |
1948 |
++ int young = 0, id; |
1949 |
+ |
1950 |
+- rcu_read_lock(); |
1951 |
++ id = srcu_read_lock(&srcu); |
1952 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
1953 |
+ if (mn->ops->test_young) { |
1954 |
+ young = mn->ops->test_young(mn, mm, address); |
1955 |
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm, |
1956 |
+ break; |
1957 |
+ } |
1958 |
+ } |
1959 |
+- rcu_read_unlock(); |
1960 |
++ srcu_read_unlock(&srcu, id); |
1961 |
+ |
1962 |
+ return young; |
1963 |
+ } |
1964 |
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, |
1965 |
+ { |
1966 |
+ struct mmu_notifier *mn; |
1967 |
+ struct hlist_node *n; |
1968 |
++ int id; |
1969 |
+ |
1970 |
+- rcu_read_lock(); |
1971 |
++ id = srcu_read_lock(&srcu); |
1972 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
1973 |
+ if (mn->ops->change_pte) |
1974 |
+ mn->ops->change_pte(mn, mm, address, pte); |
1975 |
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, |
1976 |
+ else if (mn->ops->invalidate_page) |
1977 |
+ mn->ops->invalidate_page(mn, mm, address); |
1978 |
+ } |
1979 |
+- rcu_read_unlock(); |
1980 |
++ srcu_read_unlock(&srcu, id); |
1981 |
+ } |
1982 |
+ |
1983 |
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm, |
1984 |
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm, |
1985 |
+ { |
1986 |
+ struct mmu_notifier *mn; |
1987 |
+ struct hlist_node *n; |
1988 |
++ int id; |
1989 |
+ |
1990 |
+- rcu_read_lock(); |
1991 |
++ id = srcu_read_lock(&srcu); |
1992 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
1993 |
+ if (mn->ops->invalidate_page) |
1994 |
+ mn->ops->invalidate_page(mn, mm, address); |
1995 |
+ } |
1996 |
+- rcu_read_unlock(); |
1997 |
++ srcu_read_unlock(&srcu, id); |
1998 |
+ } |
1999 |
+ |
2000 |
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, |
2001 |
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, |
2002 |
+ { |
2003 |
+ struct mmu_notifier *mn; |
2004 |
+ struct hlist_node *n; |
2005 |
++ int id; |
2006 |
+ |
2007 |
+- rcu_read_lock(); |
2008 |
++ id = srcu_read_lock(&srcu); |
2009 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
2010 |
+ if (mn->ops->invalidate_range_start) |
2011 |
+ mn->ops->invalidate_range_start(mn, mm, start, end); |
2012 |
+ } |
2013 |
+- rcu_read_unlock(); |
2014 |
++ srcu_read_unlock(&srcu, id); |
2015 |
+ } |
2016 |
+ |
2017 |
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, |
2018 |
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, |
2019 |
+ { |
2020 |
+ struct mmu_notifier *mn; |
2021 |
+ struct hlist_node *n; |
2022 |
++ int id; |
2023 |
+ |
2024 |
+- rcu_read_lock(); |
2025 |
++ id = srcu_read_lock(&srcu); |
2026 |
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) { |
2027 |
+ if (mn->ops->invalidate_range_end) |
2028 |
+ mn->ops->invalidate_range_end(mn, mm, start, end); |
2029 |
+ } |
2030 |
+- rcu_read_unlock(); |
2031 |
++ srcu_read_unlock(&srcu, id); |
2032 |
+ } |
2033 |
+ |
2034 |
+ static int do_mmu_notifier_register(struct mmu_notifier *mn, |
2035 |
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn, |
2036 |
+ |
2037 |
+ BUG_ON(atomic_read(&mm->mm_users) <= 0); |
2038 |
+ |
2039 |
++ /* |
2040 |
++ * Verify that mmu_notifier_init() already run and the global srcu is |
2041 |
++ * initialized. |
2042 |
++ */ |
2043 |
++ BUG_ON(!srcu.per_cpu_ref); |
2044 |
++ |
2045 |
+ ret = -ENOMEM; |
2046 |
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL); |
2047 |
+ if (unlikely(!mmu_notifier_mm)) |
2048 |
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm) |
2049 |
+ /* |
2050 |
+ * This releases the mm_count pin automatically and frees the mm |
2051 |
+ * structure if it was the last user of it. It serializes against |
2052 |
+- * running mmu notifiers with RCU and against mmu_notifier_unregister |
2053 |
+- * with the unregister lock + RCU. All sptes must be dropped before |
2054 |
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister |
2055 |
++ * with the unregister lock + SRCU. All sptes must be dropped before |
2056 |
+ * calling mmu_notifier_unregister. ->release or any other notifier |
2057 |
+ * method may be invoked concurrently with mmu_notifier_unregister, |
2058 |
+ * and only after mmu_notifier_unregister returned we're guaranteed |
2059 |
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) |
2060 |
+ { |
2061 |
+ BUG_ON(atomic_read(&mm->mm_count) <= 0); |
2062 |
+ |
2063 |
++ spin_lock(&mm->mmu_notifier_mm->lock); |
2064 |
+ if (!hlist_unhashed(&mn->hlist)) { |
2065 |
+- /* |
2066 |
+- * RCU here will force exit_mmap to wait ->release to finish |
2067 |
+- * before freeing the pages. |
2068 |
+- */ |
2069 |
+- rcu_read_lock(); |
2070 |
++ int id; |
2071 |
+ |
2072 |
+ /* |
2073 |
+- * exit_mmap will block in mmu_notifier_release to |
2074 |
+- * guarantee ->release is called before freeing the |
2075 |
+- * pages. |
2076 |
++ * Ensure we synchronize up with __mmu_notifier_release(). |
2077 |
+ */ |
2078 |
++ id = srcu_read_lock(&srcu); |
2079 |
++ |
2080 |
++ hlist_del_rcu(&mn->hlist); |
2081 |
++ spin_unlock(&mm->mmu_notifier_mm->lock); |
2082 |
++ |
2083 |
+ if (mn->ops->release) |
2084 |
+ mn->ops->release(mn, mm); |
2085 |
+- rcu_read_unlock(); |
2086 |
+ |
2087 |
+- spin_lock(&mm->mmu_notifier_mm->lock); |
2088 |
+- hlist_del_rcu(&mn->hlist); |
2089 |
++ /* |
2090 |
++ * Allow __mmu_notifier_release() to complete. |
2091 |
++ */ |
2092 |
++ srcu_read_unlock(&srcu, id); |
2093 |
++ } else |
2094 |
+ spin_unlock(&mm->mmu_notifier_mm->lock); |
2095 |
+- } |
2096 |
+ |
2097 |
+ /* |
2098 |
+- * Wait any running method to finish, of course including |
2099 |
+- * ->release if it was run by mmu_notifier_relase instead of us. |
2100 |
++ * Wait for any running method to finish, including ->release() if it |
2101 |
++ * was run by __mmu_notifier_release() instead of us. |
2102 |
+ */ |
2103 |
+- synchronize_rcu(); |
2104 |
++ synchronize_srcu(&srcu); |
2105 |
+ |
2106 |
+ BUG_ON(atomic_read(&mm->mm_count) <= 0); |
2107 |
+ |
2108 |
+ mmdrop(mm); |
2109 |
+ } |
2110 |
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister); |
2111 |
++ |
2112 |
++static int __init mmu_notifier_init(void) |
2113 |
++{ |
2114 |
++ return init_srcu_struct(&srcu); |
2115 |
++} |
2116 |
++ |
2117 |
++module_init(mmu_notifier_init); |
2118 |
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
2119 |
+index 0ec869e..1b94f08 100644 |
2120 |
+--- a/mm/page_alloc.c |
2121 |
++++ b/mm/page_alloc.c |
2122 |
+@@ -4264,10 +4264,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, |
2123 |
+ * round what is now in bits to nearest long in bits, then return it in |
2124 |
+ * bytes. |
2125 |
+ */ |
2126 |
+-static unsigned long __init usemap_size(unsigned long zonesize) |
2127 |
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) |
2128 |
+ { |
2129 |
+ unsigned long usemapsize; |
2130 |
+ |
2131 |
++ zonesize += zone_start_pfn & (pageblock_nr_pages-1); |
2132 |
+ usemapsize = roundup(zonesize, pageblock_nr_pages); |
2133 |
+ usemapsize = usemapsize >> pageblock_order; |
2134 |
+ usemapsize *= NR_PAGEBLOCK_BITS; |
2135 |
+@@ -4277,17 +4278,19 @@ static unsigned long __init usemap_size(unsigned long zonesize) |
2136 |
+ } |
2137 |
+ |
2138 |
+ static void __init setup_usemap(struct pglist_data *pgdat, |
2139 |
+- struct zone *zone, unsigned long zonesize) |
2140 |
++ struct zone *zone, |
2141 |
++ unsigned long zone_start_pfn, |
2142 |
++ unsigned long zonesize) |
2143 |
+ { |
2144 |
+- unsigned long usemapsize = usemap_size(zonesize); |
2145 |
++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); |
2146 |
+ zone->pageblock_flags = NULL; |
2147 |
+ if (usemapsize) |
2148 |
+ zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat, |
2149 |
+ usemapsize); |
2150 |
+ } |
2151 |
+ #else |
2152 |
+-static inline void setup_usemap(struct pglist_data *pgdat, |
2153 |
+- struct zone *zone, unsigned long zonesize) {} |
2154 |
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, |
2155 |
++ unsigned long zone_start_pfn, unsigned long zonesize) {} |
2156 |
+ #endif /* CONFIG_SPARSEMEM */ |
2157 |
+ |
2158 |
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE |
2159 |
+@@ -4415,7 +4418,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, |
2160 |
+ continue; |
2161 |
+ |
2162 |
+ set_pageblock_order(pageblock_default_order()); |
2163 |
+- setup_usemap(pgdat, zone, size); |
2164 |
++ setup_usemap(pgdat, zone, zone_start_pfn, size); |
2165 |
+ ret = init_currently_empty_zone(zone, zone_start_pfn, |
2166 |
+ size, MEMMAP_EARLY); |
2167 |
+ BUG_ON(ret); |
2168 |
+diff --git a/mm/shmem.c b/mm/shmem.c |
2169 |
+index b952332..8b38477 100644 |
2170 |
+--- a/mm/shmem.c |
2171 |
++++ b/mm/shmem.c |
2172 |
+@@ -2505,6 +2505,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
2173 |
+ unsigned long inodes; |
2174 |
+ int error = -EINVAL; |
2175 |
+ |
2176 |
++ config.mpol = NULL; |
2177 |
+ if (shmem_parse_options(data, &config, true)) |
2178 |
+ return error; |
2179 |
+ |
2180 |
+@@ -2530,8 +2531,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
2181 |
+ sbinfo->max_inodes = config.max_inodes; |
2182 |
+ sbinfo->free_inodes = config.max_inodes - inodes; |
2183 |
+ |
2184 |
+- mpol_put(sbinfo->mpol); |
2185 |
+- sbinfo->mpol = config.mpol; /* transfers initial ref */ |
2186 |
++ /* |
2187 |
++ * Preserve previous mempolicy unless mpol remount option was specified. |
2188 |
++ */ |
2189 |
++ if (config.mpol) { |
2190 |
++ mpol_put(sbinfo->mpol); |
2191 |
++ sbinfo->mpol = config.mpol; /* transfers initial ref */ |
2192 |
++ } |
2193 |
+ out: |
2194 |
+ spin_unlock(&sbinfo->stat_lock); |
2195 |
+ return error; |
2196 |
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c |
2197 |
+index 289646e..f26516a 100644 |
2198 |
+--- a/net/bridge/br_stp_bpdu.c |
2199 |
++++ b/net/bridge/br_stp_bpdu.c |
2200 |
+@@ -16,6 +16,7 @@ |
2201 |
+ #include <linux/etherdevice.h> |
2202 |
+ #include <linux/llc.h> |
2203 |
+ #include <linux/slab.h> |
2204 |
++#include <linux/pkt_sched.h> |
2205 |
+ #include <net/net_namespace.h> |
2206 |
+ #include <net/llc.h> |
2207 |
+ #include <net/llc_pdu.h> |
2208 |
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p, |
2209 |
+ |
2210 |
+ skb->dev = p->dev; |
2211 |
+ skb->protocol = htons(ETH_P_802_2); |
2212 |
++ skb->priority = TC_PRIO_CONTROL; |
2213 |
+ |
2214 |
+ skb_reserve(skb, LLC_RESERVE); |
2215 |
+ memcpy(__skb_put(skb, length), data, length); |
2216 |
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
2217 |
+index ef1528a..f5dde14 100644 |
2218 |
+--- a/net/ipv4/af_inet.c |
2219 |
++++ b/net/ipv4/af_inet.c |
2220 |
+@@ -226,8 +226,12 @@ EXPORT_SYMBOL(inet_listen); |
2221 |
+ u32 inet_ehash_secret __read_mostly; |
2222 |
+ EXPORT_SYMBOL(inet_ehash_secret); |
2223 |
+ |
2224 |
++u32 ipv6_hash_secret __read_mostly; |
2225 |
++EXPORT_SYMBOL(ipv6_hash_secret); |
2226 |
++ |
2227 |
+ /* |
2228 |
+- * inet_ehash_secret must be set exactly once |
2229 |
++ * inet_ehash_secret must be set exactly once, and to a non nul value |
2230 |
++ * ipv6_hash_secret must be set exactly once. |
2231 |
+ */ |
2232 |
+ void build_ehash_secret(void) |
2233 |
+ { |
2234 |
+@@ -237,7 +241,8 @@ void build_ehash_secret(void) |
2235 |
+ get_random_bytes(&rnd, sizeof(rnd)); |
2236 |
+ } while (rnd == 0); |
2237 |
+ |
2238 |
+- cmpxchg(&inet_ehash_secret, 0, rnd); |
2239 |
++ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) |
2240 |
++ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); |
2241 |
+ } |
2242 |
+ EXPORT_SYMBOL(build_ehash_secret); |
2243 |
+ |
2244 |
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
2245 |
+index 39b403f..0ae2cf1 100644 |
2246 |
+--- a/net/ipv4/ping.c |
2247 |
++++ b/net/ipv4/ping.c |
2248 |
+@@ -320,8 +320,8 @@ void ping_err(struct sk_buff *skb, u32 info) |
2249 |
+ struct iphdr *iph = (struct iphdr *)skb->data; |
2250 |
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2)); |
2251 |
+ struct inet_sock *inet_sock; |
2252 |
+- int type = icmph->type; |
2253 |
+- int code = icmph->code; |
2254 |
++ int type = icmp_hdr(skb)->type; |
2255 |
++ int code = icmp_hdr(skb)->code; |
2256 |
+ struct net *net = dev_net(skb->dev); |
2257 |
+ struct sock *sk; |
2258 |
+ int harderr; |
2259 |
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c |
2260 |
+index 5c6e322..f71a0ff 100644 |
2261 |
+--- a/sound/pci/ali5451/ali5451.c |
2262 |
++++ b/sound/pci/ali5451/ali5451.c |
2263 |
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream) |
2264 |
+ |
2265 |
+ spin_lock(&codec->reg_lock); |
2266 |
+ if (!pvoice->running) { |
2267 |
+- spin_unlock_irq(&codec->reg_lock); |
2268 |
++ spin_unlock(&codec->reg_lock); |
2269 |
+ return 0; |
2270 |
+ } |
2271 |
+ outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR)); |
2272 |
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c |
2273 |
+index 3c04524..1026820 100644 |
2274 |
+--- a/sound/pci/rme32.c |
2275 |
++++ b/sound/pci/rme32.c |
2276 |
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream) |
2277 |
+ spin_lock_irq(&rme32->lock); |
2278 |
+ rme32->capture_substream = NULL; |
2279 |
+ rme32->capture_periodsize = 0; |
2280 |
+- spin_unlock(&rme32->lock); |
2281 |
++ spin_unlock_irq(&rme32->lock); |
2282 |
+ return 0; |
2283 |
+ } |
2284 |
+ |
2285 |
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
2286 |
+index 7ccffb2..11a9f86 100644 |
2287 |
+--- a/sound/usb/quirks-table.h |
2288 |
++++ b/sound/usb/quirks-table.h |
2289 |
+@@ -1613,7 +1613,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), |
2290 |
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { |
2291 |
+ /* .vendor_name = "Roland", */ |
2292 |
+ /* .product_name = "A-PRO", */ |
2293 |
+- .ifnum = 1, |
2294 |
++ .ifnum = 0, |
2295 |
+ .type = QUIRK_MIDI_FIXED_ENDPOINT, |
2296 |
+ .data = & (const struct snd_usb_midi_endpoint_info) { |
2297 |
+ .out_cables = 0x0003, |
2298 |
|
2299 |
Modified: genpatches-2.6/trunk/3.2/0000_README |
2300 |
=================================================================== |
2301 |
--- genpatches-2.6/trunk/3.2/0000_README 2013-02-23 19:07:21 UTC (rev 2290) |
2302 |
+++ genpatches-2.6/trunk/3.2/0000_README 2013-02-28 19:13:50 UTC (rev 2291) |
2303 |
@@ -192,6 +192,10 @@ |
2304 |
From: http://www.kernel.org |
2305 |
Desc: Linux 3.2.38 |
2306 |
|
2307 |
+Patch: 1038_linux-3.2.39.patch |
2308 |
+From: http://www.kernel.org |
2309 |
+Desc: Linux 3.2.39 |
2310 |
+ |
2311 |
Patch: 2300_per-pci-device-msi-irq-listing.patch |
2312 |
From: http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=da8d1c8ba4dcb16d60be54b233deca9a7cac98dc |
2313 |
Desc: Add a per-pci-device subdirectory in sysfs |
2314 |
|
2315 |
Added: genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch |
2316 |
=================================================================== |
2317 |
--- genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch (rev 0) |
2318 |
+++ genpatches-2.6/trunk/3.2/1038_linux-3.2.39.patch 2013-02-28 19:13:50 UTC (rev 2291) |
2319 |
@@ -0,0 +1,2660 @@ |
2320 |
+diff --git a/MAINTAINERS b/MAINTAINERS |
2321 |
+index 82d7fa6..83f156e 100644 |
2322 |
+--- a/MAINTAINERS |
2323 |
++++ b/MAINTAINERS |
2324 |
+@@ -2584,7 +2584,7 @@ S: Maintained |
2325 |
+ F: drivers/net/ethernet/i825xx/eexpress.* |
2326 |
+ |
2327 |
+ ETHERNET BRIDGE |
2328 |
+-M: Stephen Hemminger <shemminger@××××××.com> |
2329 |
++M: Stephen Hemminger <stephen@××××××××××××××.org> |
2330 |
+ L: bridge@××××××××××××××××××××××.org |
2331 |
+ L: netdev@×××××××××××.org |
2332 |
+ W: http://www.linuxfoundation.org/en/Net:Bridge |
2333 |
+@@ -4475,7 +4475,7 @@ S: Supported |
2334 |
+ F: drivers/infiniband/hw/nes/ |
2335 |
+ |
2336 |
+ NETEM NETWORK EMULATOR |
2337 |
+-M: Stephen Hemminger <shemminger@××××××.com> |
2338 |
++M: Stephen Hemminger <stephen@××××××××××××××.org> |
2339 |
+ L: netem@××××××××××××××××××××××.org |
2340 |
+ S: Maintained |
2341 |
+ F: net/sched/sch_netem.c |
2342 |
+@@ -5993,7 +5993,7 @@ S: Maintained |
2343 |
+ F: drivers/usb/misc/sisusbvga/ |
2344 |
+ |
2345 |
+ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS |
2346 |
+-M: Stephen Hemminger <shemminger@××××××.com> |
2347 |
++M: Stephen Hemminger <stephen@××××××××××××××.org> |
2348 |
+ L: netdev@×××××××××××.org |
2349 |
+ S: Maintained |
2350 |
+ F: drivers/net/ethernet/marvell/sk* |
2351 |
+diff --git a/Makefile b/Makefile |
2352 |
+index c8c9d02..0fceb8b 100644 |
2353 |
+--- a/Makefile |
2354 |
++++ b/Makefile |
2355 |
+@@ -1,6 +1,6 @@ |
2356 |
+ VERSION = 3 |
2357 |
+ PATCHLEVEL = 2 |
2358 |
+-SUBLEVEL = 38 |
2359 |
++SUBLEVEL = 39 |
2360 |
+ EXTRAVERSION = |
2361 |
+ NAME = Saber-toothed Squirrel |
2362 |
+ |
2363 |
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S |
2364 |
+index a6253ec..95b4eb3 100644 |
2365 |
+--- a/arch/x86/ia32/ia32entry.S |
2366 |
++++ b/arch/x86/ia32/ia32entry.S |
2367 |
+@@ -208,7 +208,7 @@ sysexit_from_sys_call: |
2368 |
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) |
2369 |
+ jnz ia32_ret_from_sys_call |
2370 |
+ TRACE_IRQS_ON |
2371 |
+- sti |
2372 |
++ ENABLE_INTERRUPTS(CLBR_NONE) |
2373 |
+ movl %eax,%esi /* second arg, syscall return value */ |
2374 |
+ cmpl $0,%eax /* is it < 0? */ |
2375 |
+ setl %al /* 1 if so, 0 if not */ |
2376 |
+@@ -218,7 +218,7 @@ sysexit_from_sys_call: |
2377 |
+ GET_THREAD_INFO(%r10) |
2378 |
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */ |
2379 |
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi |
2380 |
+- cli |
2381 |
++ DISABLE_INTERRUPTS(CLBR_NONE) |
2382 |
+ TRACE_IRQS_OFF |
2383 |
+ testl %edi,TI_flags(%r10) |
2384 |
+ jz \exit |
2385 |
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c |
2386 |
+index c346d11..d4f278e 100644 |
2387 |
+--- a/arch/x86/kernel/step.c |
2388 |
++++ b/arch/x86/kernel/step.c |
2389 |
+@@ -157,6 +157,34 @@ static int enable_single_step(struct task_struct *child) |
2390 |
+ return 1; |
2391 |
+ } |
2392 |
+ |
2393 |
++static void set_task_blockstep(struct task_struct *task, bool on) |
2394 |
++{ |
2395 |
++ unsigned long debugctl; |
2396 |
++ |
2397 |
++ /* |
2398 |
++ * Ensure irq/preemption can't change debugctl in between. |
2399 |
++ * Note also that both TIF_BLOCKSTEP and debugctl should |
2400 |
++ * be changed atomically wrt preemption. |
2401 |
++ * |
2402 |
++ * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if |
2403 |
++ * task is current or it can't be running, otherwise we can race |
2404 |
++ * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but |
2405 |
++ * PTRACE_KILL is not safe. |
2406 |
++ */ |
2407 |
++ local_irq_disable(); |
2408 |
++ debugctl = get_debugctlmsr(); |
2409 |
++ if (on) { |
2410 |
++ debugctl |= DEBUGCTLMSR_BTF; |
2411 |
++ set_tsk_thread_flag(task, TIF_BLOCKSTEP); |
2412 |
++ } else { |
2413 |
++ debugctl &= ~DEBUGCTLMSR_BTF; |
2414 |
++ clear_tsk_thread_flag(task, TIF_BLOCKSTEP); |
2415 |
++ } |
2416 |
++ if (task == current) |
2417 |
++ update_debugctlmsr(debugctl); |
2418 |
++ local_irq_enable(); |
2419 |
++} |
2420 |
++ |
2421 |
+ /* |
2422 |
+ * Enable single or block step. |
2423 |
+ */ |
2424 |
+@@ -169,19 +197,10 @@ static void enable_step(struct task_struct *child, bool block) |
2425 |
+ * So no one should try to use debugger block stepping in a program |
2426 |
+ * that uses user-mode single stepping itself. |
2427 |
+ */ |
2428 |
+- if (enable_single_step(child) && block) { |
2429 |
+- unsigned long debugctl = get_debugctlmsr(); |
2430 |
+- |
2431 |
+- debugctl |= DEBUGCTLMSR_BTF; |
2432 |
+- update_debugctlmsr(debugctl); |
2433 |
+- set_tsk_thread_flag(child, TIF_BLOCKSTEP); |
2434 |
+- } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { |
2435 |
+- unsigned long debugctl = get_debugctlmsr(); |
2436 |
+- |
2437 |
+- debugctl &= ~DEBUGCTLMSR_BTF; |
2438 |
+- update_debugctlmsr(debugctl); |
2439 |
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP); |
2440 |
+- } |
2441 |
++ if (enable_single_step(child) && block) |
2442 |
++ set_task_blockstep(child, true); |
2443 |
++ else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) |
2444 |
++ set_task_blockstep(child, false); |
2445 |
+ } |
2446 |
+ |
2447 |
+ void user_enable_single_step(struct task_struct *child) |
2448 |
+@@ -199,13 +218,8 @@ void user_disable_single_step(struct task_struct *child) |
2449 |
+ /* |
2450 |
+ * Make sure block stepping (BTF) is disabled. |
2451 |
+ */ |
2452 |
+- if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { |
2453 |
+- unsigned long debugctl = get_debugctlmsr(); |
2454 |
+- |
2455 |
+- debugctl &= ~DEBUGCTLMSR_BTF; |
2456 |
+- update_debugctlmsr(debugctl); |
2457 |
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP); |
2458 |
+- } |
2459 |
++ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) |
2460 |
++ set_task_blockstep(child, false); |
2461 |
+ |
2462 |
+ /* Always clear TIF_SINGLESTEP... */ |
2463 |
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
2464 |
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S |
2465 |
+index b040b0e..7328f71 100644 |
2466 |
+--- a/arch/x86/xen/xen-asm_32.S |
2467 |
++++ b/arch/x86/xen/xen-asm_32.S |
2468 |
+@@ -88,11 +88,11 @@ ENTRY(xen_iret) |
2469 |
+ */ |
2470 |
+ #ifdef CONFIG_SMP |
2471 |
+ GET_THREAD_INFO(%eax) |
2472 |
+- movl TI_cpu(%eax), %eax |
2473 |
+- movl __per_cpu_offset(,%eax,4), %eax |
2474 |
+- mov xen_vcpu(%eax), %eax |
2475 |
++ movl %ss:TI_cpu(%eax), %eax |
2476 |
++ movl %ss:__per_cpu_offset(,%eax,4), %eax |
2477 |
++ mov %ss:xen_vcpu(%eax), %eax |
2478 |
+ #else |
2479 |
+- movl xen_vcpu, %eax |
2480 |
++ movl %ss:xen_vcpu, %eax |
2481 |
+ #endif |
2482 |
+ |
2483 |
+ /* check IF state we're restoring */ |
2484 |
+@@ -105,11 +105,11 @@ ENTRY(xen_iret) |
2485 |
+ * resuming the code, so we don't have to be worried about |
2486 |
+ * being preempted to another CPU. |
2487 |
+ */ |
2488 |
+- setz XEN_vcpu_info_mask(%eax) |
2489 |
++ setz %ss:XEN_vcpu_info_mask(%eax) |
2490 |
+ xen_iret_start_crit: |
2491 |
+ |
2492 |
+ /* check for unmasked and pending */ |
2493 |
+- cmpw $0x0001, XEN_vcpu_info_pending(%eax) |
2494 |
++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax) |
2495 |
+ |
2496 |
+ /* |
2497 |
+ * If there's something pending, mask events again so we can |
2498 |
+@@ -117,7 +117,7 @@ xen_iret_start_crit: |
2499 |
+ * touch XEN_vcpu_info_mask. |
2500 |
+ */ |
2501 |
+ jne 1f |
2502 |
+- movb $1, XEN_vcpu_info_mask(%eax) |
2503 |
++ movb $1, %ss:XEN_vcpu_info_mask(%eax) |
2504 |
+ |
2505 |
+ 1: popl %eax |
2506 |
+ |
2507 |
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
2508 |
+index b07edc4..62c1325 100644 |
2509 |
+--- a/drivers/ata/ahci.c |
2510 |
++++ b/drivers/ata/ahci.c |
2511 |
+@@ -52,7 +52,9 @@ |
2512 |
+ #define DRV_VERSION "3.0" |
2513 |
+ |
2514 |
+ enum { |
2515 |
+- AHCI_PCI_BAR = 5, |
2516 |
++ AHCI_PCI_BAR_STA2X11 = 0, |
2517 |
++ AHCI_PCI_BAR_ENMOTUS = 2, |
2518 |
++ AHCI_PCI_BAR_STANDARD = 5, |
2519 |
+ }; |
2520 |
+ |
2521 |
+ enum board_ids { |
2522 |
+@@ -375,6 +377,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
2523 |
+ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */ |
2524 |
+ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ |
2525 |
+ |
2526 |
++ /* ST Microelectronics */ |
2527 |
++ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */ |
2528 |
++ |
2529 |
+ /* Marvell */ |
2530 |
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ |
2531 |
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ |
2532 |
+@@ -400,6 +405,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
2533 |
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ |
2534 |
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ |
2535 |
+ |
2536 |
++ /* Enmotus */ |
2537 |
++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, |
2538 |
++ |
2539 |
+ /* Generic, PCI class code for AHCI */ |
2540 |
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
2541 |
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, |
2542 |
+@@ -629,6 +637,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) |
2543 |
+ { |
2544 |
+ int rc; |
2545 |
+ |
2546 |
++ /* |
2547 |
++ * If the device fixup already set the dma_mask to some non-standard |
2548 |
++ * value, don't extend it here. This happens on STA2X11, for example. |
2549 |
++ */ |
2550 |
++ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32)) |
2551 |
++ return 0; |
2552 |
++ |
2553 |
+ if (using_dac && |
2554 |
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
2555 |
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
2556 |
+@@ -1033,6 +1048,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2557 |
+ struct ahci_host_priv *hpriv; |
2558 |
+ struct ata_host *host; |
2559 |
+ int n_ports, i, rc; |
2560 |
++ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD; |
2561 |
+ |
2562 |
|