1 |
Author: mpagano |
2 |
Date: 2009-04-29 00:49:18 +0000 (Wed, 29 Apr 2009) |
3 |
New Revision: 1549 |
4 |
|
5 |
Added: |
6 |
genpatches-2.6/trunk/2.6.29/1001_linux-2.6.29.2.patch |
7 |
Removed: |
8 |
genpatches-2.6/trunk/2.6.29/1700_flush-pending-TLB-entries.patch |
9 |
Modified: |
10 |
genpatches-2.6/trunk/2.6.29/0000_README |
11 |
Log: |
12 |
Linux 2.6.29.2 patch |
13 |
|
14 |
Modified: genpatches-2.6/trunk/2.6.29/0000_README |
15 |
=================================================================== |
16 |
--- genpatches-2.6/trunk/2.6.29/0000_README 2009-04-25 01:48:57 UTC (rev 1548) |
17 |
+++ genpatches-2.6/trunk/2.6.29/0000_README 2009-04-29 00:49:18 UTC (rev 1549) |
18 |
@@ -43,9 +43,9 @@ |
19 |
From: http://www.kernel.org |
20 |
Desc: Linux 2.6.29.1 |
21 |
|
22 |
-Patch: 1700_flush-pending-TLB-entries.patch |
23 |
-From: http://bugs.gentoo.org/show_bug.cgi?id=265714 |
24 |
-Desc: sparc64: Flush TLB before releasing pages |
25 |
+Patch: 1001_linux-2.6.29.2.patch |
26 |
+From: http://www.kernel.org |
27 |
+Desc: Linux 2.6.29.2 |
28 |
|
29 |
Patch: 1915_ext4-automatically-allocate-delay-allocated-blocks-on-rename.patch |
30 |
From: Theodore Ts'o <tytso@×××.edu> |
31 |
|
32 |
Added: genpatches-2.6/trunk/2.6.29/1001_linux-2.6.29.2.patch |
33 |
=================================================================== |
34 |
--- genpatches-2.6/trunk/2.6.29/1001_linux-2.6.29.2.patch (rev 0) |
35 |
+++ genpatches-2.6/trunk/2.6.29/1001_linux-2.6.29.2.patch 2009-04-29 00:49:18 UTC (rev 1549) |
36 |
@@ -0,0 +1,4604 @@ |
37 |
+diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt |
38 |
+index 5ede747..0876275 100644 |
39 |
+--- a/Documentation/networking/bonding.txt |
40 |
++++ b/Documentation/networking/bonding.txt |
41 |
+@@ -1242,7 +1242,7 @@ monitoring is enabled, and vice-versa. |
42 |
+ To add ARP targets: |
43 |
+ # echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target |
44 |
+ # echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target |
45 |
+- NOTE: up to 10 target addresses may be specified. |
46 |
++ NOTE: up to 16 target addresses may be specified. |
47 |
+ |
48 |
+ To remove an ARP target: |
49 |
+ # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target |
50 |
+diff --git a/Makefile b/Makefile |
51 |
+index cdb1133..0380c7e 100644 |
52 |
+--- a/Makefile |
53 |
++++ b/Makefile |
54 |
+@@ -1,7 +1,7 @@ |
55 |
+ VERSION = 2 |
56 |
+ PATCHLEVEL = 6 |
57 |
+ SUBLEVEL = 29 |
58 |
+-EXTRAVERSION = .1 |
59 |
++EXTRAVERSION = .2 |
60 |
+ NAME = Temporary Tasmanian Devil |
61 |
+ |
62 |
+ # *DOCUMENTATION* |
63 |
+diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig |
64 |
+index f833a0b..0a2d6b8 100644 |
65 |
+--- a/arch/ia64/kvm/Kconfig |
66 |
++++ b/arch/ia64/kvm/Kconfig |
67 |
+@@ -4,6 +4,10 @@ |
68 |
+ config HAVE_KVM |
69 |
+ bool |
70 |
+ |
71 |
++config HAVE_KVM_IRQCHIP |
72 |
++ bool |
73 |
++ default y |
74 |
++ |
75 |
+ menuconfig VIRTUALIZATION |
76 |
+ bool "Virtualization" |
77 |
+ depends on HAVE_KVM || IA64 |
78 |
+diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c |
79 |
+index 1a86f84..5abcc7f 100644 |
80 |
+--- a/arch/mips/kernel/linux32.c |
81 |
++++ b/arch/mips/kernel/linux32.c |
82 |
+@@ -134,9 +134,9 @@ SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy, |
83 |
+ return sys_ftruncate(fd, merge_64(a2, a3)); |
84 |
+ } |
85 |
+ |
86 |
+-SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high, |
87 |
+- unsigned long, offset_low, loff_t __user *, result, |
88 |
+- unsigned long, origin) |
89 |
++SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high, |
90 |
++ unsigned int, offset_low, loff_t __user *, result, |
91 |
++ unsigned int, origin) |
92 |
+ { |
93 |
+ return sys_llseek(fd, offset_high, offset_low, result, origin); |
94 |
+ } |
95 |
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h |
96 |
+index 6d406c5..9696cc3 100644 |
97 |
+--- a/arch/powerpc/include/asm/futex.h |
98 |
++++ b/arch/powerpc/include/asm/futex.h |
99 |
+@@ -27,7 +27,7 @@ |
100 |
+ PPC_LONG "1b,4b,2b,4b\n" \ |
101 |
+ ".previous" \ |
102 |
+ : "=&r" (oldval), "=&r" (ret) \ |
103 |
+- : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ |
104 |
++ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ |
105 |
+ : "cr0", "memory") |
106 |
+ |
107 |
+ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) |
108 |
+@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) |
109 |
+ |
110 |
+ switch (op) { |
111 |
+ case FUTEX_OP_SET: |
112 |
+- __futex_atomic_op("", ret, oldval, uaddr, oparg); |
113 |
++ __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg); |
114 |
+ break; |
115 |
+ case FUTEX_OP_ADD: |
116 |
+- __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg); |
117 |
++ __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg); |
118 |
+ break; |
119 |
+ case FUTEX_OP_OR: |
120 |
+- __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg); |
121 |
++ __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg); |
122 |
+ break; |
123 |
+ case FUTEX_OP_ANDN: |
124 |
+- __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg); |
125 |
++ __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg); |
126 |
+ break; |
127 |
+ case FUTEX_OP_XOR: |
128 |
+- __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg); |
129 |
++ __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg); |
130 |
+ break; |
131 |
+ default: |
132 |
+ ret = -ENOSYS; |
133 |
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig |
134 |
+index 6dbdc48..03becdf 100644 |
135 |
+--- a/arch/powerpc/kvm/Kconfig |
136 |
++++ b/arch/powerpc/kvm/Kconfig |
137 |
+@@ -2,6 +2,9 @@ |
138 |
+ # KVM configuration |
139 |
+ # |
140 |
+ |
141 |
++config HAVE_KVM_IRQCHIP |
142 |
++ bool |
143 |
++ |
144 |
+ menuconfig VIRTUALIZATION |
145 |
+ bool "Virtualization" |
146 |
+ ---help--- |
147 |
+diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig |
148 |
+index e051cad..3e260b7 100644 |
149 |
+--- a/arch/s390/kvm/Kconfig |
150 |
++++ b/arch/s390/kvm/Kconfig |
151 |
+@@ -4,6 +4,9 @@ |
152 |
+ config HAVE_KVM |
153 |
+ bool |
154 |
+ |
155 |
++config HAVE_KVM_IRQCHIP |
156 |
++ bool |
157 |
++ |
158 |
+ menuconfig VIRTUALIZATION |
159 |
+ bool "Virtualization" |
160 |
+ default y |
161 |
+diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h |
162 |
+index 0aaa086..ee38e73 100644 |
163 |
+--- a/arch/sparc/include/asm/tlb_64.h |
164 |
++++ b/arch/sparc/include/asm/tlb_64.h |
165 |
+@@ -57,9 +57,9 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i |
166 |
+ |
167 |
+ static inline void tlb_flush_mmu(struct mmu_gather *mp) |
168 |
+ { |
169 |
++ if (!mp->fullmm) |
170 |
++ flush_tlb_pending(); |
171 |
+ if (mp->need_flush) { |
172 |
+- if (!mp->fullmm) |
173 |
+- flush_tlb_pending(); |
174 |
+ free_pages_and_swap_cache(mp->pages, mp->pages_nr); |
175 |
+ mp->pages_nr = 0; |
176 |
+ mp->need_flush = 0; |
177 |
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu |
178 |
+index c98d52e..6ed3aca 100644 |
179 |
+--- a/arch/x86/Kconfig.cpu |
180 |
++++ b/arch/x86/Kconfig.cpu |
181 |
+@@ -523,6 +523,7 @@ config X86_PTRACE_BTS |
182 |
+ bool "Branch Trace Store" |
183 |
+ default y |
184 |
+ depends on X86_DEBUGCTLMSR |
185 |
++ depends on BROKEN |
186 |
+ help |
187 |
+ This adds a ptrace interface to the hardware's branch trace store. |
188 |
+ |
189 |
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c |
190 |
+index 8c3c25f..a99dbbe 100644 |
191 |
+--- a/arch/x86/boot/memory.c |
192 |
++++ b/arch/x86/boot/memory.c |
193 |
+@@ -27,13 +27,14 @@ static int detect_memory_e820(void) |
194 |
+ do { |
195 |
+ size = sizeof(struct e820entry); |
196 |
+ |
197 |
+- /* Important: %edx is clobbered by some BIOSes, |
198 |
+- so it must be either used for the error output |
199 |
++ /* Important: %edx and %esi are clobbered by some BIOSes, |
200 |
++ so they must be either used for the error output |
201 |
+ or explicitly marked clobbered. */ |
202 |
+ asm("int $0x15; setc %0" |
203 |
+ : "=d" (err), "+b" (next), "=a" (id), "+c" (size), |
204 |
+ "=m" (*desc) |
205 |
+- : "D" (desc), "d" (SMAP), "a" (0xe820)); |
206 |
++ : "D" (desc), "d" (SMAP), "a" (0xe820) |
207 |
++ : "esi"); |
208 |
+ |
209 |
+ /* BIOSes which terminate the chain with CF = 1 as opposed |
210 |
+ to %ebx = 0 don't always report the SMAP signature on |
211 |
+diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |
212 |
+index 4b1c319..89c676d 100644 |
213 |
+--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |
214 |
++++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |
215 |
+@@ -680,6 +680,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) |
216 |
+ perf->states[i].transition_latency * 1000; |
217 |
+ } |
218 |
+ |
219 |
++ /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ |
220 |
++ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && |
221 |
++ policy->cpuinfo.transition_latency > 20 * 1000) { |
222 |
++ static int print_once; |
223 |
++ policy->cpuinfo.transition_latency = 20 * 1000; |
224 |
++ if (!print_once) { |
225 |
++ print_once = 1; |
226 |
++ printk(KERN_INFO "Capping off P-state tranision latency" |
227 |
++ " at 20 uS\n"); |
228 |
++ } |
229 |
++ } |
230 |
++ |
231 |
+ data->max_freq = perf->states[0].core_frequency * 1000; |
232 |
+ /* table init */ |
233 |
+ for (i=0; i<perf->state_count; i++) { |
234 |
+diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c |
235 |
+index bc7ac4d..7086b24 100644 |
236 |
+--- a/arch/x86/kernel/io_apic.c |
237 |
++++ b/arch/x86/kernel/io_apic.c |
238 |
+@@ -2475,6 +2475,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) |
239 |
+ me = smp_processor_id(); |
240 |
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { |
241 |
+ unsigned int irq; |
242 |
++ unsigned int irr; |
243 |
+ struct irq_desc *desc; |
244 |
+ struct irq_cfg *cfg; |
245 |
+ irq = __get_cpu_var(vector_irq)[vector]; |
246 |
+@@ -2494,6 +2495,18 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) |
247 |
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
248 |
+ goto unlock; |
249 |
+ |
250 |
++ irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); |
251 |
++ /* |
252 |
++ * Check if the vector that needs to be cleanedup is |
253 |
++ * registered at the cpu's IRR. If so, then this is not |
254 |
++ * the best time to clean it up. Lets clean it up in the |
255 |
++ * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR |
256 |
++ * to myself. |
257 |
++ */ |
258 |
++ if (irr & (1 << (vector % 32))) { |
259 |
++ send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); |
260 |
++ goto unlock; |
261 |
++ } |
262 |
+ __get_cpu_var(vector_irq)[vector] = -1; |
263 |
+ cfg->move_cleanup_count--; |
264 |
+ unlock: |
265 |
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig |
266 |
+index b81125f..0a303c3 100644 |
267 |
+--- a/arch/x86/kvm/Kconfig |
268 |
++++ b/arch/x86/kvm/Kconfig |
269 |
+@@ -4,6 +4,10 @@ |
270 |
+ config HAVE_KVM |
271 |
+ bool |
272 |
+ |
273 |
++config HAVE_KVM_IRQCHIP |
274 |
++ bool |
275 |
++ default y |
276 |
++ |
277 |
+ menuconfig VIRTUALIZATION |
278 |
+ bool "Virtualization" |
279 |
+ depends on HAVE_KVM || X86 |
280 |
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c |
281 |
+index 72bd275..3dceaef 100644 |
282 |
+--- a/arch/x86/kvm/i8254.c |
283 |
++++ b/arch/x86/kvm/i8254.c |
284 |
+@@ -536,6 +536,16 @@ void kvm_pit_reset(struct kvm_pit *pit) |
285 |
+ pit->pit_state.irq_ack = 1; |
286 |
+ } |
287 |
+ |
288 |
++static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask) |
289 |
++{ |
290 |
++ struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier); |
291 |
++ |
292 |
++ if (!mask) { |
293 |
++ atomic_set(&pit->pit_state.pit_timer.pending, 0); |
294 |
++ pit->pit_state.irq_ack = 1; |
295 |
++ } |
296 |
++} |
297 |
++ |
298 |
+ struct kvm_pit *kvm_create_pit(struct kvm *kvm) |
299 |
+ { |
300 |
+ struct kvm_pit *pit; |
301 |
+@@ -584,6 +594,9 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) |
302 |
+ |
303 |
+ kvm_pit_reset(pit); |
304 |
+ |
305 |
++ pit->mask_notifier.func = pit_mask_notifer; |
306 |
++ kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); |
307 |
++ |
308 |
+ return pit; |
309 |
+ } |
310 |
+ |
311 |
+@@ -592,6 +605,8 @@ void kvm_free_pit(struct kvm *kvm) |
312 |
+ struct hrtimer *timer; |
313 |
+ |
314 |
+ if (kvm->arch.vpit) { |
315 |
++ kvm_unregister_irq_mask_notifier(kvm, 0, |
316 |
++ &kvm->arch.vpit->mask_notifier); |
317 |
+ mutex_lock(&kvm->arch.vpit->pit_state.lock); |
318 |
+ timer = &kvm->arch.vpit->pit_state.pit_timer.timer; |
319 |
+ hrtimer_cancel(timer); |
320 |
+diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h |
321 |
+index 4178022..0dfb936 100644 |
322 |
+--- a/arch/x86/kvm/i8254.h |
323 |
++++ b/arch/x86/kvm/i8254.h |
324 |
+@@ -45,6 +45,7 @@ struct kvm_pit { |
325 |
+ struct kvm *kvm; |
326 |
+ struct kvm_kpit_state pit_state; |
327 |
+ int irq_source_id; |
328 |
++ struct kvm_irq_mask_notifier mask_notifier; |
329 |
+ }; |
330 |
+ |
331 |
+ #define KVM_PIT_BASE_ADDRESS 0x40 |
332 |
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h |
333 |
+index 258e5d5..eaab214 100644 |
334 |
+--- a/arch/x86/kvm/mmu.h |
335 |
++++ b/arch/x86/kvm/mmu.h |
336 |
+@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) |
337 |
+ static inline int is_long_mode(struct kvm_vcpu *vcpu) |
338 |
+ { |
339 |
+ #ifdef CONFIG_X86_64 |
340 |
+- return vcpu->arch.shadow_efer & EFER_LME; |
341 |
++ return vcpu->arch.shadow_efer & EFER_LMA; |
342 |
+ #else |
343 |
+ return 0; |
344 |
+ #endif |
345 |
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h |
346 |
+index c95a67d..89addbd 100644 |
347 |
+--- a/arch/x86/kvm/paging_tmpl.h |
348 |
++++ b/arch/x86/kvm/paging_tmpl.h |
349 |
+@@ -476,16 +476,20 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, |
350 |
+ if (level == PT_PAGE_TABLE_LEVEL || |
351 |
+ ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { |
352 |
+ struct kvm_mmu_page *sp = page_header(__pa(sptep)); |
353 |
++ int need_flush = 0; |
354 |
+ |
355 |
+ sw->pte_gpa = (sp->gfn << PAGE_SHIFT); |
356 |
+ sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); |
357 |
+ |
358 |
+ if (is_shadow_present_pte(*sptep)) { |
359 |
++ need_flush = 1; |
360 |
+ rmap_remove(vcpu->kvm, sptep); |
361 |
+ if (is_large_pte(*sptep)) |
362 |
+ --vcpu->kvm->stat.lpages; |
363 |
+ } |
364 |
+ set_shadow_pte(sptep, shadow_trap_nonpresent_pte); |
365 |
++ if (need_flush) |
366 |
++ kvm_flush_remote_tlbs(vcpu->kvm); |
367 |
+ return 1; |
368 |
+ } |
369 |
+ if (!is_shadow_present_pte(*sptep)) |
370 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
371 |
+index 90de444..898910c 100644 |
372 |
+--- a/arch/x86/kvm/vmx.c |
373 |
++++ b/arch/x86/kvm/vmx.c |
374 |
+@@ -1433,6 +1433,29 @@ continue_rmode: |
375 |
+ init_rmode(vcpu->kvm); |
376 |
+ } |
377 |
+ |
378 |
++static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
379 |
++{ |
380 |
++ struct vcpu_vmx *vmx = to_vmx(vcpu); |
381 |
++ struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); |
382 |
++ |
383 |
++ vcpu->arch.shadow_efer = efer; |
384 |
++ if (!msr) |
385 |
++ return; |
386 |
++ if (efer & EFER_LMA) { |
387 |
++ vmcs_write32(VM_ENTRY_CONTROLS, |
388 |
++ vmcs_read32(VM_ENTRY_CONTROLS) | |
389 |
++ VM_ENTRY_IA32E_MODE); |
390 |
++ msr->data = efer; |
391 |
++ } else { |
392 |
++ vmcs_write32(VM_ENTRY_CONTROLS, |
393 |
++ vmcs_read32(VM_ENTRY_CONTROLS) & |
394 |
++ ~VM_ENTRY_IA32E_MODE); |
395 |
++ |
396 |
++ msr->data = efer & ~EFER_LME; |
397 |
++ } |
398 |
++ setup_msrs(vmx); |
399 |
++} |
400 |
++ |
401 |
+ #ifdef CONFIG_X86_64 |
402 |
+ |
403 |
+ static void enter_lmode(struct kvm_vcpu *vcpu) |
404 |
+@@ -1447,13 +1470,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu) |
405 |
+ (guest_tr_ar & ~AR_TYPE_MASK) |
406 |
+ | AR_TYPE_BUSY_64_TSS); |
407 |
+ } |
408 |
+- |
409 |
+ vcpu->arch.shadow_efer |= EFER_LMA; |
410 |
+- |
411 |
+- find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME; |
412 |
+- vmcs_write32(VM_ENTRY_CONTROLS, |
413 |
+- vmcs_read32(VM_ENTRY_CONTROLS) |
414 |
+- | VM_ENTRY_IA32E_MODE); |
415 |
++ vmx_set_efer(vcpu, vcpu->arch.shadow_efer); |
416 |
+ } |
417 |
+ |
418 |
+ static void exit_lmode(struct kvm_vcpu *vcpu) |
419 |
+@@ -1612,30 +1630,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
420 |
+ vmcs_writel(GUEST_CR4, hw_cr4); |
421 |
+ } |
422 |
+ |
423 |
+-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
424 |
+-{ |
425 |
+- struct vcpu_vmx *vmx = to_vmx(vcpu); |
426 |
+- struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); |
427 |
+- |
428 |
+- vcpu->arch.shadow_efer = efer; |
429 |
+- if (!msr) |
430 |
+- return; |
431 |
+- if (efer & EFER_LMA) { |
432 |
+- vmcs_write32(VM_ENTRY_CONTROLS, |
433 |
+- vmcs_read32(VM_ENTRY_CONTROLS) | |
434 |
+- VM_ENTRY_IA32E_MODE); |
435 |
+- msr->data = efer; |
436 |
+- |
437 |
+- } else { |
438 |
+- vmcs_write32(VM_ENTRY_CONTROLS, |
439 |
+- vmcs_read32(VM_ENTRY_CONTROLS) & |
440 |
+- ~VM_ENTRY_IA32E_MODE); |
441 |
+- |
442 |
+- msr->data = efer & ~EFER_LME; |
443 |
+- } |
444 |
+- setup_msrs(vmx); |
445 |
+-} |
446 |
+- |
447 |
+ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
448 |
+ { |
449 |
+ struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
450 |
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c |
451 |
+index 21bc1f7..441489c 100644 |
452 |
+--- a/arch/x86/mm/pat.c |
453 |
++++ b/arch/x86/mm/pat.c |
454 |
+@@ -713,29 +713,28 @@ static void free_pfn_range(u64 paddr, unsigned long size) |
455 |
+ * |
456 |
+ * If the vma has a linear pfn mapping for the entire range, we get the prot |
457 |
+ * from pte and reserve the entire vma range with single reserve_pfn_range call. |
458 |
+- * Otherwise, we reserve the entire vma range, my ging through the PTEs page |
459 |
+- * by page to get physical address and protection. |
460 |
+ */ |
461 |
+ int track_pfn_vma_copy(struct vm_area_struct *vma) |
462 |
+ { |
463 |
+- int retval = 0; |
464 |
+- unsigned long i, j; |
465 |
+ resource_size_t paddr; |
466 |
+ unsigned long prot; |
467 |
+- unsigned long vma_start = vma->vm_start; |
468 |
+- unsigned long vma_end = vma->vm_end; |
469 |
+- unsigned long vma_size = vma_end - vma_start; |
470 |
++ unsigned long vma_size = vma->vm_end - vma->vm_start; |
471 |
+ pgprot_t pgprot; |
472 |
+ |
473 |
+ if (!pat_enabled) |
474 |
+ return 0; |
475 |
+ |
476 |
++ /* |
477 |
++ * For now, only handle remap_pfn_range() vmas where |
478 |
++ * is_linear_pfn_mapping() == TRUE. Handling of |
479 |
++ * vm_insert_pfn() is TBD. |
480 |
++ */ |
481 |
+ if (is_linear_pfn_mapping(vma)) { |
482 |
+ /* |
483 |
+ * reserve the whole chunk covered by vma. We need the |
484 |
+ * starting address and protection from pte. |
485 |
+ */ |
486 |
+- if (follow_phys(vma, vma_start, 0, &prot, &paddr)) { |
487 |
++ if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { |
488 |
+ WARN_ON_ONCE(1); |
489 |
+ return -EINVAL; |
490 |
+ } |
491 |
+@@ -743,28 +742,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma) |
492 |
+ return reserve_pfn_range(paddr, vma_size, &pgprot, 1); |
493 |
+ } |
494 |
+ |
495 |
+- /* reserve entire vma page by page, using pfn and prot from pte */ |
496 |
+- for (i = 0; i < vma_size; i += PAGE_SIZE) { |
497 |
+- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) |
498 |
+- continue; |
499 |
+- |
500 |
+- pgprot = __pgprot(prot); |
501 |
+- retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1); |
502 |
+- if (retval) |
503 |
+- goto cleanup_ret; |
504 |
+- } |
505 |
+ return 0; |
506 |
+- |
507 |
+-cleanup_ret: |
508 |
+- /* Reserve error: Cleanup partial reservation and return error */ |
509 |
+- for (j = 0; j < i; j += PAGE_SIZE) { |
510 |
+- if (follow_phys(vma, vma_start + j, 0, &prot, &paddr)) |
511 |
+- continue; |
512 |
+- |
513 |
+- free_pfn_range(paddr, PAGE_SIZE); |
514 |
+- } |
515 |
+- |
516 |
+- return retval; |
517 |
+ } |
518 |
+ |
519 |
+ /* |
520 |
+@@ -774,50 +752,28 @@ cleanup_ret: |
521 |
+ * prot is passed in as a parameter for the new mapping. If the vma has a |
522 |
+ * linear pfn mapping for the entire range reserve the entire vma range with |
523 |
+ * single reserve_pfn_range call. |
524 |
+- * Otherwise, we look t the pfn and size and reserve only the specified range |
525 |
+- * page by page. |
526 |
+- * |
527 |
+- * Note that this function can be called with caller trying to map only a |
528 |
+- * subrange/page inside the vma. |
529 |
+ */ |
530 |
+ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, |
531 |
+ unsigned long pfn, unsigned long size) |
532 |
+ { |
533 |
+- int retval = 0; |
534 |
+- unsigned long i, j; |
535 |
+- resource_size_t base_paddr; |
536 |
+ resource_size_t paddr; |
537 |
+- unsigned long vma_start = vma->vm_start; |
538 |
+- unsigned long vma_end = vma->vm_end; |
539 |
+- unsigned long vma_size = vma_end - vma_start; |
540 |
++ unsigned long vma_size = vma->vm_end - vma->vm_start; |
541 |
+ |
542 |
+ if (!pat_enabled) |
543 |
+ return 0; |
544 |
+ |
545 |
++ /* |
546 |
++ * For now, only handle remap_pfn_range() vmas where |
547 |
++ * is_linear_pfn_mapping() == TRUE. Handling of |
548 |
++ * vm_insert_pfn() is TBD. |
549 |
++ */ |
550 |
+ if (is_linear_pfn_mapping(vma)) { |
551 |
+ /* reserve the whole chunk starting from vm_pgoff */ |
552 |
+ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
553 |
+ return reserve_pfn_range(paddr, vma_size, prot, 0); |
554 |
+ } |
555 |
+ |
556 |
+- /* reserve page by page using pfn and size */ |
557 |
+- base_paddr = (resource_size_t)pfn << PAGE_SHIFT; |
558 |
+- for (i = 0; i < size; i += PAGE_SIZE) { |
559 |
+- paddr = base_paddr + i; |
560 |
+- retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0); |
561 |
+- if (retval) |
562 |
+- goto cleanup_ret; |
563 |
+- } |
564 |
+ return 0; |
565 |
+- |
566 |
+-cleanup_ret: |
567 |
+- /* Reserve error: Cleanup partial reservation and return error */ |
568 |
+- for (j = 0; j < i; j += PAGE_SIZE) { |
569 |
+- paddr = base_paddr + j; |
570 |
+- free_pfn_range(paddr, PAGE_SIZE); |
571 |
+- } |
572 |
+- |
573 |
+- return retval; |
574 |
+ } |
575 |
+ |
576 |
+ /* |
577 |
+@@ -828,39 +784,23 @@ cleanup_ret: |
578 |
+ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, |
579 |
+ unsigned long size) |
580 |
+ { |
581 |
+- unsigned long i; |
582 |
+ resource_size_t paddr; |
583 |
+- unsigned long prot; |
584 |
+- unsigned long vma_start = vma->vm_start; |
585 |
+- unsigned long vma_end = vma->vm_end; |
586 |
+- unsigned long vma_size = vma_end - vma_start; |
587 |
++ unsigned long vma_size = vma->vm_end - vma->vm_start; |
588 |
+ |
589 |
+ if (!pat_enabled) |
590 |
+ return; |
591 |
+ |
592 |
++ /* |
593 |
++ * For now, only handle remap_pfn_range() vmas where |
594 |
++ * is_linear_pfn_mapping() == TRUE. Handling of |
595 |
++ * vm_insert_pfn() is TBD. |
596 |
++ */ |
597 |
+ if (is_linear_pfn_mapping(vma)) { |
598 |
+ /* free the whole chunk starting from vm_pgoff */ |
599 |
+ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; |
600 |
+ free_pfn_range(paddr, vma_size); |
601 |
+ return; |
602 |
+ } |
603 |
+- |
604 |
+- if (size != 0 && size != vma_size) { |
605 |
+- /* free page by page, using pfn and size */ |
606 |
+- paddr = (resource_size_t)pfn << PAGE_SHIFT; |
607 |
+- for (i = 0; i < size; i += PAGE_SIZE) { |
608 |
+- paddr = paddr + i; |
609 |
+- free_pfn_range(paddr, PAGE_SIZE); |
610 |
+- } |
611 |
+- } else { |
612 |
+- /* free entire vma, page by page, using the pfn from pte */ |
613 |
+- for (i = 0; i < vma_size; i += PAGE_SIZE) { |
614 |
+- if (follow_phys(vma, vma_start + i, 0, &prot, &paddr)) |
615 |
+- continue; |
616 |
+- |
617 |
+- free_pfn_range(paddr, PAGE_SIZE); |
618 |
+- } |
619 |
+- } |
620 |
+ } |
621 |
+ |
622 |
+ pgprot_t pgprot_writecombine(pgprot_t prot) |
623 |
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c |
624 |
+index 7d388d5..096b0ed 100644 |
625 |
+--- a/arch/x86/pci/fixup.c |
626 |
++++ b/arch/x86/pci/fixup.c |
627 |
+@@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, |
628 |
+ pci_siemens_interrupt_controller); |
629 |
+ |
630 |
+ /* |
631 |
+- * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have |
632 |
+- * 4096 bytes configuration space for each function of their processor |
633 |
+- * configuration space. |
634 |
+- */ |
635 |
+-static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev) |
636 |
+-{ |
637 |
+- dev->cfg_size = pci_cfg_space_size_ext(dev); |
638 |
+-} |
639 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size); |
640 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size); |
641 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size); |
642 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size); |
643 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size); |
644 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size); |
645 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size); |
646 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size); |
647 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size); |
648 |
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size); |
649 |
+- |
650 |
+-/* |
651 |
+ * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from |
652 |
+ * confusing the PCI engine: |
653 |
+ */ |
654 |
+diff --git a/crypto/shash.c b/crypto/shash.c |
655 |
+index d5a2b61..6792a67 100644 |
656 |
+--- a/crypto/shash.c |
657 |
++++ b/crypto/shash.c |
658 |
+@@ -82,6 +82,9 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, |
659 |
+ u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] |
660 |
+ __attribute__ ((aligned)); |
661 |
+ |
662 |
++ if (unaligned_len > len) |
663 |
++ unaligned_len = len; |
664 |
++ |
665 |
+ memcpy(buf, data, unaligned_len); |
666 |
+ |
667 |
+ return shash->update(desc, buf, unaligned_len) ?: |
668 |
+diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c |
669 |
+index 35094f2..8f62fa0 100644 |
670 |
+--- a/drivers/acpi/dock.c |
671 |
++++ b/drivers/acpi/dock.c |
672 |
+@@ -1146,9 +1146,10 @@ static int __init dock_init(void) |
673 |
+ static void __exit dock_exit(void) |
674 |
+ { |
675 |
+ struct dock_station *dock_station; |
676 |
++ struct dock_station *tmp; |
677 |
+ |
678 |
+ unregister_acpi_bus_notifier(&dock_acpi_notifier); |
679 |
+- list_for_each_entry(dock_station, &dock_stations, sibiling) |
680 |
++ list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) |
681 |
+ dock_remove(dock_station); |
682 |
+ } |
683 |
+ |
684 |
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c |
685 |
+index 4216399..233a5fd 100644 |
686 |
+--- a/drivers/ata/pata_hpt37x.c |
687 |
++++ b/drivers/ata/pata_hpt37x.c |
688 |
+@@ -8,7 +8,7 @@ |
689 |
+ * Copyright (C) 1999-2003 Andre Hedrick <andre@×××××××××.org> |
690 |
+ * Portions Copyright (C) 2001 Sun Microsystems, Inc. |
691 |
+ * Portions Copyright (C) 2003 Red Hat Inc |
692 |
+- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. |
693 |
++ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. |
694 |
+ * |
695 |
+ * TODO |
696 |
+ * Look into engine reset on timeout errors. Should not be required. |
697 |
+@@ -24,7 +24,7 @@ |
698 |
+ #include <linux/libata.h> |
699 |
+ |
700 |
+ #define DRV_NAME "pata_hpt37x" |
701 |
+-#define DRV_VERSION "0.6.11" |
702 |
++#define DRV_VERSION "0.6.12" |
703 |
+ |
704 |
+ struct hpt_clock { |
705 |
+ u8 xfer_speed; |
706 |
+@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) |
707 |
+ } |
708 |
+ |
709 |
+ /** |
710 |
+- * hpt370_bmdma_start - DMA engine begin |
711 |
+- * @qc: ATA command |
712 |
+- * |
713 |
+- * The 370 and 370A want us to reset the DMA engine each time we |
714 |
+- * use it. The 372 and later are fine. |
715 |
+- */ |
716 |
+- |
717 |
+-static void hpt370_bmdma_start(struct ata_queued_cmd *qc) |
718 |
+-{ |
719 |
+- struct ata_port *ap = qc->ap; |
720 |
+- struct pci_dev *pdev = to_pci_dev(ap->host->dev); |
721 |
+- pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); |
722 |
+- udelay(10); |
723 |
+- ata_bmdma_start(qc); |
724 |
+-} |
725 |
+- |
726 |
+-/** |
727 |
+ * hpt370_bmdma_end - DMA engine stop |
728 |
+ * @qc: ATA command |
729 |
+ * |
730 |
+@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = { |
731 |
+ static struct ata_port_operations hpt370_port_ops = { |
732 |
+ .inherits = &ata_bmdma_port_ops, |
733 |
+ |
734 |
+- .bmdma_start = hpt370_bmdma_start, |
735 |
+ .bmdma_stop = hpt370_bmdma_stop, |
736 |
+ |
737 |
+ .mode_filter = hpt370_filter, |
738 |
+diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c |
739 |
+index 10d6cbd..2224b76 100644 |
740 |
+--- a/drivers/char/agp/generic.c |
741 |
++++ b/drivers/char/agp/generic.c |
742 |
+@@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m |
743 |
+ int i, ret = -ENOMEM; |
744 |
+ |
745 |
+ for (i = 0; i < num_pages; i++) { |
746 |
+- page = alloc_page(GFP_KERNEL | GFP_DMA32); |
747 |
++ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
748 |
+ /* agp_free_memory() needs gart address */ |
749 |
+ if (page == NULL) |
750 |
+ goto out; |
751 |
+@@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge) |
752 |
+ { |
753 |
+ struct page * page; |
754 |
+ |
755 |
+- page = alloc_page(GFP_KERNEL | GFP_DMA32); |
756 |
++ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
757 |
+ if (page == NULL) |
758 |
+ return NULL; |
759 |
+ |
760 |
+diff --git a/drivers/char/vt.c b/drivers/char/vt.c |
761 |
+index 7900bd6..60453ab 100644 |
762 |
+--- a/drivers/char/vt.c |
763 |
++++ b/drivers/char/vt.c |
764 |
+@@ -2271,7 +2271,7 @@ rescan_last_byte: |
765 |
+ continue; /* nothing to display */ |
766 |
+ } |
767 |
+ /* Glyph not found */ |
768 |
+- if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) { |
769 |
++ if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) { |
770 |
+ /* In legacy mode use the glyph we get by a 1:1 mapping. |
771 |
+ This would make absolutely no sense with Unicode in mind, |
772 |
+ but do this for ASCII characters since a font may lack |
773 |
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c |
774 |
+index 88d3368..7ee1ce1 100644 |
775 |
+--- a/drivers/gpu/drm/drm_gem.c |
776 |
++++ b/drivers/gpu/drm/drm_gem.c |
777 |
+@@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) |
778 |
+ struct drm_map *map = NULL; |
779 |
+ struct drm_gem_object *obj; |
780 |
+ struct drm_hash_item *hash; |
781 |
+- unsigned long prot; |
782 |
+ int ret = 0; |
783 |
+ |
784 |
+ mutex_lock(&dev->struct_mutex); |
785 |
+@@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) |
786 |
+ vma->vm_ops = obj->dev->driver->gem_vm_ops; |
787 |
+ vma->vm_private_data = map->handle; |
788 |
+ /* FIXME: use pgprot_writecombine when available */ |
789 |
+- prot = pgprot_val(vma->vm_page_prot); |
790 |
+-#ifdef CONFIG_X86 |
791 |
+- prot |= _PAGE_CACHE_WC; |
792 |
+-#endif |
793 |
+- vma->vm_page_prot = __pgprot(prot); |
794 |
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
795 |
+ |
796 |
+ /* Take a ref for this mapping of the object, so that the fault |
797 |
+ * handler can dereference the mmap offset's pointer to the object. |
798 |
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c |
799 |
+index 6d21b9e..908d24e 100644 |
800 |
+--- a/drivers/gpu/drm/i915/i915_dma.c |
801 |
++++ b/drivers/gpu/drm/i915/i915_dma.c |
802 |
+@@ -41,7 +41,6 @@ |
803 |
+ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
804 |
+ { |
805 |
+ drm_i915_private_t *dev_priv = dev->dev_private; |
806 |
+- struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
807 |
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
808 |
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; |
809 |
+ u32 last_acthd = I915_READ(acthd_reg); |
810 |
+@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
811 |
+ if (ring->space >= n) |
812 |
+ return 0; |
813 |
+ |
814 |
+- if (master_priv->sarea_priv) |
815 |
+- master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
816 |
++ if (dev->primary->master) { |
817 |
++ struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
818 |
++ if (master_priv->sarea_priv) |
819 |
++ master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; |
820 |
++ } |
821 |
++ |
822 |
+ |
823 |
+ if (ring->head != last_head) |
824 |
+ i = 0; |
825 |
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
826 |
+index 37427e4..fb6390a 100644 |
827 |
+--- a/drivers/gpu/drm/i915/i915_gem.c |
828 |
++++ b/drivers/gpu/drm/i915/i915_gem.c |
829 |
+@@ -603,6 +603,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
830 |
+ case -EAGAIN: |
831 |
+ return VM_FAULT_OOM; |
832 |
+ case -EFAULT: |
833 |
++ case -EINVAL: |
834 |
+ return VM_FAULT_SIGBUS; |
835 |
+ default: |
836 |
+ return VM_FAULT_NOPAGE; |
837 |
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c |
838 |
+index 7fb4191..4cce1ae 100644 |
839 |
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c |
840 |
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c |
841 |
+@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) |
842 |
+ */ |
843 |
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
844 |
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
845 |
+- } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || |
846 |
+- IS_GM45(dev)) { |
847 |
++ } else if (IS_MOBILE(dev)) { |
848 |
+ uint32_t dcc; |
849 |
+ |
850 |
+- /* On 915-945 and GM965, channel interleave by the CPU is |
851 |
+- * determined by DCC. The CPU will alternate based on bit 6 |
852 |
+- * in interleaved mode, and the GPU will then also alternate |
853 |
+- * on bit 6, 9, and 10 for X, but the CPU may also optionally |
854 |
+- * alternate based on bit 17 (XOR not disabled and XOR |
855 |
+- * bit == 17). |
856 |
++ /* On mobile 9xx chipsets, channel interleave by the CPU is |
857 |
++ * determined by DCC. For single-channel, neither the CPU |
858 |
++ * nor the GPU do swizzling. For dual channel interleaved, |
859 |
++ * the GPU's interleave is bit 9 and 10 for X tiled, and bit |
860 |
++ * 9 for Y tiled. The CPU's interleave is independent, and |
861 |
++ * can be based on either bit 11 (haven't seen this yet) or |
862 |
++ * bit 17 (common). |
863 |
+ */ |
864 |
+ dcc = I915_READ(DCC); |
865 |
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) { |
866 |
+@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) |
867 |
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
868 |
+ break; |
869 |
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: |
870 |
+- if (IS_I915G(dev) || IS_I915GM(dev) || |
871 |
+- dcc & DCC_CHANNEL_XOR_DISABLE) { |
872 |
++ if (dcc & DCC_CHANNEL_XOR_DISABLE) { |
873 |
++ /* This is the base swizzling by the GPU for |
874 |
++ * tiled buffers. |
875 |
++ */ |
876 |
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
877 |
+ swizzle_y = I915_BIT_6_SWIZZLE_9; |
878 |
+- } else if ((IS_I965GM(dev) || IS_GM45(dev)) && |
879 |
+- (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
880 |
+- /* GM965/GM45 does either bit 11 or bit 17 |
881 |
+- * swizzling. |
882 |
+- */ |
883 |
++ } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
884 |
++ /* Bit 11 swizzling by the CPU in addition. */ |
885 |
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
886 |
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
887 |
+ } else { |
888 |
+- /* Bit 17 or perhaps other swizzling */ |
889 |
++ /* Bit 17 swizzling by the CPU in addition. */ |
890 |
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
891 |
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
892 |
+ } |
893 |
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h |
894 |
+index 90600d8..cc2938d 100644 |
895 |
+--- a/drivers/gpu/drm/i915/i915_reg.h |
896 |
++++ b/drivers/gpu/drm/i915/i915_reg.h |
897 |
+@@ -629,6 +629,22 @@ |
898 |
+ #define TV_HOTPLUG_INT_EN (1 << 18) |
899 |
+ #define CRT_HOTPLUG_INT_EN (1 << 9) |
900 |
+ #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) |
901 |
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) |
902 |
++/* must use period 64 on GM45 according to docs */ |
903 |
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8) |
904 |
++#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7) |
905 |
++#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7) |
906 |
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5) |
907 |
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5) |
908 |
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5) |
909 |
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5) |
910 |
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5) |
911 |
++#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4) |
912 |
++#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4) |
913 |
++#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
914 |
++#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
915 |
++#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ |
916 |
++ |
917 |
+ |
918 |
+ #define PORT_HOTPLUG_STAT 0x61114 |
919 |
+ #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
920 |
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c |
921 |
+index dcaed34..61c108e 100644 |
922 |
+--- a/drivers/gpu/drm/i915/intel_crt.c |
923 |
++++ b/drivers/gpu/drm/i915/intel_crt.c |
924 |
+@@ -133,20 +133,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) |
925 |
+ { |
926 |
+ struct drm_device *dev = connector->dev; |
927 |
+ struct drm_i915_private *dev_priv = dev->dev_private; |
928 |
+- u32 temp; |
929 |
+- |
930 |
+- unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
931 |
+- |
932 |
+- temp = I915_READ(PORT_HOTPLUG_EN); |
933 |
+- |
934 |
+- I915_WRITE(PORT_HOTPLUG_EN, |
935 |
+- temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5)); |
936 |
++ u32 hotplug_en; |
937 |
++ int i, tries = 0; |
938 |
++ /* |
939 |
++ * On 4 series desktop, CRT detect sequence need to be done twice |
940 |
++ * to get a reliable result. |
941 |
++ */ |
942 |
+ |
943 |
+- do { |
944 |
+- if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT)) |
945 |
+- break; |
946 |
+- msleep(1); |
947 |
+- } while (time_after(timeout, jiffies)); |
948 |
++ if (IS_G4X(dev) && !IS_GM45(dev)) |
949 |
++ tries = 2; |
950 |
++ else |
951 |
++ tries = 1; |
952 |
++ hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
953 |
++ hotplug_en &= ~(CRT_HOTPLUG_MASK); |
954 |
++ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT; |
955 |
++ |
956 |
++ if (IS_GM45(dev)) |
957 |
++ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; |
958 |
++ |
959 |
++ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
960 |
++ |
961 |
++ for (i = 0; i < tries ; i++) { |
962 |
++ unsigned long timeout; |
963 |
++ /* turn on the FORCE_DETECT */ |
964 |
++ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
965 |
++ timeout = jiffies + msecs_to_jiffies(1000); |
966 |
++ /* wait for FORCE_DETECT to go off */ |
967 |
++ do { |
968 |
++ if (!(I915_READ(PORT_HOTPLUG_EN) & |
969 |
++ CRT_HOTPLUG_FORCE_DETECT)) |
970 |
++ break; |
971 |
++ msleep(1); |
972 |
++ } while (time_after(timeout, jiffies)); |
973 |
++ } |
974 |
+ |
975 |
+ if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == |
976 |
+ CRT_HOTPLUG_MONITOR_COLOR) |
977 |
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
978 |
+index a283427..601a76f 100644 |
979 |
+--- a/drivers/gpu/drm/i915/intel_display.c |
980 |
++++ b/drivers/gpu/drm/i915/intel_display.c |
981 |
+@@ -1474,13 +1474,21 @@ static void intel_setup_outputs(struct drm_device *dev) |
982 |
+ |
983 |
+ if (IS_I9XX(dev)) { |
984 |
+ int found; |
985 |
++ u32 reg; |
986 |
+ |
987 |
+ if (I915_READ(SDVOB) & SDVO_DETECTED) { |
988 |
+ found = intel_sdvo_init(dev, SDVOB); |
989 |
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
990 |
+ intel_hdmi_init(dev, SDVOB); |
991 |
+ } |
992 |
+- if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) { |
993 |
++ |
994 |
++ /* Before G4X SDVOC doesn't have its own detect register */ |
995 |
++ if (IS_G4X(dev)) |
996 |
++ reg = SDVOC; |
997 |
++ else |
998 |
++ reg = SDVOB; |
999 |
++ |
1000 |
++ if (I915_READ(reg) & SDVO_DETECTED) { |
1001 |
+ found = intel_sdvo_init(dev, SDVOC); |
1002 |
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) |
1003 |
+ intel_hdmi_init(dev, SDVOC); |
1004 |
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c |
1005 |
+index 56485d6..b05cb67 100644 |
1006 |
+--- a/drivers/gpu/drm/i915/intel_tv.c |
1007 |
++++ b/drivers/gpu/drm/i915/intel_tv.c |
1008 |
+@@ -1558,33 +1558,49 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop |
1009 |
+ struct drm_device *dev = connector->dev; |
1010 |
+ struct intel_output *intel_output = to_intel_output(connector); |
1011 |
+ struct intel_tv_priv *tv_priv = intel_output->dev_priv; |
1012 |
++ struct drm_encoder *encoder = &intel_output->enc; |
1013 |
++ struct drm_crtc *crtc = encoder->crtc; |
1014 |
+ int ret = 0; |
1015 |
++ bool changed = false; |
1016 |
+ |
1017 |
+ ret = drm_connector_property_set_value(connector, property, val); |
1018 |
+ if (ret < 0) |
1019 |
+ goto out; |
1020 |
+ |
1021 |
+- if (property == dev->mode_config.tv_left_margin_property) |
1022 |
++ if (property == dev->mode_config.tv_left_margin_property && |
1023 |
++ tv_priv->margin[TV_MARGIN_LEFT] != val) { |
1024 |
+ tv_priv->margin[TV_MARGIN_LEFT] = val; |
1025 |
+- else if (property == dev->mode_config.tv_right_margin_property) |
1026 |
++ changed = true; |
1027 |
++ } else if (property == dev->mode_config.tv_right_margin_property && |
1028 |
++ tv_priv->margin[TV_MARGIN_RIGHT] != val) { |
1029 |
+ tv_priv->margin[TV_MARGIN_RIGHT] = val; |
1030 |
+- else if (property == dev->mode_config.tv_top_margin_property) |
1031 |
++ changed = true; |
1032 |
++ } else if (property == dev->mode_config.tv_top_margin_property && |
1033 |
++ tv_priv->margin[TV_MARGIN_TOP] != val) { |
1034 |
+ tv_priv->margin[TV_MARGIN_TOP] = val; |
1035 |
+- else if (property == dev->mode_config.tv_bottom_margin_property) |
1036 |
++ changed = true; |
1037 |
++ } else if (property == dev->mode_config.tv_bottom_margin_property && |
1038 |
++ tv_priv->margin[TV_MARGIN_BOTTOM] != val) { |
1039 |
+ tv_priv->margin[TV_MARGIN_BOTTOM] = val; |
1040 |
+- else if (property == dev->mode_config.tv_mode_property) { |
1041 |
++ changed = true; |
1042 |
++ } else if (property == dev->mode_config.tv_mode_property) { |
1043 |
+ if (val >= NUM_TV_MODES) { |
1044 |
+ ret = -EINVAL; |
1045 |
+ goto out; |
1046 |
+ } |
1047 |
++ if (!strcmp(tv_priv->tv_format, tv_modes[val].name)) |
1048 |
++ goto out; |
1049 |
++ |
1050 |
+ tv_priv->tv_format = tv_modes[val].name; |
1051 |
+- intel_tv_mode_set(&intel_output->enc, NULL, NULL); |
1052 |
++ changed = true; |
1053 |
+ } else { |
1054 |
+ ret = -EINVAL; |
1055 |
+ goto out; |
1056 |
+ } |
1057 |
+ |
1058 |
+- intel_tv_mode_set(&intel_output->enc, NULL, NULL); |
1059 |
++ if (changed && crtc) |
1060 |
++ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, |
1061 |
++ crtc->y, crtc->fb); |
1062 |
+ out: |
1063 |
+ return ret; |
1064 |
+ } |
1065 |
+diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c |
1066 |
+index 3eb9b5c..5ff6962 100644 |
1067 |
+--- a/drivers/ide/hpt366.c |
1068 |
++++ b/drivers/ide/hpt366.c |
1069 |
+@@ -114,6 +114,8 @@ |
1070 |
+ * the register setting lists into the table indexed by the clock selected |
1071 |
+ * - set the correct hwif->ultra_mask for each individual chip |
1072 |
+ * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards |
1073 |
++ * - stop resetting HPT370's state machine before each DMA transfer as that has |
1074 |
++ * caused more harm than good |
1075 |
+ * Sergei Shtylyov, <sshtylyov@×××××××××.com> or <source@××××××.com> |
1076 |
+ */ |
1077 |
+ |
1078 |
+@@ -133,7 +135,7 @@ |
1079 |
+ #define DRV_NAME "hpt366" |
1080 |
+ |
1081 |
+ /* various tuning parameters */ |
1082 |
+-#define HPT_RESET_STATE_ENGINE |
1083 |
++#undef HPT_RESET_STATE_ENGINE |
1084 |
+ #undef HPT_DELAY_INTERRUPT |
1085 |
+ |
1086 |
+ static const char *quirk_drives[] = { |
1087 |
+diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c |
1088 |
+index e9d042d..53a9e8d 100644 |
1089 |
+--- a/drivers/ide/ide-atapi.c |
1090 |
++++ b/drivers/ide/ide-atapi.c |
1091 |
+@@ -6,6 +6,8 @@ |
1092 |
+ #include <linux/cdrom.h> |
1093 |
+ #include <linux/delay.h> |
1094 |
+ #include <linux/ide.h> |
1095 |
++#include <linux/scatterlist.h> |
1096 |
++ |
1097 |
+ #include <scsi/scsi.h> |
1098 |
+ |
1099 |
+ #ifdef DEBUG |
1100 |
+@@ -566,6 +568,10 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) |
1101 |
+ : ide_pc_intr), |
1102 |
+ timeout, expiry); |
1103 |
+ |
1104 |
++ /* Send the actual packet */ |
1105 |
++ if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) |
1106 |
++ hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); |
1107 |
++ |
1108 |
+ /* Begin DMA, if necessary */ |
1109 |
+ if (dev_is_idecd(drive)) { |
1110 |
+ if (drive->dma) |
1111 |
+@@ -577,10 +583,6 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) |
1112 |
+ } |
1113 |
+ } |
1114 |
+ |
1115 |
+- /* Send the actual packet */ |
1116 |
+- if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) |
1117 |
+- hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); |
1118 |
+- |
1119 |
+ return ide_started; |
1120 |
+ } |
1121 |
+ |
1122 |
+diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c |
1123 |
+index a9a6c20..af70777 100644 |
1124 |
+--- a/drivers/ide/ide-io.c |
1125 |
++++ b/drivers/ide/ide-io.c |
1126 |
+@@ -736,11 +736,10 @@ repeat: |
1127 |
+ prev_port = hwif->host->cur_port; |
1128 |
+ hwif->rq = NULL; |
1129 |
+ |
1130 |
+- if (drive->dev_flags & IDE_DFLAG_SLEEPING) { |
1131 |
+- if (time_before(drive->sleep, jiffies)) { |
1132 |
+- ide_unlock_port(hwif); |
1133 |
+- goto plug_device; |
1134 |
+- } |
1135 |
++ if (drive->dev_flags & IDE_DFLAG_SLEEPING && |
1136 |
++ time_after(drive->sleep, jiffies)) { |
1137 |
++ ide_unlock_port(hwif); |
1138 |
++ goto plug_device; |
1139 |
+ } |
1140 |
+ |
1141 |
+ if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && |
1142 |
+diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c |
1143 |
+index ebf4be5..2d175b5 100644 |
1144 |
+--- a/drivers/input/gameport/gameport.c |
1145 |
++++ b/drivers/input/gameport/gameport.c |
1146 |
+@@ -50,9 +50,8 @@ static LIST_HEAD(gameport_list); |
1147 |
+ |
1148 |
+ static struct bus_type gameport_bus; |
1149 |
+ |
1150 |
+-static void gameport_add_driver(struct gameport_driver *drv); |
1151 |
+ static void gameport_add_port(struct gameport *gameport); |
1152 |
+-static void gameport_destroy_port(struct gameport *gameport); |
1153 |
++static void gameport_attach_driver(struct gameport_driver *drv); |
1154 |
+ static void gameport_reconnect_port(struct gameport *gameport); |
1155 |
+ static void gameport_disconnect_port(struct gameport *gameport); |
1156 |
+ |
1157 |
+@@ -230,7 +229,6 @@ static void gameport_find_driver(struct gameport *gameport) |
1158 |
+ |
1159 |
+ enum gameport_event_type { |
1160 |
+ GAMEPORT_REGISTER_PORT, |
1161 |
+- GAMEPORT_REGISTER_DRIVER, |
1162 |
+ GAMEPORT_ATTACH_DRIVER, |
1163 |
+ }; |
1164 |
+ |
1165 |
+@@ -374,8 +372,8 @@ static void gameport_handle_event(void) |
1166 |
+ gameport_add_port(event->object); |
1167 |
+ break; |
1168 |
+ |
1169 |
+- case GAMEPORT_REGISTER_DRIVER: |
1170 |
+- gameport_add_driver(event->object); |
1171 |
++ case GAMEPORT_ATTACH_DRIVER: |
1172 |
++ gameport_attach_driver(event->object); |
1173 |
+ break; |
1174 |
+ |
1175 |
+ default: |
1176 |
+@@ -706,14 +704,14 @@ static int gameport_driver_remove(struct device *dev) |
1177 |
+ return 0; |
1178 |
+ } |
1179 |
+ |
1180 |
+-static void gameport_add_driver(struct gameport_driver *drv) |
1181 |
++static void gameport_attach_driver(struct gameport_driver *drv) |
1182 |
+ { |
1183 |
+ int error; |
1184 |
+ |
1185 |
+- error = driver_register(&drv->driver); |
1186 |
++ error = driver_attach(&drv->driver); |
1187 |
+ if (error) |
1188 |
+ printk(KERN_ERR |
1189 |
+- "gameport: driver_register() failed for %s, error: %d\n", |
1190 |
++ "gameport: driver_attach() failed for %s, error: %d\n", |
1191 |
+ drv->driver.name, error); |
1192 |
+ } |
1193 |
+ |
1194 |
+diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h |
1195 |
+index d3ec217..3a8cfa2 100644 |
1196 |
+--- a/drivers/md/dm-bio-record.h |
1197 |
++++ b/drivers/md/dm-bio-record.h |
1198 |
+@@ -16,30 +16,56 @@ |
1199 |
+ * functions in this file help the target record and restore the |
1200 |
+ * original bio state. |
1201 |
+ */ |
1202 |
++ |
1203 |
++struct dm_bio_vec_details { |
1204 |
++#if PAGE_SIZE < 65536 |
1205 |
++ __u16 bv_len; |
1206 |
++ __u16 bv_offset; |
1207 |
++#else |
1208 |
++ unsigned bv_len; |
1209 |
++ unsigned bv_offset; |
1210 |
++#endif |
1211 |
++}; |
1212 |
++ |
1213 |
+ struct dm_bio_details { |
1214 |
+ sector_t bi_sector; |
1215 |
+ struct block_device *bi_bdev; |
1216 |
+ unsigned int bi_size; |
1217 |
+ unsigned short bi_idx; |
1218 |
+ unsigned long bi_flags; |
1219 |
++ struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; |
1220 |
+ }; |
1221 |
+ |
1222 |
+ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) |
1223 |
+ { |
1224 |
++ unsigned i; |
1225 |
++ |
1226 |
+ bd->bi_sector = bio->bi_sector; |
1227 |
+ bd->bi_bdev = bio->bi_bdev; |
1228 |
+ bd->bi_size = bio->bi_size; |
1229 |
+ bd->bi_idx = bio->bi_idx; |
1230 |
+ bd->bi_flags = bio->bi_flags; |
1231 |
++ |
1232 |
++ for (i = 0; i < bio->bi_vcnt; i++) { |
1233 |
++ bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len; |
1234 |
++ bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset; |
1235 |
++ } |
1236 |
+ } |
1237 |
+ |
1238 |
+ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) |
1239 |
+ { |
1240 |
++ unsigned i; |
1241 |
++ |
1242 |
+ bio->bi_sector = bd->bi_sector; |
1243 |
+ bio->bi_bdev = bd->bi_bdev; |
1244 |
+ bio->bi_size = bd->bi_size; |
1245 |
+ bio->bi_idx = bd->bi_idx; |
1246 |
+ bio->bi_flags = bd->bi_flags; |
1247 |
++ |
1248 |
++ for (i = 0; i < bio->bi_vcnt; i++) { |
1249 |
++ bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len; |
1250 |
++ bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset; |
1251 |
++ } |
1252 |
+ } |
1253 |
+ |
1254 |
+ #endif |
1255 |
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c |
1256 |
+index 36e2b5e..e73aabd 100644 |
1257 |
+--- a/drivers/md/dm-io.c |
1258 |
++++ b/drivers/md/dm-io.c |
1259 |
+@@ -370,16 +370,13 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, |
1260 |
+ while (1) { |
1261 |
+ set_current_state(TASK_UNINTERRUPTIBLE); |
1262 |
+ |
1263 |
+- if (!atomic_read(&io.count) || signal_pending(current)) |
1264 |
++ if (!atomic_read(&io.count)) |
1265 |
+ break; |
1266 |
+ |
1267 |
+ io_schedule(); |
1268 |
+ } |
1269 |
+ set_current_state(TASK_RUNNING); |
1270 |
+ |
1271 |
+- if (atomic_read(&io.count)) |
1272 |
+- return -EINTR; |
1273 |
+- |
1274 |
+ if (error_bits) |
1275 |
+ *error_bits = io.error_bits; |
1276 |
+ |
1277 |
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c |
1278 |
+index 0a225da..3e3fc06 100644 |
1279 |
+--- a/drivers/md/dm-kcopyd.c |
1280 |
++++ b/drivers/md/dm-kcopyd.c |
1281 |
+@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job) |
1282 |
+ dm_kcopyd_notify_fn fn = job->fn; |
1283 |
+ struct dm_kcopyd_client *kc = job->kc; |
1284 |
+ |
1285 |
+- kcopyd_put_pages(kc, job->pages); |
1286 |
++ if (job->pages) |
1287 |
++ kcopyd_put_pages(kc, job->pages); |
1288 |
+ mempool_free(job, kc->job_pool); |
1289 |
+ fn(read_err, write_err, context); |
1290 |
+ |
1291 |
+@@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err, |
1292 |
+ sector_t progress = 0; |
1293 |
+ sector_t count = 0; |
1294 |
+ struct kcopyd_job *job = (struct kcopyd_job *) context; |
1295 |
++ struct dm_kcopyd_client *kc = job->kc; |
1296 |
+ |
1297 |
+ mutex_lock(&job->lock); |
1298 |
+ |
1299 |
+@@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err, |
1300 |
+ |
1301 |
+ if (count) { |
1302 |
+ int i; |
1303 |
+- struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, |
1304 |
++ struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool, |
1305 |
+ GFP_NOIO); |
1306 |
+ |
1307 |
+ *sub_job = *job; |
1308 |
+@@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err, |
1309 |
+ } else if (atomic_dec_and_test(&job->sub_jobs)) { |
1310 |
+ |
1311 |
+ /* |
1312 |
+- * To avoid a race we must keep the job around |
1313 |
+- * until after the notify function has completed. |
1314 |
+- * Otherwise the client may try and stop the job |
1315 |
+- * after we've completed. |
1316 |
++ * Queue the completion callback to the kcopyd thread. |
1317 |
++ * |
1318 |
++ * Some callers assume that all the completions are called |
1319 |
++ * from a single thread and don't race with each other. |
1320 |
++ * |
1321 |
++ * We must not call the callback directly here because this |
1322 |
++ * code may not be executing in the thread. |
1323 |
+ */ |
1324 |
+- job->fn(read_err, write_err, job->context); |
1325 |
+- mempool_free(job, job->kc->job_pool); |
1326 |
++ push(&kc->complete_jobs, job); |
1327 |
++ wake(kc); |
1328 |
+ } |
1329 |
+ } |
1330 |
+ |
1331 |
+@@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job) |
1332 |
+ { |
1333 |
+ int i; |
1334 |
+ |
1335 |
++ atomic_inc(&job->kc->nr_jobs); |
1336 |
++ |
1337 |
+ atomic_set(&job->sub_jobs, SPLIT_COUNT); |
1338 |
+ for (i = 0; i < SPLIT_COUNT; i++) |
1339 |
+ segment_complete(0, 0u, job); |
1340 |
+diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c |
1341 |
+index 96ea226..42c04f0 100644 |
1342 |
+--- a/drivers/md/dm-path-selector.c |
1343 |
++++ b/drivers/md/dm-path-selector.c |
1344 |
+@@ -17,9 +17,7 @@ |
1345 |
+ |
1346 |
+ struct ps_internal { |
1347 |
+ struct path_selector_type pst; |
1348 |
+- |
1349 |
+ struct list_head list; |
1350 |
+- long use; |
1351 |
+ }; |
1352 |
+ |
1353 |
+ #define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst) |
1354 |
+@@ -45,12 +43,8 @@ static struct ps_internal *get_path_selector(const char *name) |
1355 |
+ |
1356 |
+ down_read(&_ps_lock); |
1357 |
+ psi = __find_path_selector_type(name); |
1358 |
+- if (psi) { |
1359 |
+- if ((psi->use == 0) && !try_module_get(psi->pst.module)) |
1360 |
+- psi = NULL; |
1361 |
+- else |
1362 |
+- psi->use++; |
1363 |
+- } |
1364 |
++ if (psi && !try_module_get(psi->pst.module)) |
1365 |
++ psi = NULL; |
1366 |
+ up_read(&_ps_lock); |
1367 |
+ |
1368 |
+ return psi; |
1369 |
+@@ -84,11 +78,7 @@ void dm_put_path_selector(struct path_selector_type *pst) |
1370 |
+ if (!psi) |
1371 |
+ goto out; |
1372 |
+ |
1373 |
+- if (--psi->use == 0) |
1374 |
+- module_put(psi->pst.module); |
1375 |
+- |
1376 |
+- BUG_ON(psi->use < 0); |
1377 |
+- |
1378 |
++ module_put(psi->pst.module); |
1379 |
+ out: |
1380 |
+ up_read(&_ps_lock); |
1381 |
+ } |
1382 |
+@@ -136,11 +126,6 @@ int dm_unregister_path_selector(struct path_selector_type *pst) |
1383 |
+ return -EINVAL; |
1384 |
+ } |
1385 |
+ |
1386 |
+- if (psi->use) { |
1387 |
+- up_write(&_ps_lock); |
1388 |
+- return -ETXTBSY; |
1389 |
+- } |
1390 |
+- |
1391 |
+ list_del(&psi->list); |
1392 |
+ |
1393 |
+ up_write(&_ps_lock); |
1394 |
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c |
1395 |
+index 4d6bc10..62d5948 100644 |
1396 |
+--- a/drivers/md/dm-raid1.c |
1397 |
++++ b/drivers/md/dm-raid1.c |
1398 |
+@@ -145,6 +145,8 @@ struct dm_raid1_read_record { |
1399 |
+ struct dm_bio_details details; |
1400 |
+ }; |
1401 |
+ |
1402 |
++static struct kmem_cache *_dm_raid1_read_record_cache; |
1403 |
++ |
1404 |
+ /* |
1405 |
+ * Every mirror should look like this one. |
1406 |
+ */ |
1407 |
+@@ -764,9 +766,9 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, |
1408 |
+ atomic_set(&ms->suspend, 0); |
1409 |
+ atomic_set(&ms->default_mirror, DEFAULT_MIRROR); |
1410 |
+ |
1411 |
+- len = sizeof(struct dm_raid1_read_record); |
1412 |
+- ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS, |
1413 |
+- len); |
1414 |
++ ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS, |
1415 |
++ _dm_raid1_read_record_cache); |
1416 |
++ |
1417 |
+ if (!ms->read_record_pool) { |
1418 |
+ ti->error = "Error creating mirror read_record_pool"; |
1419 |
+ kfree(ms); |
1420 |
+@@ -1279,16 +1281,31 @@ static int __init dm_mirror_init(void) |
1421 |
+ { |
1422 |
+ int r; |
1423 |
+ |
1424 |
++ _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0); |
1425 |
++ if (!_dm_raid1_read_record_cache) { |
1426 |
++ DMERR("Can't allocate dm_raid1_read_record cache"); |
1427 |
++ r = -ENOMEM; |
1428 |
++ goto bad_cache; |
1429 |
++ } |
1430 |
++ |
1431 |
+ r = dm_register_target(&mirror_target); |
1432 |
+- if (r < 0) |
1433 |
++ if (r < 0) { |
1434 |
+ DMERR("Failed to register mirror target"); |
1435 |
++ goto bad_target; |
1436 |
++ } |
1437 |
++ |
1438 |
++ return 0; |
1439 |
+ |
1440 |
++bad_target: |
1441 |
++ kmem_cache_destroy(_dm_raid1_read_record_cache); |
1442 |
++bad_cache: |
1443 |
+ return r; |
1444 |
+ } |
1445 |
+ |
1446 |
+ static void __exit dm_mirror_exit(void) |
1447 |
+ { |
1448 |
+ dm_unregister_target(&mirror_target); |
1449 |
++ kmem_cache_destroy(_dm_raid1_read_record_cache); |
1450 |
+ } |
1451 |
+ |
1452 |
+ /* Module hooks */ |
1453 |
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c |
1454 |
+index 65ff82f..462750c 100644 |
1455 |
+--- a/drivers/md/dm-snap.c |
1456 |
++++ b/drivers/md/dm-snap.c |
1457 |
+@@ -972,6 +972,17 @@ static void start_copy(struct dm_snap_pending_exception *pe) |
1458 |
+ &src, 1, &dest, 0, copy_callback, pe); |
1459 |
+ } |
1460 |
+ |
1461 |
++static struct dm_snap_pending_exception * |
1462 |
++__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) |
1463 |
++{ |
1464 |
++ struct dm_snap_exception *e = lookup_exception(&s->pending, chunk); |
1465 |
++ |
1466 |
++ if (!e) |
1467 |
++ return NULL; |
1468 |
++ |
1469 |
++ return container_of(e, struct dm_snap_pending_exception, e); |
1470 |
++} |
1471 |
++ |
1472 |
+ /* |
1473 |
+ * Looks to see if this snapshot already has a pending exception |
1474 |
+ * for this chunk, otherwise it allocates a new one and inserts |
1475 |
+@@ -981,40 +992,15 @@ static void start_copy(struct dm_snap_pending_exception *pe) |
1476 |
+ * this. |
1477 |
+ */ |
1478 |
+ static struct dm_snap_pending_exception * |
1479 |
+-__find_pending_exception(struct dm_snapshot *s, struct bio *bio) |
1480 |
++__find_pending_exception(struct dm_snapshot *s, |
1481 |
++ struct dm_snap_pending_exception *pe, chunk_t chunk) |
1482 |
+ { |
1483 |
+- struct dm_snap_exception *e; |
1484 |
+- struct dm_snap_pending_exception *pe; |
1485 |
+- chunk_t chunk = sector_to_chunk(s, bio->bi_sector); |
1486 |
+- |
1487 |
+- /* |
1488 |
+- * Is there a pending exception for this already ? |
1489 |
+- */ |
1490 |
+- e = lookup_exception(&s->pending, chunk); |
1491 |
+- if (e) { |
1492 |
+- /* cast the exception to a pending exception */ |
1493 |
+- pe = container_of(e, struct dm_snap_pending_exception, e); |
1494 |
+- goto out; |
1495 |
+- } |
1496 |
+- |
1497 |
+- /* |
1498 |
+- * Create a new pending exception, we don't want |
1499 |
+- * to hold the lock while we do this. |
1500 |
+- */ |
1501 |
+- up_write(&s->lock); |
1502 |
+- pe = alloc_pending_exception(s); |
1503 |
+- down_write(&s->lock); |
1504 |
+- |
1505 |
+- if (!s->valid) { |
1506 |
+- free_pending_exception(pe); |
1507 |
+- return NULL; |
1508 |
+- } |
1509 |
++ struct dm_snap_pending_exception *pe2; |
1510 |
+ |
1511 |
+- e = lookup_exception(&s->pending, chunk); |
1512 |
+- if (e) { |
1513 |
++ pe2 = __lookup_pending_exception(s, chunk); |
1514 |
++ if (pe2) { |
1515 |
+ free_pending_exception(pe); |
1516 |
+- pe = container_of(e, struct dm_snap_pending_exception, e); |
1517 |
+- goto out; |
1518 |
++ return pe2; |
1519 |
+ } |
1520 |
+ |
1521 |
+ pe->e.old_chunk = chunk; |
1522 |
+@@ -1032,7 +1018,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio) |
1523 |
+ get_pending_exception(pe); |
1524 |
+ insert_exception(&s->pending, &pe->e); |
1525 |
+ |
1526 |
+- out: |
1527 |
+ return pe; |
1528 |
+ } |
1529 |
+ |
1530 |
+@@ -1083,11 +1068,31 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, |
1531 |
+ * writeable. |
1532 |
+ */ |
1533 |
+ if (bio_rw(bio) == WRITE) { |
1534 |
+- pe = __find_pending_exception(s, bio); |
1535 |
++ pe = __lookup_pending_exception(s, chunk); |
1536 |
+ if (!pe) { |
1537 |
+- __invalidate_snapshot(s, -ENOMEM); |
1538 |
+- r = -EIO; |
1539 |
+- goto out_unlock; |
1540 |
++ up_write(&s->lock); |
1541 |
++ pe = alloc_pending_exception(s); |
1542 |
++ down_write(&s->lock); |
1543 |
++ |
1544 |
++ if (!s->valid) { |
1545 |
++ free_pending_exception(pe); |
1546 |
++ r = -EIO; |
1547 |
++ goto out_unlock; |
1548 |
++ } |
1549 |
++ |
1550 |
++ e = lookup_exception(&s->complete, chunk); |
1551 |
++ if (e) { |
1552 |
++ free_pending_exception(pe); |
1553 |
++ remap_exception(s, e, bio, chunk); |
1554 |
++ goto out_unlock; |
1555 |
++ } |
1556 |
++ |
1557 |
++ pe = __find_pending_exception(s, pe, chunk); |
1558 |
++ if (!pe) { |
1559 |
++ __invalidate_snapshot(s, -ENOMEM); |
1560 |
++ r = -EIO; |
1561 |
++ goto out_unlock; |
1562 |
++ } |
1563 |
+ } |
1564 |
+ |
1565 |
+ remap_exception(s, &pe->e, bio, chunk); |
1566 |
+@@ -1217,10 +1222,28 @@ static int __origin_write(struct list_head *snapshots, struct bio *bio) |
1567 |
+ if (e) |
1568 |
+ goto next_snapshot; |
1569 |
+ |
1570 |
+- pe = __find_pending_exception(snap, bio); |
1571 |
++ pe = __lookup_pending_exception(snap, chunk); |
1572 |
+ if (!pe) { |
1573 |
+- __invalidate_snapshot(snap, -ENOMEM); |
1574 |
+- goto next_snapshot; |
1575 |
++ up_write(&snap->lock); |
1576 |
++ pe = alloc_pending_exception(snap); |
1577 |
++ down_write(&snap->lock); |
1578 |
++ |
1579 |
++ if (!snap->valid) { |
1580 |
++ free_pending_exception(pe); |
1581 |
++ goto next_snapshot; |
1582 |
++ } |
1583 |
++ |
1584 |
++ e = lookup_exception(&snap->complete, chunk); |
1585 |
++ if (e) { |
1586 |
++ free_pending_exception(pe); |
1587 |
++ goto next_snapshot; |
1588 |
++ } |
1589 |
++ |
1590 |
++ pe = __find_pending_exception(snap, pe, chunk); |
1591 |
++ if (!pe) { |
1592 |
++ __invalidate_snapshot(snap, -ENOMEM); |
1593 |
++ goto next_snapshot; |
1594 |
++ } |
1595 |
+ } |
1596 |
+ |
1597 |
+ if (!primary_pe) { |
1598 |
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
1599 |
+index 2fd66c3..e8361b1 100644 |
1600 |
+--- a/drivers/md/dm-table.c |
1601 |
++++ b/drivers/md/dm-table.c |
1602 |
+@@ -399,28 +399,30 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start, |
1603 |
+ } |
1604 |
+ |
1605 |
+ /* |
1606 |
+- * This upgrades the mode on an already open dm_dev. Being |
1607 |
++ * This upgrades the mode on an already open dm_dev, being |
1608 |
+ * careful to leave things as they were if we fail to reopen the |
1609 |
+- * device. |
1610 |
++ * device and not to touch the existing bdev field in case |
1611 |
++ * it is accessed concurrently inside dm_table_any_congested(). |
1612 |
+ */ |
1613 |
+ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, |
1614 |
+ struct mapped_device *md) |
1615 |
+ { |
1616 |
+ int r; |
1617 |
+- struct dm_dev_internal dd_copy; |
1618 |
+- dev_t dev = dd->dm_dev.bdev->bd_dev; |
1619 |
++ struct dm_dev_internal dd_new, dd_old; |
1620 |
+ |
1621 |
+- dd_copy = *dd; |
1622 |
++ dd_new = dd_old = *dd; |
1623 |
++ |
1624 |
++ dd_new.dm_dev.mode |= new_mode; |
1625 |
++ dd_new.dm_dev.bdev = NULL; |
1626 |
++ |
1627 |
++ r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); |
1628 |
++ if (r) |
1629 |
++ return r; |
1630 |
+ |
1631 |
+ dd->dm_dev.mode |= new_mode; |
1632 |
+- dd->dm_dev.bdev = NULL; |
1633 |
+- r = open_dev(dd, dev, md); |
1634 |
+- if (!r) |
1635 |
+- close_dev(&dd_copy, md); |
1636 |
+- else |
1637 |
+- *dd = dd_copy; |
1638 |
++ close_dev(&dd_old, md); |
1639 |
+ |
1640 |
+- return r; |
1641 |
++ return 0; |
1642 |
+ } |
1643 |
+ |
1644 |
+ /* |
1645 |
+diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c |
1646 |
+index 7decf10..db72c94 100644 |
1647 |
+--- a/drivers/md/dm-target.c |
1648 |
++++ b/drivers/md/dm-target.c |
1649 |
+@@ -18,7 +18,6 @@ struct tt_internal { |
1650 |
+ struct target_type tt; |
1651 |
+ |
1652 |
+ struct list_head list; |
1653 |
+- long use; |
1654 |
+ }; |
1655 |
+ |
1656 |
+ static LIST_HEAD(_targets); |
1657 |
+@@ -44,12 +43,8 @@ static struct tt_internal *get_target_type(const char *name) |
1658 |
+ down_read(&_lock); |
1659 |
+ |
1660 |
+ ti = __find_target_type(name); |
1661 |
+- if (ti) { |
1662 |
+- if ((ti->use == 0) && !try_module_get(ti->tt.module)) |
1663 |
+- ti = NULL; |
1664 |
+- else |
1665 |
+- ti->use++; |
1666 |
+- } |
1667 |
++ if (ti && !try_module_get(ti->tt.module)) |
1668 |
++ ti = NULL; |
1669 |
+ |
1670 |
+ up_read(&_lock); |
1671 |
+ return ti; |
1672 |
+@@ -77,10 +72,7 @@ void dm_put_target_type(struct target_type *t) |
1673 |
+ struct tt_internal *ti = (struct tt_internal *) t; |
1674 |
+ |
1675 |
+ down_read(&_lock); |
1676 |
+- if (--ti->use == 0) |
1677 |
+- module_put(ti->tt.module); |
1678 |
+- |
1679 |
+- BUG_ON(ti->use < 0); |
1680 |
++ module_put(ti->tt.module); |
1681 |
+ up_read(&_lock); |
1682 |
+ |
1683 |
+ return; |
1684 |
+@@ -140,12 +132,6 @@ void dm_unregister_target(struct target_type *t) |
1685 |
+ BUG(); |
1686 |
+ } |
1687 |
+ |
1688 |
+- if (ti->use) { |
1689 |
+- DMCRIT("Attempt to unregister target still in use: %s", |
1690 |
+- t->name); |
1691 |
+- BUG(); |
1692 |
+- } |
1693 |
+- |
1694 |
+ list_del(&ti->list); |
1695 |
+ kfree(ti); |
1696 |
+ |
1697 |
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c |
1698 |
+index e246642..4a25fa9 100644 |
1699 |
+--- a/drivers/md/raid1.c |
1700 |
++++ b/drivers/md/raid1.c |
1701 |
+@@ -120,6 +120,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
1702 |
+ goto out_free_pages; |
1703 |
+ |
1704 |
+ bio->bi_io_vec[i].bv_page = page; |
1705 |
++ bio->bi_vcnt = i+1; |
1706 |
+ } |
1707 |
+ } |
1708 |
+ /* If not user-requests, copy the page pointers to all bios */ |
1709 |
+@@ -135,9 +136,9 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) |
1710 |
+ return r1_bio; |
1711 |
+ |
1712 |
+ out_free_pages: |
1713 |
+- for (i=0; i < RESYNC_PAGES ; i++) |
1714 |
+- for (j=0 ; j < pi->raid_disks; j++) |
1715 |
+- safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); |
1716 |
++ for (j=0 ; j < pi->raid_disks; j++) |
1717 |
++ for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++) |
1718 |
++ put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); |
1719 |
+ j = -1; |
1720 |
+ out_free_bio: |
1721 |
+ while ( ++j < pi->raid_disks ) |
1722 |
+diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c |
1723 |
+index 8683d10..5b107fa 100644 |
1724 |
+--- a/drivers/media/video/cx88/cx88-input.c |
1725 |
++++ b/drivers/media/video/cx88/cx88-input.c |
1726 |
+@@ -48,8 +48,7 @@ struct cx88_IR { |
1727 |
+ |
1728 |
+ /* poll external decoder */ |
1729 |
+ int polling; |
1730 |
+- struct work_struct work; |
1731 |
+- struct timer_list timer; |
1732 |
++ struct delayed_work work; |
1733 |
+ u32 gpio_addr; |
1734 |
+ u32 last_gpio; |
1735 |
+ u32 mask_keycode; |
1736 |
+@@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx88_IR *ir) |
1737 |
+ } |
1738 |
+ } |
1739 |
+ |
1740 |
+-static void ir_timer(unsigned long data) |
1741 |
+-{ |
1742 |
+- struct cx88_IR *ir = (struct cx88_IR *)data; |
1743 |
+- |
1744 |
+- schedule_work(&ir->work); |
1745 |
+-} |
1746 |
+- |
1747 |
+ static void cx88_ir_work(struct work_struct *work) |
1748 |
+ { |
1749 |
+- struct cx88_IR *ir = container_of(work, struct cx88_IR, work); |
1750 |
++ struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work); |
1751 |
+ |
1752 |
+ cx88_ir_handle_key(ir); |
1753 |
+- mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling)); |
1754 |
++ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); |
1755 |
+ } |
1756 |
+ |
1757 |
+ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir) |
1758 |
+ { |
1759 |
+ if (ir->polling) { |
1760 |
+- setup_timer(&ir->timer, ir_timer, (unsigned long)ir); |
1761 |
+- INIT_WORK(&ir->work, cx88_ir_work); |
1762 |
+- schedule_work(&ir->work); |
1763 |
++ INIT_DELAYED_WORK(&ir->work, cx88_ir_work); |
1764 |
++ schedule_delayed_work(&ir->work, 0); |
1765 |
+ } |
1766 |
+ if (ir->sampling) { |
1767 |
+ core->pci_irqmask |= PCI_INT_IR_SMPINT; |
1768 |
+@@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir) |
1769 |
+ core->pci_irqmask &= ~PCI_INT_IR_SMPINT; |
1770 |
+ } |
1771 |
+ |
1772 |
+- if (ir->polling) { |
1773 |
+- del_timer_sync(&ir->timer); |
1774 |
+- flush_scheduled_work(); |
1775 |
+- } |
1776 |
++ if (ir->polling) |
1777 |
++ cancel_delayed_work_sync(&ir->work); |
1778 |
+ } |
1779 |
+ |
1780 |
+ /* ---------------------------------------------------------------------- */ |
1781 |
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c |
1782 |
+index ea3aafb..6fc789e 100644 |
1783 |
+--- a/drivers/message/fusion/mptbase.c |
1784 |
++++ b/drivers/message/fusion/mptbase.c |
1785 |
+@@ -5934,7 +5934,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) |
1786 |
+ |
1787 |
+ /* Initalize the timer |
1788 |
+ */ |
1789 |
+- init_timer(&pCfg->timer); |
1790 |
++ init_timer_on_stack(&pCfg->timer); |
1791 |
+ pCfg->timer.data = (unsigned long) ioc; |
1792 |
+ pCfg->timer.function = mpt_timer_expired; |
1793 |
+ pCfg->wait_done = 0; |
1794 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
1795 |
+index 3d76686..87045f8 100644 |
1796 |
+--- a/drivers/net/bonding/bond_main.c |
1797 |
++++ b/drivers/net/bonding/bond_main.c |
1798 |
+@@ -2565,7 +2565,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) |
1799 |
+ |
1800 |
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
1801 |
+ if (!targets[i]) |
1802 |
+- continue; |
1803 |
++ break; |
1804 |
+ pr_debug("basa: target %x\n", targets[i]); |
1805 |
+ if (list_empty(&bond->vlan_list)) { |
1806 |
+ pr_debug("basa: empty vlan: arp_send\n"); |
1807 |
+@@ -2672,7 +2672,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 |
1808 |
+ int i; |
1809 |
+ __be32 *targets = bond->params.arp_targets; |
1810 |
+ |
1811 |
+- targets = bond->params.arp_targets; |
1812 |
+ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { |
1813 |
+ pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", |
1814 |
+ &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); |
1815 |
+@@ -3294,7 +3293,7 @@ static void bond_info_show_master(struct seq_file *seq) |
1816 |
+ |
1817 |
+ for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { |
1818 |
+ if (!bond->params.arp_targets[i]) |
1819 |
+- continue; |
1820 |
++ break; |
1821 |
+ if (printed) |
1822 |
+ seq_printf(seq, ","); |
1823 |
+ seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); |
1824 |
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c |
1825 |
+index 18cf478..d287315 100644 |
1826 |
+--- a/drivers/net/bonding/bond_sysfs.c |
1827 |
++++ b/drivers/net/bonding/bond_sysfs.c |
1828 |
+@@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d, |
1829 |
+ goto out; |
1830 |
+ } |
1831 |
+ /* look for an empty slot to put the target in, and check for dupes */ |
1832 |
+- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
1833 |
++ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { |
1834 |
+ if (targets[i] == newtarget) { /* duplicate */ |
1835 |
+ printk(KERN_ERR DRV_NAME |
1836 |
+ ": %s: ARP target %pI4 is already present\n", |
1837 |
+ bond->dev->name, &newtarget); |
1838 |
+- if (done) |
1839 |
+- targets[i] = 0; |
1840 |
+ ret = -EINVAL; |
1841 |
+ goto out; |
1842 |
+ } |
1843 |
+- if (targets[i] == 0 && !done) { |
1844 |
++ if (targets[i] == 0) { |
1845 |
+ printk(KERN_INFO DRV_NAME |
1846 |
+ ": %s: adding ARP target %pI4.\n", |
1847 |
+ bond->dev->name, &newtarget); |
1848 |
+@@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, |
1849 |
+ goto out; |
1850 |
+ } |
1851 |
+ |
1852 |
+- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { |
1853 |
++ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { |
1854 |
+ if (targets[i] == newtarget) { |
1855 |
++ int j; |
1856 |
+ printk(KERN_INFO DRV_NAME |
1857 |
+ ": %s: removing ARP target %pI4.\n", |
1858 |
+ bond->dev->name, &newtarget); |
1859 |
+- targets[i] = 0; |
1860 |
++ for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) |
1861 |
++ targets[j] = targets[j+1]; |
1862 |
++ |
1863 |
++ targets[j] = 0; |
1864 |
+ done = 1; |
1865 |
+ } |
1866 |
+ } |
1867 |
+diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c |
1868 |
+index 67f87a7..090ada6 100644 |
1869 |
+--- a/drivers/net/ixgbe/ixgbe_ethtool.c |
1870 |
++++ b/drivers/net/ixgbe/ixgbe_ethtool.c |
1871 |
+@@ -691,9 +691,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, |
1872 |
+ struct ethtool_ringparam *ring) |
1873 |
+ { |
1874 |
+ struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1875 |
+- struct ixgbe_ring *temp_ring; |
1876 |
++ struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; |
1877 |
+ int i, err; |
1878 |
+ u32 new_rx_count, new_tx_count; |
1879 |
++ bool need_update = false; |
1880 |
+ |
1881 |
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
1882 |
+ return -EINVAL; |
1883 |
+@@ -712,80 +713,94 @@ static int ixgbe_set_ringparam(struct net_device *netdev, |
1884 |
+ return 0; |
1885 |
+ } |
1886 |
+ |
1887 |
+- temp_ring = kcalloc(adapter->num_tx_queues, |
1888 |
+- sizeof(struct ixgbe_ring), GFP_KERNEL); |
1889 |
+- if (!temp_ring) |
1890 |
+- return -ENOMEM; |
1891 |
+- |
1892 |
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) |
1893 |
+ msleep(1); |
1894 |
+ |
1895 |
+- if (new_tx_count != adapter->tx_ring->count) { |
1896 |
++ temp_tx_ring = kcalloc(adapter->num_tx_queues, |
1897 |
++ sizeof(struct ixgbe_ring), GFP_KERNEL); |
1898 |
++ if (!temp_tx_ring) { |
1899 |
++ err = -ENOMEM; |
1900 |
++ goto err_setup; |
1901 |
++ } |
1902 |
++ |
1903 |
++ if (new_tx_count != adapter->tx_ring_count) { |
1904 |
++ memcpy(temp_tx_ring, adapter->tx_ring, |
1905 |
++ adapter->num_tx_queues * sizeof(struct ixgbe_ring)); |
1906 |
+ for (i = 0; i < adapter->num_tx_queues; i++) { |
1907 |
+- temp_ring[i].count = new_tx_count; |
1908 |
+- err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); |
1909 |
++ temp_tx_ring[i].count = new_tx_count; |
1910 |
++ err = ixgbe_setup_tx_resources(adapter, |
1911 |
++ &temp_tx_ring[i]); |
1912 |
+ if (err) { |
1913 |
+ while (i) { |
1914 |
+ i--; |
1915 |
+ ixgbe_free_tx_resources(adapter, |
1916 |
+- &temp_ring[i]); |
1917 |
++ &temp_tx_ring[i]); |
1918 |
+ } |
1919 |
+ goto err_setup; |
1920 |
+ } |
1921 |
+- temp_ring[i].v_idx = adapter->tx_ring[i].v_idx; |
1922 |
++ temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; |
1923 |
+ } |
1924 |
+- if (netif_running(netdev)) |
1925 |
+- netdev->netdev_ops->ndo_stop(netdev); |
1926 |
+- ixgbe_reset_interrupt_capability(adapter); |
1927 |
+- ixgbe_napi_del_all(adapter); |
1928 |
+- INIT_LIST_HEAD(&netdev->napi_list); |
1929 |
+- kfree(adapter->tx_ring); |
1930 |
+- adapter->tx_ring = temp_ring; |
1931 |
+- temp_ring = NULL; |
1932 |
+- adapter->tx_ring_count = new_tx_count; |
1933 |
++ need_update = true; |
1934 |
+ } |
1935 |
+ |
1936 |
+- temp_ring = kcalloc(adapter->num_rx_queues, |
1937 |
+- sizeof(struct ixgbe_ring), GFP_KERNEL); |
1938 |
+- if (!temp_ring) { |
1939 |
+- if (netif_running(netdev)) |
1940 |
+- netdev->netdev_ops->ndo_open(netdev); |
1941 |
+- return -ENOMEM; |
1942 |
++ temp_rx_ring = kcalloc(adapter->num_rx_queues, |
1943 |
++ sizeof(struct ixgbe_ring), GFP_KERNEL); |
1944 |
++ if ((!temp_rx_ring) && (need_update)) { |
1945 |
++ for (i = 0; i < adapter->num_tx_queues; i++) |
1946 |
++ ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]); |
1947 |
++ kfree(temp_tx_ring); |
1948 |
++ err = -ENOMEM; |
1949 |
++ goto err_setup; |
1950 |
+ } |
1951 |
+ |
1952 |
+- if (new_rx_count != adapter->rx_ring->count) { |
1953 |
++ if (new_rx_count != adapter->rx_ring_count) { |
1954 |
++ memcpy(temp_rx_ring, adapter->rx_ring, |
1955 |
++ adapter->num_rx_queues * sizeof(struct ixgbe_ring)); |
1956 |
+ for (i = 0; i < adapter->num_rx_queues; i++) { |
1957 |
+- temp_ring[i].count = new_rx_count; |
1958 |
+- err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); |
1959 |
++ temp_rx_ring[i].count = new_rx_count; |
1960 |
++ err = ixgbe_setup_rx_resources(adapter, |
1961 |
++ &temp_rx_ring[i]); |
1962 |
+ if (err) { |
1963 |
+ while (i) { |
1964 |
+ i--; |
1965 |
+ ixgbe_free_rx_resources(adapter, |
1966 |
+- &temp_ring[i]); |
1967 |
++ &temp_rx_ring[i]); |
1968 |
+ } |
1969 |
+ goto err_setup; |
1970 |
+ } |
1971 |
+- temp_ring[i].v_idx = adapter->rx_ring[i].v_idx; |
1972 |
++ temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; |
1973 |
+ } |
1974 |
++ need_update = true; |
1975 |
++ } |
1976 |
++ |
1977 |
++ /* if rings need to be updated, here's the place to do it in one shot */ |
1978 |
++ if (need_update) { |
1979 |
+ if (netif_running(netdev)) |
1980 |
+- netdev->netdev_ops->ndo_stop(netdev); |
1981 |
+- ixgbe_reset_interrupt_capability(adapter); |
1982 |
+- ixgbe_napi_del_all(adapter); |
1983 |
+- INIT_LIST_HEAD(&netdev->napi_list); |
1984 |
+- kfree(adapter->rx_ring); |
1985 |
+- adapter->rx_ring = temp_ring; |
1986 |
+- temp_ring = NULL; |
1987 |
+- |
1988 |
+- adapter->rx_ring_count = new_rx_count; |
1989 |
++ ixgbe_down(adapter); |
1990 |
++ |
1991 |
++ /* tx */ |
1992 |
++ if (new_tx_count != adapter->tx_ring_count) { |
1993 |
++ kfree(adapter->tx_ring); |
1994 |
++ adapter->tx_ring = temp_tx_ring; |
1995 |
++ temp_tx_ring = NULL; |
1996 |
++ adapter->tx_ring_count = new_tx_count; |
1997 |
++ } |
1998 |
++ |
1999 |
++ /* rx */ |
2000 |
++ if (new_rx_count != adapter->rx_ring_count) { |
2001 |
++ kfree(adapter->rx_ring); |
2002 |
++ adapter->rx_ring = temp_rx_ring; |
2003 |
++ temp_rx_ring = NULL; |
2004 |
++ adapter->rx_ring_count = new_rx_count; |
2005 |
++ } |
2006 |
+ } |
2007 |
+ |
2008 |
+ /* success! */ |
2009 |
+ err = 0; |
2010 |
+-err_setup: |
2011 |
+- ixgbe_init_interrupt_scheme(adapter); |
2012 |
+ if (netif_running(netdev)) |
2013 |
+- netdev->netdev_ops->ndo_open(netdev); |
2014 |
++ ixgbe_up(adapter); |
2015 |
+ |
2016 |
++err_setup: |
2017 |
+ clear_bit(__IXGBE_RESETTING, &adapter->state); |
2018 |
+ return err; |
2019 |
+ } |
2020 |
+diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c |
2021 |
+index 43fedb9..9201e5a 100644 |
2022 |
+--- a/drivers/net/r8169.c |
2023 |
++++ b/drivers/net/r8169.c |
2024 |
+@@ -2075,8 +2075,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2025 |
+ if (!tp->pcie_cap && netif_msg_probe(tp)) |
2026 |
+ dev_info(&pdev->dev, "no PCI Express capability\n"); |
2027 |
+ |
2028 |
+- /* Unneeded ? Don't mess with Mrs. Murphy. */ |
2029 |
+- rtl8169_irq_mask_and_ack(ioaddr); |
2030 |
++ RTL_W16(IntrMask, 0x0000); |
2031 |
+ |
2032 |
+ /* Soft reset the chip. */ |
2033 |
+ RTL_W8(ChipCmd, CmdReset); |
2034 |
+@@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
2035 |
+ msleep_interruptible(1); |
2036 |
+ } |
2037 |
+ |
2038 |
++ RTL_W16(IntrStatus, 0xffff); |
2039 |
++ |
2040 |
+ /* Identify chip attached to board */ |
2041 |
+ rtl8169_get_mac_version(tp, ioaddr); |
2042 |
+ |
2043 |
+diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c |
2044 |
+index ab0e09b..655e9b2 100644 |
2045 |
+--- a/drivers/net/sfc/efx.c |
2046 |
++++ b/drivers/net/sfc/efx.c |
2047 |
+@@ -424,10 +424,6 @@ static void efx_start_channel(struct efx_channel *channel) |
2048 |
+ |
2049 |
+ EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); |
2050 |
+ |
2051 |
+- if (!(channel->efx->net_dev->flags & IFF_UP)) |
2052 |
+- netif_napi_add(channel->napi_dev, &channel->napi_str, |
2053 |
+- efx_poll, napi_weight); |
2054 |
+- |
2055 |
+ /* The interrupt handler for this channel may set work_pending |
2056 |
+ * as soon as we enable it. Make sure it's cleared before |
2057 |
+ * then. Similarly, make sure it sees the enabled flag set. */ |
2058 |
+@@ -1273,6 +1269,8 @@ static int efx_init_napi(struct efx_nic *efx) |
2059 |
+ |
2060 |
+ efx_for_each_channel(channel, efx) { |
2061 |
+ channel->napi_dev = efx->net_dev; |
2062 |
++ netif_napi_add(channel->napi_dev, &channel->napi_str, |
2063 |
++ efx_poll, napi_weight); |
2064 |
+ rc = efx_lro_init(&channel->lro_mgr, efx); |
2065 |
+ if (rc) |
2066 |
+ goto err; |
2067 |
+@@ -1289,6 +1287,8 @@ static void efx_fini_napi(struct efx_nic *efx) |
2068 |
+ |
2069 |
+ efx_for_each_channel(channel, efx) { |
2070 |
+ efx_lro_fini(&channel->lro_mgr); |
2071 |
++ if (channel->napi_dev) |
2072 |
++ netif_napi_del(&channel->napi_str); |
2073 |
+ channel->napi_dev = NULL; |
2074 |
+ } |
2075 |
+ } |
2076 |
+diff --git a/drivers/net/skge.c b/drivers/net/skge.c |
2077 |
+index c9dbb06..2bbb44b 100644 |
2078 |
+--- a/drivers/net/skge.c |
2079 |
++++ b/drivers/net/skge.c |
2080 |
+@@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev) |
2081 |
+ if (netif_msg_ifdown(skge)) |
2082 |
+ printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); |
2083 |
+ |
2084 |
+- netif_stop_queue(dev); |
2085 |
++ netif_tx_disable(dev); |
2086 |
+ |
2087 |
+ if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) |
2088 |
+ del_timer_sync(&skge->link_timer); |
2089 |
+@@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev) |
2090 |
+ } |
2091 |
+ |
2092 |
+ skge->tx_ring.to_clean = e; |
2093 |
+- netif_wake_queue(dev); |
2094 |
+ } |
2095 |
+ |
2096 |
+ static void skge_tx_timeout(struct net_device *dev) |
2097 |
+@@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev) |
2098 |
+ |
2099 |
+ skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); |
2100 |
+ skge_tx_clean(dev); |
2101 |
++ netif_wake_queue(dev); |
2102 |
+ } |
2103 |
+ |
2104 |
+ static int skge_change_mtu(struct net_device *dev, int new_mtu) |
2105 |
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h |
2106 |
+index 39ecf3b..820fdb2 100644 |
2107 |
+--- a/drivers/net/wireless/rt2x00/rt2x00.h |
2108 |
++++ b/drivers/net/wireless/rt2x00/rt2x00.h |
2109 |
+@@ -687,8 +687,7 @@ struct rt2x00_dev { |
2110 |
+ */ |
2111 |
+ #ifdef CONFIG_RT2X00_LIB_RFKILL |
2112 |
+ unsigned long rfkill_state; |
2113 |
+-#define RFKILL_STATE_ALLOCATED 1 |
2114 |
+-#define RFKILL_STATE_REGISTERED 2 |
2115 |
++#define RFKILL_STATE_REGISTERED 1 |
2116 |
+ struct rfkill *rfkill; |
2117 |
+ struct delayed_work rfkill_work; |
2118 |
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */ |
2119 |
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c |
2120 |
+index 87c0f2c..e694bb7 100644 |
2121 |
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c |
2122 |
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c |
2123 |
+@@ -1105,7 +1105,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) |
2124 |
+ * Register extra components. |
2125 |
+ */ |
2126 |
+ rt2x00leds_register(rt2x00dev); |
2127 |
+- rt2x00rfkill_allocate(rt2x00dev); |
2128 |
+ rt2x00debug_register(rt2x00dev); |
2129 |
+ |
2130 |
+ set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); |
2131 |
+@@ -1137,7 +1136,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) |
2132 |
+ * Free extra components |
2133 |
+ */ |
2134 |
+ rt2x00debug_deregister(rt2x00dev); |
2135 |
+- rt2x00rfkill_free(rt2x00dev); |
2136 |
+ rt2x00leds_unregister(rt2x00dev); |
2137 |
+ |
2138 |
+ /* |
2139 |
+diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h |
2140 |
+index 86cd26f..49309d4 100644 |
2141 |
+--- a/drivers/net/wireless/rt2x00/rt2x00lib.h |
2142 |
++++ b/drivers/net/wireless/rt2x00/rt2x00lib.h |
2143 |
+@@ -260,8 +260,6 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, |
2144 |
+ #ifdef CONFIG_RT2X00_LIB_RFKILL |
2145 |
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev); |
2146 |
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev); |
2147 |
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev); |
2148 |
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev); |
2149 |
+ #else |
2150 |
+ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) |
2151 |
+ { |
2152 |
+@@ -270,14 +268,6 @@ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) |
2153 |
+ static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) |
2154 |
+ { |
2155 |
+ } |
2156 |
+- |
2157 |
+-static inline void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) |
2158 |
+-{ |
2159 |
+-} |
2160 |
+- |
2161 |
+-static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) |
2162 |
+-{ |
2163 |
+-} |
2164 |
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */ |
2165 |
+ |
2166 |
+ /* |
2167 |
+diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c |
2168 |
+index 3298cae..08ffc6d 100644 |
2169 |
+--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c |
2170 |
++++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c |
2171 |
+@@ -94,14 +94,50 @@ static void rt2x00rfkill_poll(struct work_struct *work) |
2172 |
+ &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL); |
2173 |
+ } |
2174 |
+ |
2175 |
++static int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) |
2176 |
++{ |
2177 |
++ struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy); |
2178 |
++ |
2179 |
++ rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN); |
2180 |
++ if (!rt2x00dev->rfkill) |
2181 |
++ return -ENOMEM; |
2182 |
++ |
2183 |
++ rt2x00dev->rfkill->name = rt2x00dev->ops->name; |
2184 |
++ rt2x00dev->rfkill->data = rt2x00dev; |
2185 |
++ rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; |
2186 |
++ if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) { |
2187 |
++ rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state; |
2188 |
++ rt2x00dev->rfkill->state = |
2189 |
++ rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ? |
2190 |
++ RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED; |
2191 |
++ } else { |
2192 |
++ rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED; |
2193 |
++ } |
2194 |
++ |
2195 |
++ INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll); |
2196 |
++ |
2197 |
++ return 0; |
2198 |
++} |
2199 |
++ |
2200 |
++static void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) |
2201 |
++{ |
2202 |
++ rfkill_free(rt2x00dev->rfkill); |
2203 |
++ rt2x00dev->rfkill = NULL; |
2204 |
++} |
2205 |
++ |
2206 |
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) |
2207 |
+ { |
2208 |
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) || |
2209 |
+- test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) |
2210 |
++ if (test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) |
2211 |
++ return; |
2212 |
++ |
2213 |
++ if (rt2x00rfkill_allocate(rt2x00dev)) { |
2214 |
++ ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); |
2215 |
+ return; |
2216 |
++ } |
2217 |
+ |
2218 |
+ if (rfkill_register(rt2x00dev->rfkill)) { |
2219 |
+ ERROR(rt2x00dev, "Failed to register rfkill handler.\n"); |
2220 |
++ rt2x00rfkill_free(rt2x00dev); |
2221 |
+ return; |
2222 |
+ } |
2223 |
+ |
2224 |
+@@ -117,8 +153,7 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) |
2225 |
+ |
2226 |
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) |
2227 |
+ { |
2228 |
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) || |
2229 |
+- !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) |
2230 |
++ if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) |
2231 |
+ return; |
2232 |
+ |
2233 |
+ cancel_delayed_work_sync(&rt2x00dev->rfkill_work); |
2234 |
+@@ -127,46 +162,3 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) |
2235 |
+ |
2236 |
+ __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state); |
2237 |
+ } |
2238 |
+- |
2239 |
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) |
2240 |
+-{ |
2241 |
+- struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy); |
2242 |
+- |
2243 |
+- if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) |
2244 |
+- return; |
2245 |
+- |
2246 |
+- rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN); |
2247 |
+- if (!rt2x00dev->rfkill) { |
2248 |
+- ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); |
2249 |
+- return; |
2250 |
+- } |
2251 |
+- |
2252 |
+- __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state); |
2253 |
+- |
2254 |
+- rt2x00dev->rfkill->name = rt2x00dev->ops->name; |
2255 |
+- rt2x00dev->rfkill->data = rt2x00dev; |
2256 |
+- rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; |
2257 |
+- if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) { |
2258 |
+- rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state; |
2259 |
+- rt2x00dev->rfkill->state = |
2260 |
+- rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ? |
2261 |
+- RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED; |
2262 |
+- } else { |
2263 |
+- rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED; |
2264 |
+- } |
2265 |
+- |
2266 |
+- INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll); |
2267 |
+- |
2268 |
+- return; |
2269 |
+-} |
2270 |
+- |
2271 |
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) |
2272 |
+-{ |
2273 |
+- if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) |
2274 |
+- return; |
2275 |
+- |
2276 |
+- cancel_delayed_work_sync(&rt2x00dev->rfkill_work); |
2277 |
+- |
2278 |
+- rfkill_free(rt2x00dev->rfkill); |
2279 |
+- rt2x00dev->rfkill = NULL; |
2280 |
+-} |
2281 |
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
2282 |
+index 55ec44a..31cfd86 100644 |
2283 |
+--- a/drivers/pci/probe.c |
2284 |
++++ b/drivers/pci/probe.c |
2285 |
+@@ -847,6 +847,11 @@ int pci_cfg_space_size(struct pci_dev *dev) |
2286 |
+ { |
2287 |
+ int pos; |
2288 |
+ u32 status; |
2289 |
++ u16 class; |
2290 |
++ |
2291 |
++ class = dev->class >> 8; |
2292 |
++ if (class == PCI_CLASS_BRIDGE_HOST) |
2293 |
++ return pci_cfg_space_size_ext(dev); |
2294 |
+ |
2295 |
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); |
2296 |
+ if (!pos) { |
2297 |
+@@ -936,7 +941,6 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) |
2298 |
+ dev->multifunction = !!(hdr_type & 0x80); |
2299 |
+ dev->vendor = l & 0xffff; |
2300 |
+ dev->device = (l >> 16) & 0xffff; |
2301 |
+- dev->cfg_size = pci_cfg_space_size(dev); |
2302 |
+ dev->error_state = pci_channel_io_normal; |
2303 |
+ set_pcie_port_type(dev); |
2304 |
+ |
2305 |
+@@ -952,6 +956,9 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) |
2306 |
+ return NULL; |
2307 |
+ } |
2308 |
+ |
2309 |
++ /* need to have dev->class ready */ |
2310 |
++ dev->cfg_size = pci_cfg_space_size(dev); |
2311 |
++ |
2312 |
+ return dev; |
2313 |
+ } |
2314 |
+ |
2315 |
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c |
2316 |
+index a6a42e8..60fbef2 100644 |
2317 |
+--- a/drivers/platform/x86/acer-wmi.c |
2318 |
++++ b/drivers/platform/x86/acer-wmi.c |
2319 |
+@@ -225,6 +225,25 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { |
2320 |
+ .wireless = 2, |
2321 |
+ }; |
2322 |
+ |
2323 |
++/* The Aspire One has a dummy ACPI-WMI interface - disable it */ |
2324 |
++static struct dmi_system_id __devinitdata acer_blacklist[] = { |
2325 |
++ { |
2326 |
++ .ident = "Acer Aspire One (SSD)", |
2327 |
++ .matches = { |
2328 |
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
2329 |
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), |
2330 |
++ }, |
2331 |
++ }, |
2332 |
++ { |
2333 |
++ .ident = "Acer Aspire One (HDD)", |
2334 |
++ .matches = { |
2335 |
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
2336 |
++ DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), |
2337 |
++ }, |
2338 |
++ }, |
2339 |
++ {} |
2340 |
++}; |
2341 |
++ |
2342 |
+ static struct dmi_system_id acer_quirks[] = { |
2343 |
+ { |
2344 |
+ .callback = dmi_matched, |
2345 |
+@@ -1254,6 +1273,12 @@ static int __init acer_wmi_init(void) |
2346 |
+ |
2347 |
+ printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n"); |
2348 |
+ |
2349 |
++ if (dmi_check_system(acer_blacklist)) { |
2350 |
++ printk(ACER_INFO "Blacklisted hardware detected - " |
2351 |
++ "not loading\n"); |
2352 |
++ return -ENODEV; |
2353 |
++ } |
2354 |
++ |
2355 |
+ find_quirks(); |
2356 |
+ |
2357 |
+ /* |
2358 |
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
2359 |
+index 809d32d..ca4467c 100644 |
2360 |
+--- a/drivers/scsi/libiscsi.c |
2361 |
++++ b/drivers/scsi/libiscsi.c |
2362 |
+@@ -1944,12 +1944,14 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) |
2363 |
+ num_arrays++; |
2364 |
+ q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); |
2365 |
+ if (q->pool == NULL) |
2366 |
+- goto enomem; |
2367 |
++ return -ENOMEM; |
2368 |
+ |
2369 |
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), |
2370 |
+ GFP_KERNEL, NULL); |
2371 |
+- if (q->queue == ERR_PTR(-ENOMEM)) |
2372 |
++ if (IS_ERR(q->queue)) { |
2373 |
++ q->queue = NULL; |
2374 |
+ goto enomem; |
2375 |
++ } |
2376 |
+ |
2377 |
+ for (i = 0; i < max; i++) { |
2378 |
+ q->pool[i] = kzalloc(item_size, GFP_KERNEL); |
2379 |
+@@ -1979,8 +1981,7 @@ void iscsi_pool_free(struct iscsi_pool *q) |
2380 |
+ |
2381 |
+ for (i = 0; i < q->max; i++) |
2382 |
+ kfree(q->pool[i]); |
2383 |
+- if (q->pool) |
2384 |
+- kfree(q->pool); |
2385 |
++ kfree(q->pool); |
2386 |
+ kfree(q->queue); |
2387 |
+ } |
2388 |
+ EXPORT_SYMBOL_GPL(iscsi_pool_free); |
2389 |
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c |
2390 |
+index 516925d..5e390d2 100644 |
2391 |
+--- a/drivers/scsi/sg.c |
2392 |
++++ b/drivers/scsi/sg.c |
2393 |
+@@ -101,6 +101,7 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; |
2394 |
+ #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1) |
2395 |
+ |
2396 |
+ static int sg_add(struct device *, struct class_interface *); |
2397 |
++static void sg_device_destroy(struct kref *kref); |
2398 |
+ static void sg_remove(struct device *, struct class_interface *); |
2399 |
+ |
2400 |
+ static DEFINE_IDR(sg_index_idr); |
2401 |
+@@ -137,6 +138,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ |
2402 |
+ volatile char done; /* 0->before bh, 1->before read, 2->read */ |
2403 |
+ struct request *rq; |
2404 |
+ struct bio *bio; |
2405 |
++ struct execute_work ew; |
2406 |
+ } Sg_request; |
2407 |
+ |
2408 |
+ typedef struct sg_fd { /* holds the state of a file descriptor */ |
2409 |
+@@ -158,6 +160,8 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ |
2410 |
+ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ |
2411 |
+ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ |
2412 |
+ char mmap_called; /* 0 -> mmap() never called on this fd */ |
2413 |
++ struct kref f_ref; |
2414 |
++ struct execute_work ew; |
2415 |
+ } Sg_fd; |
2416 |
+ |
2417 |
+ typedef struct sg_device { /* holds the state of each scsi generic device */ |
2418 |
+@@ -171,6 +175,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */ |
2419 |
+ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ |
2420 |
+ struct gendisk *disk; |
2421 |
+ struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ |
2422 |
++ struct kref d_ref; |
2423 |
+ } Sg_device; |
2424 |
+ |
2425 |
+ static int sg_fasync(int fd, struct file *filp, int mode); |
2426 |
+@@ -185,7 +190,7 @@ static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, |
2427 |
+ Sg_request * srp); |
2428 |
+ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, |
2429 |
+ const char __user *buf, size_t count, int blocking, |
2430 |
+- int read_only, Sg_request **o_srp); |
2431 |
++ int read_only, int sg_io_owned, Sg_request **o_srp); |
2432 |
+ static int sg_common_write(Sg_fd * sfp, Sg_request * srp, |
2433 |
+ unsigned char *cmnd, int timeout, int blocking); |
2434 |
+ static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); |
2435 |
+@@ -194,13 +199,14 @@ static void sg_build_reserve(Sg_fd * sfp, int req_size); |
2436 |
+ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); |
2437 |
+ static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); |
2438 |
+ static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); |
2439 |
+-static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
2440 |
+-static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); |
2441 |
++static void sg_remove_sfp(struct kref *); |
2442 |
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); |
2443 |
+ static Sg_request *sg_add_request(Sg_fd * sfp); |
2444 |
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); |
2445 |
+ static int sg_res_in_use(Sg_fd * sfp); |
2446 |
++static Sg_device *sg_lookup_dev(int dev); |
2447 |
+ static Sg_device *sg_get_dev(int dev); |
2448 |
++static void sg_put_dev(Sg_device *sdp); |
2449 |
+ #ifdef CONFIG_SCSI_PROC_FS |
2450 |
+ static int sg_last_dev(void); |
2451 |
+ #endif |
2452 |
+@@ -237,22 +243,17 @@ sg_open(struct inode *inode, struct file *filp) |
2453 |
+ nonseekable_open(inode, filp); |
2454 |
+ SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); |
2455 |
+ sdp = sg_get_dev(dev); |
2456 |
+- if ((!sdp) || (!sdp->device)) { |
2457 |
+- unlock_kernel(); |
2458 |
+- return -ENXIO; |
2459 |
+- } |
2460 |
+- if (sdp->detached) { |
2461 |
+- unlock_kernel(); |
2462 |
+- return -ENODEV; |
2463 |
++ if (IS_ERR(sdp)) { |
2464 |
++ retval = PTR_ERR(sdp); |
2465 |
++ sdp = NULL; |
2466 |
++ goto sg_put; |
2467 |
+ } |
2468 |
+ |
2469 |
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */ |
2470 |
+ /* Prevent the device driver from vanishing while we sleep */ |
2471 |
+ retval = scsi_device_get(sdp->device); |
2472 |
+- if (retval) { |
2473 |
+- unlock_kernel(); |
2474 |
+- return retval; |
2475 |
+- } |
2476 |
++ if (retval) |
2477 |
++ goto sg_put; |
2478 |
+ |
2479 |
+ if (!((flags & O_NONBLOCK) || |
2480 |
+ scsi_block_when_processing_errors(sdp->device))) { |
2481 |
+@@ -303,16 +304,20 @@ sg_open(struct inode *inode, struct file *filp) |
2482 |
+ if ((sfp = sg_add_sfp(sdp, dev))) |
2483 |
+ filp->private_data = sfp; |
2484 |
+ else { |
2485 |
+- if (flags & O_EXCL) |
2486 |
++ if (flags & O_EXCL) { |
2487 |
+ sdp->exclude = 0; /* undo if error */ |
2488 |
++ wake_up_interruptible(&sdp->o_excl_wait); |
2489 |
++ } |
2490 |
+ retval = -ENOMEM; |
2491 |
+ goto error_out; |
2492 |
+ } |
2493 |
+- unlock_kernel(); |
2494 |
+- return 0; |
2495 |
+- |
2496 |
+- error_out: |
2497 |
+- scsi_device_put(sdp->device); |
2498 |
++ retval = 0; |
2499 |
++error_out: |
2500 |
++ if (retval) |
2501 |
++ scsi_device_put(sdp->device); |
2502 |
++sg_put: |
2503 |
++ if (sdp) |
2504 |
++ sg_put_dev(sdp); |
2505 |
+ unlock_kernel(); |
2506 |
+ return retval; |
2507 |
+ } |
2508 |
+@@ -327,13 +332,13 @@ sg_release(struct inode *inode, struct file *filp) |
2509 |
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
2510 |
+ return -ENXIO; |
2511 |
+ SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); |
2512 |
+- if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ |
2513 |
+- if (!sdp->detached) { |
2514 |
+- scsi_device_put(sdp->device); |
2515 |
+- } |
2516 |
+- sdp->exclude = 0; |
2517 |
+- wake_up_interruptible(&sdp->o_excl_wait); |
2518 |
+- } |
2519 |
++ |
2520 |
++ sfp->closed = 1; |
2521 |
++ |
2522 |
++ sdp->exclude = 0; |
2523 |
++ wake_up_interruptible(&sdp->o_excl_wait); |
2524 |
++ |
2525 |
++ kref_put(&sfp->f_ref, sg_remove_sfp); |
2526 |
+ return 0; |
2527 |
+ } |
2528 |
+ |
2529 |
+@@ -557,7 +562,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
2530 |
+ return -EFAULT; |
2531 |
+ blocking = !(filp->f_flags & O_NONBLOCK); |
2532 |
+ if (old_hdr.reply_len < 0) |
2533 |
+- return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL); |
2534 |
++ return sg_new_write(sfp, filp, buf, count, |
2535 |
++ blocking, 0, 0, NULL); |
2536 |
+ if (count < (SZ_SG_HEADER + 6)) |
2537 |
+ return -EIO; /* The minimum scsi command length is 6 bytes. */ |
2538 |
+ |
2539 |
+@@ -638,7 +644,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
2540 |
+ |
2541 |
+ static ssize_t |
2542 |
+ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, |
2543 |
+- size_t count, int blocking, int read_only, |
2544 |
++ size_t count, int blocking, int read_only, int sg_io_owned, |
2545 |
+ Sg_request **o_srp) |
2546 |
+ { |
2547 |
+ int k; |
2548 |
+@@ -658,6 +664,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, |
2549 |
+ SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); |
2550 |
+ return -EDOM; |
2551 |
+ } |
2552 |
++ srp->sg_io_owned = sg_io_owned; |
2553 |
+ hp = &srp->header; |
2554 |
+ if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { |
2555 |
+ sg_remove_request(sfp, srp); |
2556 |
+@@ -755,24 +762,13 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, |
2557 |
+ hp->duration = jiffies_to_msecs(jiffies); |
2558 |
+ |
2559 |
+ srp->rq->timeout = timeout; |
2560 |
++ kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */ |
2561 |
+ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, |
2562 |
+ srp->rq, 1, sg_rq_end_io); |
2563 |
+ return 0; |
2564 |
+ } |
2565 |
+ |
2566 |
+ static int |
2567 |
+-sg_srp_done(Sg_request *srp, Sg_fd *sfp) |
2568 |
+-{ |
2569 |
+- unsigned long iflags; |
2570 |
+- int done; |
2571 |
+- |
2572 |
+- read_lock_irqsave(&sfp->rq_list_lock, iflags); |
2573 |
+- done = srp->done; |
2574 |
+- read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2575 |
+- return done; |
2576 |
+-} |
2577 |
+- |
2578 |
+-static int |
2579 |
+ sg_ioctl(struct inode *inode, struct file *filp, |
2580 |
+ unsigned int cmd_in, unsigned long arg) |
2581 |
+ { |
2582 |
+@@ -804,27 +800,26 @@ sg_ioctl(struct inode *inode, struct file *filp, |
2583 |
+ return -EFAULT; |
2584 |
+ result = |
2585 |
+ sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, |
2586 |
+- blocking, read_only, &srp); |
2587 |
++ blocking, read_only, 1, &srp); |
2588 |
+ if (result < 0) |
2589 |
+ return result; |
2590 |
+- srp->sg_io_owned = 1; |
2591 |
+ while (1) { |
2592 |
+ result = 0; /* following macro to beat race condition */ |
2593 |
+ __wait_event_interruptible(sfp->read_wait, |
2594 |
+- (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)), |
2595 |
+- result); |
2596 |
++ (srp->done || sdp->detached), |
2597 |
++ result); |
2598 |
+ if (sdp->detached) |
2599 |
+ return -ENODEV; |
2600 |
+- if (sfp->closed) |
2601 |
+- return 0; /* request packet dropped already */ |
2602 |
+- if (0 == result) |
2603 |
++ write_lock_irq(&sfp->rq_list_lock); |
2604 |
++ if (srp->done) { |
2605 |
++ srp->done = 2; |
2606 |
++ write_unlock_irq(&sfp->rq_list_lock); |
2607 |
+ break; |
2608 |
++ } |
2609 |
+ srp->orphan = 1; |
2610 |
++ write_unlock_irq(&sfp->rq_list_lock); |
2611 |
+ return result; /* -ERESTARTSYS because signal hit process */ |
2612 |
+ } |
2613 |
+- write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2614 |
+- srp->done = 2; |
2615 |
+- write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2616 |
+ result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); |
2617 |
+ return (result < 0) ? result : 0; |
2618 |
+ } |
2619 |
+@@ -1240,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) |
2620 |
+ return 0; |
2621 |
+ } |
2622 |
+ |
2623 |
++static void sg_rq_end_io_usercontext(struct work_struct *work) |
2624 |
++{ |
2625 |
++ struct sg_request *srp = container_of(work, struct sg_request, ew.work); |
2626 |
++ struct sg_fd *sfp = srp->parentfp; |
2627 |
++ |
2628 |
++ sg_finish_rem_req(srp); |
2629 |
++ kref_put(&sfp->f_ref, sg_remove_sfp); |
2630 |
++} |
2631 |
++ |
2632 |
+ /* |
2633 |
+ * This function is a "bottom half" handler that is called by the mid |
2634 |
+ * level when a command is completed (or has failed). |
2635 |
+@@ -1247,24 +1251,23 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) |
2636 |
+ static void sg_rq_end_io(struct request *rq, int uptodate) |
2637 |
+ { |
2638 |
+ struct sg_request *srp = rq->end_io_data; |
2639 |
+- Sg_device *sdp = NULL; |
2640 |
++ Sg_device *sdp; |
2641 |
+ Sg_fd *sfp; |
2642 |
+ unsigned long iflags; |
2643 |
+ unsigned int ms; |
2644 |
+ char *sense; |
2645 |
+- int result, resid; |
2646 |
++ int result, resid, done = 1; |
2647 |
+ |
2648 |
+- if (NULL == srp) { |
2649 |
+- printk(KERN_ERR "sg_cmd_done: NULL request\n"); |
2650 |
++ if (WARN_ON(srp->done != 0)) |
2651 |
+ return; |
2652 |
+- } |
2653 |
++ |
2654 |
+ sfp = srp->parentfp; |
2655 |
+- if (sfp) |
2656 |
+- sdp = sfp->parentdp; |
2657 |
+- if ((NULL == sdp) || sdp->detached) { |
2658 |
+- printk(KERN_INFO "sg_cmd_done: device detached\n"); |
2659 |
++ if (WARN_ON(sfp == NULL)) |
2660 |
+ return; |
2661 |
+- } |
2662 |
++ |
2663 |
++ sdp = sfp->parentdp; |
2664 |
++ if (unlikely(sdp->detached)) |
2665 |
++ printk(KERN_INFO "sg_rq_end_io: device detached\n"); |
2666 |
+ |
2667 |
+ sense = rq->sense; |
2668 |
+ result = rq->errors; |
2669 |
+@@ -1303,32 +1306,26 @@ static void sg_rq_end_io(struct request *rq, int uptodate) |
2670 |
+ } |
2671 |
+ /* Rely on write phase to clean out srp status values, so no "else" */ |
2672 |
+ |
2673 |
+- if (sfp->closed) { /* whoops this fd already released, cleanup */ |
2674 |
+- SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n")); |
2675 |
+- sg_finish_rem_req(srp); |
2676 |
+- srp = NULL; |
2677 |
+- if (NULL == sfp->headrp) { |
2678 |
+- SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n")); |
2679 |
+- if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */ |
2680 |
+- scsi_device_put(sdp->device); |
2681 |
+- } |
2682 |
+- sfp = NULL; |
2683 |
+- } |
2684 |
+- } else if (srp && srp->orphan) { |
2685 |
++ write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2686 |
++ if (unlikely(srp->orphan)) { |
2687 |
+ if (sfp->keep_orphan) |
2688 |
+ srp->sg_io_owned = 0; |
2689 |
+- else { |
2690 |
+- sg_finish_rem_req(srp); |
2691 |
+- srp = NULL; |
2692 |
+- } |
2693 |
++ else |
2694 |
++ done = 0; |
2695 |
+ } |
2696 |
+- if (sfp && srp) { |
2697 |
+- /* Now wake up any sg_read() that is waiting for this packet. */ |
2698 |
+- kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); |
2699 |
+- write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2700 |
+- srp->done = 1; |
2701 |
++ srp->done = done; |
2702 |
++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2703 |
++ |
2704 |
++ if (likely(done)) { |
2705 |
++ /* Now wake up any sg_read() that is waiting for this |
2706 |
++ * packet. |
2707 |
++ */ |
2708 |
+ wake_up_interruptible(&sfp->read_wait); |
2709 |
+- write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2710 |
++ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); |
2711 |
++ kref_put(&sfp->f_ref, sg_remove_sfp); |
2712 |
++ } else { |
2713 |
++ INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); |
2714 |
++ schedule_work(&srp->ew.work); |
2715 |
+ } |
2716 |
+ } |
2717 |
+ |
2718 |
+@@ -1364,17 +1361,18 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) |
2719 |
+ printk(KERN_WARNING "kmalloc Sg_device failure\n"); |
2720 |
+ return ERR_PTR(-ENOMEM); |
2721 |
+ } |
2722 |
+- error = -ENOMEM; |
2723 |
++ |
2724 |
+ if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) { |
2725 |
+ printk(KERN_WARNING "idr expansion Sg_device failure\n"); |
2726 |
++ error = -ENOMEM; |
2727 |
+ goto out; |
2728 |
+ } |
2729 |
+ |
2730 |
+ write_lock_irqsave(&sg_index_lock, iflags); |
2731 |
+- error = idr_get_new(&sg_index_idr, sdp, &k); |
2732 |
+- write_unlock_irqrestore(&sg_index_lock, iflags); |
2733 |
+ |
2734 |
++ error = idr_get_new(&sg_index_idr, sdp, &k); |
2735 |
+ if (error) { |
2736 |
++ write_unlock_irqrestore(&sg_index_lock, iflags); |
2737 |
+ printk(KERN_WARNING "idr allocation Sg_device failure: %d\n", |
2738 |
+ error); |
2739 |
+ goto out; |
2740 |
+@@ -1391,6 +1389,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) |
2741 |
+ init_waitqueue_head(&sdp->o_excl_wait); |
2742 |
+ sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); |
2743 |
+ sdp->index = k; |
2744 |
++ kref_init(&sdp->d_ref); |
2745 |
++ |
2746 |
++ write_unlock_irqrestore(&sg_index_lock, iflags); |
2747 |
+ |
2748 |
+ error = 0; |
2749 |
+ out: |
2750 |
+@@ -1401,6 +1402,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) |
2751 |
+ return sdp; |
2752 |
+ |
2753 |
+ overflow: |
2754 |
++ idr_remove(&sg_index_idr, k); |
2755 |
++ write_unlock_irqrestore(&sg_index_lock, iflags); |
2756 |
+ sdev_printk(KERN_WARNING, scsidp, |
2757 |
+ "Unable to attach sg device type=%d, minor " |
2758 |
+ "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); |
2759 |
+@@ -1488,49 +1491,46 @@ out: |
2760 |
+ return error; |
2761 |
+ } |
2762 |
+ |
2763 |
+-static void |
2764 |
+-sg_remove(struct device *cl_dev, struct class_interface *cl_intf) |
2765 |
++static void sg_device_destroy(struct kref *kref) |
2766 |
++{ |
2767 |
++ struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); |
2768 |
++ unsigned long flags; |
2769 |
++ |
2770 |
++ /* CAUTION! Note that the device can still be found via idr_find() |
2771 |
++ * even though the refcount is 0. Therefore, do idr_remove() BEFORE |
2772 |
++ * any other cleanup. |
2773 |
++ */ |
2774 |
++ |
2775 |
++ write_lock_irqsave(&sg_index_lock, flags); |
2776 |
++ idr_remove(&sg_index_idr, sdp->index); |
2777 |
++ write_unlock_irqrestore(&sg_index_lock, flags); |
2778 |
++ |
2779 |
++ SCSI_LOG_TIMEOUT(3, |
2780 |
++ printk("sg_device_destroy: %s\n", |
2781 |
++ sdp->disk->disk_name)); |
2782 |
++ |
2783 |
++ put_disk(sdp->disk); |
2784 |
++ kfree(sdp); |
2785 |
++} |
2786 |
++ |
2787 |
++static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) |
2788 |
+ { |
2789 |
+ struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); |
2790 |
+ Sg_device *sdp = dev_get_drvdata(cl_dev); |
2791 |
+ unsigned long iflags; |
2792 |
+ Sg_fd *sfp; |
2793 |
+- Sg_fd *tsfp; |
2794 |
+- Sg_request *srp; |
2795 |
+- Sg_request *tsrp; |
2796 |
+- int delay; |
2797 |
+ |
2798 |
+- if (!sdp) |
2799 |
++ if (!sdp || sdp->detached) |
2800 |
+ return; |
2801 |
+ |
2802 |
+- delay = 0; |
2803 |
++ SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name)); |
2804 |
++ |
2805 |
++ /* Need a write lock to set sdp->detached. */ |
2806 |
+ write_lock_irqsave(&sg_index_lock, iflags); |
2807 |
+- if (sdp->headfp) { |
2808 |
+- sdp->detached = 1; |
2809 |
+- for (sfp = sdp->headfp; sfp; sfp = tsfp) { |
2810 |
+- tsfp = sfp->nextfp; |
2811 |
+- for (srp = sfp->headrp; srp; srp = tsrp) { |
2812 |
+- tsrp = srp->nextrp; |
2813 |
+- if (sfp->closed || (0 == sg_srp_done(srp, sfp))) |
2814 |
+- sg_finish_rem_req(srp); |
2815 |
+- } |
2816 |
+- if (sfp->closed) { |
2817 |
+- scsi_device_put(sdp->device); |
2818 |
+- __sg_remove_sfp(sdp, sfp); |
2819 |
+- } else { |
2820 |
+- delay = 1; |
2821 |
+- wake_up_interruptible(&sfp->read_wait); |
2822 |
+- kill_fasync(&sfp->async_qp, SIGPOLL, |
2823 |
+- POLL_HUP); |
2824 |
+- } |
2825 |
+- } |
2826 |
+- SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index)); |
2827 |
+- if (NULL == sdp->headfp) { |
2828 |
+- idr_remove(&sg_index_idr, sdp->index); |
2829 |
+- } |
2830 |
+- } else { /* nothing active, simple case */ |
2831 |
+- SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index)); |
2832 |
+- idr_remove(&sg_index_idr, sdp->index); |
2833 |
++ sdp->detached = 1; |
2834 |
++ for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) { |
2835 |
++ wake_up_interruptible(&sfp->read_wait); |
2836 |
++ kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); |
2837 |
+ } |
2838 |
+ write_unlock_irqrestore(&sg_index_lock, iflags); |
2839 |
+ |
2840 |
+@@ -1538,13 +1538,8 @@ sg_remove(struct device *cl_dev, struct class_interface *cl_intf) |
2841 |
+ device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); |
2842 |
+ cdev_del(sdp->cdev); |
2843 |
+ sdp->cdev = NULL; |
2844 |
+- put_disk(sdp->disk); |
2845 |
+- sdp->disk = NULL; |
2846 |
+- if (NULL == sdp->headfp) |
2847 |
+- kfree(sdp); |
2848 |
+ |
2849 |
+- if (delay) |
2850 |
+- msleep(10); /* dirty detach so delay device destruction */ |
2851 |
++ sg_put_dev(sdp); |
2852 |
+ } |
2853 |
+ |
2854 |
+ module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); |
2855 |
+@@ -1673,10 +1668,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) |
2856 |
+ md->null_mapped = hp->dxferp ? 0 : 1; |
2857 |
+ } |
2858 |
+ |
2859 |
+- if (iov_count) |
2860 |
+- res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count, |
2861 |
+- hp->dxfer_len, GFP_ATOMIC); |
2862 |
+- else |
2863 |
++ if (iov_count) { |
2864 |
++ int len, size = sizeof(struct sg_iovec) * iov_count; |
2865 |
++ struct iovec *iov; |
2866 |
++ |
2867 |
++ iov = kmalloc(size, GFP_ATOMIC); |
2868 |
++ if (!iov) |
2869 |
++ return -ENOMEM; |
2870 |
++ |
2871 |
++ if (copy_from_user(iov, hp->dxferp, size)) { |
2872 |
++ kfree(iov); |
2873 |
++ return -EFAULT; |
2874 |
++ } |
2875 |
++ |
2876 |
++ len = iov_length(iov, iov_count); |
2877 |
++ if (hp->dxfer_len < len) { |
2878 |
++ iov_count = iov_shorten(iov, iov_count, hp->dxfer_len); |
2879 |
++ len = hp->dxfer_len; |
2880 |
++ } |
2881 |
++ |
2882 |
++ res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov, |
2883 |
++ iov_count, |
2884 |
++ len, GFP_ATOMIC); |
2885 |
++ kfree(iov); |
2886 |
++ } else |
2887 |
+ res = blk_rq_map_user(q, rq, md, hp->dxferp, |
2888 |
+ hp->dxfer_len, GFP_ATOMIC); |
2889 |
+ |
2890 |
+@@ -1941,22 +1956,6 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) |
2891 |
+ return resp; |
2892 |
+ } |
2893 |
+ |
2894 |
+-#ifdef CONFIG_SCSI_PROC_FS |
2895 |
+-static Sg_request * |
2896 |
+-sg_get_nth_request(Sg_fd * sfp, int nth) |
2897 |
+-{ |
2898 |
+- Sg_request *resp; |
2899 |
+- unsigned long iflags; |
2900 |
+- int k; |
2901 |
+- |
2902 |
+- read_lock_irqsave(&sfp->rq_list_lock, iflags); |
2903 |
+- for (k = 0, resp = sfp->headrp; resp && (k < nth); |
2904 |
+- ++k, resp = resp->nextrp) ; |
2905 |
+- read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2906 |
+- return resp; |
2907 |
+-} |
2908 |
+-#endif |
2909 |
+- |
2910 |
+ /* always adds to end of list */ |
2911 |
+ static Sg_request * |
2912 |
+ sg_add_request(Sg_fd * sfp) |
2913 |
+@@ -2032,22 +2031,6 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp) |
2914 |
+ return res; |
2915 |
+ } |
2916 |
+ |
2917 |
+-#ifdef CONFIG_SCSI_PROC_FS |
2918 |
+-static Sg_fd * |
2919 |
+-sg_get_nth_sfp(Sg_device * sdp, int nth) |
2920 |
+-{ |
2921 |
+- Sg_fd *resp; |
2922 |
+- unsigned long iflags; |
2923 |
+- int k; |
2924 |
+- |
2925 |
+- read_lock_irqsave(&sg_index_lock, iflags); |
2926 |
+- for (k = 0, resp = sdp->headfp; resp && (k < nth); |
2927 |
+- ++k, resp = resp->nextfp) ; |
2928 |
+- read_unlock_irqrestore(&sg_index_lock, iflags); |
2929 |
+- return resp; |
2930 |
+-} |
2931 |
+-#endif |
2932 |
+- |
2933 |
+ static Sg_fd * |
2934 |
+ sg_add_sfp(Sg_device * sdp, int dev) |
2935 |
+ { |
2936 |
+@@ -2062,6 +2045,7 @@ sg_add_sfp(Sg_device * sdp, int dev) |
2937 |
+ init_waitqueue_head(&sfp->read_wait); |
2938 |
+ rwlock_init(&sfp->rq_list_lock); |
2939 |
+ |
2940 |
++ kref_init(&sfp->f_ref); |
2941 |
+ sfp->timeout = SG_DEFAULT_TIMEOUT; |
2942 |
+ sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; |
2943 |
+ sfp->force_packid = SG_DEF_FORCE_PACK_ID; |
2944 |
+@@ -2089,15 +2073,54 @@ sg_add_sfp(Sg_device * sdp, int dev) |
2945 |
+ sg_build_reserve(sfp, bufflen); |
2946 |
+ SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", |
2947 |
+ sfp->reserve.bufflen, sfp->reserve.k_use_sg)); |
2948 |
++ |
2949 |
++ kref_get(&sdp->d_ref); |
2950 |
++ __module_get(THIS_MODULE); |
2951 |
+ return sfp; |
2952 |
+ } |
2953 |
+ |
2954 |
+-static void |
2955 |
+-__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) |
2956 |
++static void sg_remove_sfp_usercontext(struct work_struct *work) |
2957 |
++{ |
2958 |
++ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); |
2959 |
++ struct sg_device *sdp = sfp->parentdp; |
2960 |
++ |
2961 |
++ /* Cleanup any responses which were never read(). */ |
2962 |
++ while (sfp->headrp) |
2963 |
++ sg_finish_rem_req(sfp->headrp); |
2964 |
++ |
2965 |
++ if (sfp->reserve.bufflen > 0) { |
2966 |
++ SCSI_LOG_TIMEOUT(6, |
2967 |
++ printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", |
2968 |
++ (int) sfp->reserve.bufflen, |
2969 |
++ (int) sfp->reserve.k_use_sg)); |
2970 |
++ sg_remove_scat(&sfp->reserve); |
2971 |
++ } |
2972 |
++ |
2973 |
++ SCSI_LOG_TIMEOUT(6, |
2974 |
++ printk("sg_remove_sfp: %s, sfp=0x%p\n", |
2975 |
++ sdp->disk->disk_name, |
2976 |
++ sfp)); |
2977 |
++ kfree(sfp); |
2978 |
++ |
2979 |
++ scsi_device_put(sdp->device); |
2980 |
++ sg_put_dev(sdp); |
2981 |
++ module_put(THIS_MODULE); |
2982 |
++} |
2983 |
++ |
2984 |
++static void sg_remove_sfp(struct kref *kref) |
2985 |
+ { |
2986 |
++ struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); |
2987 |
++ struct sg_device *sdp = sfp->parentdp; |
2988 |
+ Sg_fd *fp; |
2989 |
+ Sg_fd *prev_fp; |
2990 |
++ unsigned long iflags; |
2991 |
+ |
2992 |
++ /* CAUTION! Note that sfp can still be found by walking sdp->headfp |
2993 |
++ * even though the refcount is now 0. Therefore, unlink sfp from |
2994 |
++ * sdp->headfp BEFORE doing any other cleanup. |
2995 |
++ */ |
2996 |
++ |
2997 |
++ write_lock_irqsave(&sg_index_lock, iflags); |
2998 |
+ prev_fp = sdp->headfp; |
2999 |
+ if (sfp == prev_fp) |
3000 |
+ sdp->headfp = prev_fp->nextfp; |
3001 |
+@@ -2110,54 +2133,11 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) |
3002 |
+ prev_fp = fp; |
3003 |
+ } |
3004 |
+ } |
3005 |
+- if (sfp->reserve.bufflen > 0) { |
3006 |
+- SCSI_LOG_TIMEOUT(6, |
3007 |
+- printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", |
3008 |
+- (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); |
3009 |
+- sg_remove_scat(&sfp->reserve); |
3010 |
+- } |
3011 |
+- sfp->parentdp = NULL; |
3012 |
+- SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); |
3013 |
+- kfree(sfp); |
3014 |
+-} |
3015 |
+- |
3016 |
+-/* Returns 0 in normal case, 1 when detached and sdp object removed */ |
3017 |
+-static int |
3018 |
+-sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) |
3019 |
+-{ |
3020 |
+- Sg_request *srp; |
3021 |
+- Sg_request *tsrp; |
3022 |
+- int dirty = 0; |
3023 |
+- int res = 0; |
3024 |
+- |
3025 |
+- for (srp = sfp->headrp; srp; srp = tsrp) { |
3026 |
+- tsrp = srp->nextrp; |
3027 |
+- if (sg_srp_done(srp, sfp)) |
3028 |
+- sg_finish_rem_req(srp); |
3029 |
+- else |
3030 |
+- ++dirty; |
3031 |
+- } |
3032 |
+- if (0 == dirty) { |
3033 |
+- unsigned long iflags; |
3034 |
++ write_unlock_irqrestore(&sg_index_lock, iflags); |
3035 |
++ wake_up_interruptible(&sdp->o_excl_wait); |
3036 |
+ |
3037 |
+- write_lock_irqsave(&sg_index_lock, iflags); |
3038 |
+- __sg_remove_sfp(sdp, sfp); |
3039 |
+- if (sdp->detached && (NULL == sdp->headfp)) { |
3040 |
+- idr_remove(&sg_index_idr, sdp->index); |
3041 |
+- kfree(sdp); |
3042 |
+- res = 1; |
3043 |
+- } |
3044 |
+- write_unlock_irqrestore(&sg_index_lock, iflags); |
3045 |
+- } else { |
3046 |
+- /* MOD_INC's to inhibit unloading sg and associated adapter driver */ |
3047 |
+- /* only bump the access_count if we actually succeeded in |
3048 |
+- * throwing another counter on the host module */ |
3049 |
+- scsi_device_get(sdp->device); /* XXX: retval ignored? */ |
3050 |
+- sfp->closed = 1; /* flag dirty state on this fd */ |
3051 |
+- SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n", |
3052 |
+- dirty)); |
3053 |
+- } |
3054 |
+- return res; |
3055 |
++ INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); |
3056 |
++ schedule_work(&sfp->ew.work); |
3057 |
+ } |
3058 |
+ |
3059 |
+ static int |
3060 |
+@@ -2199,19 +2179,38 @@ sg_last_dev(void) |
3061 |
+ } |
3062 |
+ #endif |
3063 |
+ |
3064 |
+-static Sg_device * |
3065 |
+-sg_get_dev(int dev) |
3066 |
++/* must be called with sg_index_lock held */ |
3067 |
++static Sg_device *sg_lookup_dev(int dev) |
3068 |
+ { |
3069 |
+- Sg_device *sdp; |
3070 |
+- unsigned long iflags; |
3071 |
++ return idr_find(&sg_index_idr, dev); |
3072 |
++} |
3073 |
+ |
3074 |
+- read_lock_irqsave(&sg_index_lock, iflags); |
3075 |
+- sdp = idr_find(&sg_index_idr, dev); |
3076 |
+- read_unlock_irqrestore(&sg_index_lock, iflags); |
3077 |
++static Sg_device *sg_get_dev(int dev) |
3078 |
++{ |
3079 |
++ struct sg_device *sdp; |
3080 |
++ unsigned long flags; |
3081 |
++ |
3082 |
++ read_lock_irqsave(&sg_index_lock, flags); |
3083 |
++ sdp = sg_lookup_dev(dev); |
3084 |
++ if (!sdp) |
3085 |
++ sdp = ERR_PTR(-ENXIO); |
3086 |
++ else if (sdp->detached) { |
3087 |
++ /* If sdp->detached, then the refcount may already be 0, in |
3088 |
++ * which case it would be a bug to do kref_get(). |
3089 |
++ */ |
3090 |
++ sdp = ERR_PTR(-ENODEV); |
3091 |
++ } else |
3092 |
++ kref_get(&sdp->d_ref); |
3093 |
++ read_unlock_irqrestore(&sg_index_lock, flags); |
3094 |
+ |
3095 |
+ return sdp; |
3096 |
+ } |
3097 |
+ |
3098 |
++static void sg_put_dev(struct sg_device *sdp) |
3099 |
++{ |
3100 |
++ kref_put(&sdp->d_ref, sg_device_destroy); |
3101 |
++} |
3102 |
++ |
3103 |
+ #ifdef CONFIG_SCSI_PROC_FS |
3104 |
+ |
3105 |
+ static struct proc_dir_entry *sg_proc_sgp = NULL; |
3106 |
+@@ -2468,8 +2467,10 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v) |
3107 |
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
3108 |
+ Sg_device *sdp; |
3109 |
+ struct scsi_device *scsidp; |
3110 |
++ unsigned long iflags; |
3111 |
+ |
3112 |
+- sdp = it ? sg_get_dev(it->index) : NULL; |
3113 |
++ read_lock_irqsave(&sg_index_lock, iflags); |
3114 |
++ sdp = it ? sg_lookup_dev(it->index) : NULL; |
3115 |
+ if (sdp && (scsidp = sdp->device) && (!sdp->detached)) |
3116 |
+ seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", |
3117 |
+ scsidp->host->host_no, scsidp->channel, |
3118 |
+@@ -2480,6 +2481,7 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v) |
3119 |
+ (int) scsi_device_online(scsidp)); |
3120 |
+ else |
3121 |
+ seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); |
3122 |
++ read_unlock_irqrestore(&sg_index_lock, iflags); |
3123 |
+ return 0; |
3124 |
+ } |
3125 |
+ |
3126 |
+@@ -2493,16 +2495,20 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) |
3127 |
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
3128 |
+ Sg_device *sdp; |
3129 |
+ struct scsi_device *scsidp; |
3130 |
++ unsigned long iflags; |
3131 |
+ |
3132 |
+- sdp = it ? sg_get_dev(it->index) : NULL; |
3133 |
++ read_lock_irqsave(&sg_index_lock, iflags); |
3134 |
++ sdp = it ? sg_lookup_dev(it->index) : NULL; |
3135 |
+ if (sdp && (scsidp = sdp->device) && (!sdp->detached)) |
3136 |
+ seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", |
3137 |
+ scsidp->vendor, scsidp->model, scsidp->rev); |
3138 |
+ else |
3139 |
+ seq_printf(s, "<no active device>\n"); |
3140 |
++ read_unlock_irqrestore(&sg_index_lock, iflags); |
3141 |
+ return 0; |
3142 |
+ } |
3143 |
+ |
3144 |
++/* must be called while holding sg_index_lock */ |
3145 |
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) |
3146 |
+ { |
3147 |
+ int k, m, new_interface, blen, usg; |
3148 |
+@@ -2512,7 +2518,8 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) |
3149 |
+ const char * cp; |
3150 |
+ unsigned int ms; |
3151 |
+ |
3152 |
+- for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { |
3153 |
++ for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) { |
3154 |
++ read_lock(&fp->rq_list_lock); /* irqs already disabled */ |
3155 |
+ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " |
3156 |
+ "(res)sgat=%d low_dma=%d\n", k + 1, |
3157 |
+ jiffies_to_msecs(fp->timeout), |
3158 |
+@@ -2522,7 +2529,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) |
3159 |
+ seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", |
3160 |
+ (int) fp->cmd_q, (int) fp->force_packid, |
3161 |
+ (int) fp->keep_orphan, (int) fp->closed); |
3162 |
+- for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) { |
3163 |
++ for (m = 0, srp = fp->headrp; |
3164 |
++ srp != NULL; |
3165 |
++ ++m, srp = srp->nextrp) { |
3166 |
+ hp = &srp->header; |
3167 |
+ new_interface = (hp->interface_id == '\0') ? 0 : 1; |
3168 |
+ if (srp->res_used) { |
3169 |
+@@ -2559,6 +2568,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) |
3170 |
+ } |
3171 |
+ if (0 == m) |
3172 |
+ seq_printf(s, " No requests active\n"); |
3173 |
++ read_unlock(&fp->rq_list_lock); |
3174 |
+ } |
3175 |
+ } |
3176 |
+ |
3177 |
+@@ -2571,39 +2581,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v) |
3178 |
+ { |
3179 |
+ struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
3180 |
+ Sg_device *sdp; |
3181 |
++ unsigned long iflags; |
3182 |
+ |
3183 |
+ if (it && (0 == it->index)) { |
3184 |
+ seq_printf(s, "max_active_device=%d(origin 1)\n", |
3185 |
+ (int)it->max); |
3186 |
+ seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); |
3187 |
+ } |
3188 |
+- sdp = it ? sg_get_dev(it->index) : NULL; |
3189 |
+- if (sdp) { |
3190 |
+- struct scsi_device *scsidp = sdp->device; |
3191 |
+ |
3192 |
+- if (NULL == scsidp) { |
3193 |
+- seq_printf(s, "device %d detached ??\n", |
3194 |
+- (int)it->index); |
3195 |
+- return 0; |
3196 |
+- } |
3197 |
++ read_lock_irqsave(&sg_index_lock, iflags); |
3198 |
++ sdp = it ? sg_lookup_dev(it->index) : NULL; |
3199 |
++ if (sdp && sdp->headfp) { |
3200 |
++ struct scsi_device *scsidp = sdp->device; |
3201 |
+ |
3202 |
+- if (sg_get_nth_sfp(sdp, 0)) { |
3203 |
+- seq_printf(s, " >>> device=%s ", |
3204 |
+- sdp->disk->disk_name); |
3205 |
+- if (sdp->detached) |
3206 |
+- seq_printf(s, "detached pending close "); |
3207 |
+- else |
3208 |
+- seq_printf |
3209 |
+- (s, "scsi%d chan=%d id=%d lun=%d em=%d", |
3210 |
+- scsidp->host->host_no, |
3211 |
+- scsidp->channel, scsidp->id, |
3212 |
+- scsidp->lun, |
3213 |
+- scsidp->host->hostt->emulated); |
3214 |
+- seq_printf(s, " sg_tablesize=%d excl=%d\n", |
3215 |
+- sdp->sg_tablesize, sdp->exclude); |
3216 |
+- } |
3217 |
++ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); |
3218 |
++ if (sdp->detached) |
3219 |
++ seq_printf(s, "detached pending close "); |
3220 |
++ else |
3221 |
++ seq_printf |
3222 |
++ (s, "scsi%d chan=%d id=%d lun=%d em=%d", |
3223 |
++ scsidp->host->host_no, |
3224 |
++ scsidp->channel, scsidp->id, |
3225 |
++ scsidp->lun, |
3226 |
++ scsidp->host->hostt->emulated); |
3227 |
++ seq_printf(s, " sg_tablesize=%d excl=%d\n", |
3228 |
++ sdp->sg_tablesize, sdp->exclude); |
3229 |
+ sg_proc_debug_helper(s, sdp); |
3230 |
+ } |
3231 |
++ read_unlock_irqrestore(&sg_index_lock, iflags); |
3232 |
+ return 0; |
3233 |
+ } |
3234 |
+ |
3235 |
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c |
3236 |
+index 643908b..8eba98c 100644 |
3237 |
+--- a/drivers/spi/spi.c |
3238 |
++++ b/drivers/spi/spi.c |
3239 |
+@@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi, |
3240 |
+ |
3241 |
+ int status; |
3242 |
+ struct spi_message message; |
3243 |
+- struct spi_transfer x; |
3244 |
++ struct spi_transfer x[2]; |
3245 |
+ u8 *local_buf; |
3246 |
+ |
3247 |
+ /* Use preallocated DMA-safe buffer. We can't avoid copying here, |
3248 |
+@@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi, |
3249 |
+ return -EINVAL; |
3250 |
+ |
3251 |
+ spi_message_init(&message); |
3252 |
+- memset(&x, 0, sizeof x); |
3253 |
+- x.len = n_tx + n_rx; |
3254 |
+- spi_message_add_tail(&x, &message); |
3255 |
++ memset(x, 0, sizeof x); |
3256 |
++ if (n_tx) { |
3257 |
++ x[0].len = n_tx; |
3258 |
++ spi_message_add_tail(&x[0], &message); |
3259 |
++ } |
3260 |
++ if (n_rx) { |
3261 |
++ x[1].len = n_rx; |
3262 |
++ spi_message_add_tail(&x[1], &message); |
3263 |
++ } |
3264 |
+ |
3265 |
+ /* ... unless someone else is using the pre-allocated buffer */ |
3266 |
+ if (!mutex_trylock(&lock)) { |
3267 |
+@@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi, |
3268 |
+ local_buf = buf; |
3269 |
+ |
3270 |
+ memcpy(local_buf, txbuf, n_tx); |
3271 |
+- x.tx_buf = local_buf; |
3272 |
+- x.rx_buf = local_buf; |
3273 |
++ x[0].tx_buf = local_buf; |
3274 |
++ x[1].rx_buf = local_buf + n_tx; |
3275 |
+ |
3276 |
+ /* do the i/o */ |
3277 |
+ status = spi_sync(spi, &message); |
3278 |
+ if (status == 0) |
3279 |
+- memcpy(rxbuf, x.rx_buf + n_tx, n_rx); |
3280 |
++ memcpy(rxbuf, x[1].rx_buf, n_rx); |
3281 |
+ |
3282 |
+- if (x.tx_buf == buf) |
3283 |
++ if (x[0].tx_buf == buf) |
3284 |
+ mutex_unlock(&lock); |
3285 |
+ else |
3286 |
+ kfree(local_buf); |
3287 |
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c |
3288 |
+index 3771d6e..34e6108 100644 |
3289 |
+--- a/drivers/usb/class/cdc-wdm.c |
3290 |
++++ b/drivers/usb/class/cdc-wdm.c |
3291 |
+@@ -652,7 +652,7 @@ next_desc: |
3292 |
+ |
3293 |
+ iface = &intf->altsetting[0]; |
3294 |
+ ep = &iface->endpoint[0].desc; |
3295 |
+- if (!usb_endpoint_is_int_in(ep)) { |
3296 |
++ if (!ep || !usb_endpoint_is_int_in(ep)) { |
3297 |
+ rv = -EINVAL; |
3298 |
+ goto err; |
3299 |
+ } |
3300 |
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c |
3301 |
+index 96d65ca..4007770 100644 |
3302 |
+--- a/drivers/usb/gadget/u_ether.c |
3303 |
++++ b/drivers/usb/gadget/u_ether.c |
3304 |
+@@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) |
3305 |
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info); |
3306 |
+ } |
3307 |
+ |
3308 |
+-static u32 eth_get_link(struct net_device *net) |
3309 |
+-{ |
3310 |
+- struct eth_dev *dev = netdev_priv(net); |
3311 |
+- return dev->gadget->speed != USB_SPEED_UNKNOWN; |
3312 |
+-} |
3313 |
+- |
3314 |
+ /* REVISIT can also support: |
3315 |
+ * - WOL (by tracking suspends and issuing remote wakeup) |
3316 |
+ * - msglevel (implies updated messaging) |
3317 |
+@@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net) |
3318 |
+ |
3319 |
+ static struct ethtool_ops ops = { |
3320 |
+ .get_drvinfo = eth_get_drvinfo, |
3321 |
+- .get_link = eth_get_link |
3322 |
++ .get_link = ethtool_op_get_link, |
3323 |
+ }; |
3324 |
+ |
3325 |
+ static void defer_kevent(struct eth_dev *dev, int flag) |
3326 |
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
3327 |
+index ae84c32..bb3143e 100644 |
3328 |
+--- a/drivers/usb/serial/ftdi_sio.c |
3329 |
++++ b/drivers/usb/serial/ftdi_sio.c |
3330 |
+@@ -668,6 +668,7 @@ static struct usb_device_id id_table_combined [] = { |
3331 |
+ { USB_DEVICE(DE_VID, WHT_PID) }, |
3332 |
+ { USB_DEVICE(ADI_VID, ADI_GNICE_PID), |
3333 |
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
3334 |
++ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, |
3335 |
+ { }, /* Optional parameter entry */ |
3336 |
+ { } /* Terminating entry */ |
3337 |
+ }; |
3338 |
+diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h |
3339 |
+index daaf63d..c09f658 100644 |
3340 |
+--- a/drivers/usb/serial/ftdi_sio.h |
3341 |
++++ b/drivers/usb/serial/ftdi_sio.h |
3342 |
+@@ -913,6 +913,13 @@ |
3343 |
+ #define ADI_GNICE_PID 0xF000 |
3344 |
+ |
3345 |
+ /* |
3346 |
++ * JETI SPECTROMETER SPECBOS 1201 |
3347 |
++ * http://www.jeti.com/products/sys/scb/scb1201.php |
3348 |
++ */ |
3349 |
++#define JETI_VID 0x0c6c |
3350 |
++#define JETI_SPC1201_PID 0x04b2 |
3351 |
++ |
3352 |
++/* |
3353 |
+ * BmRequestType: 1100 0000b |
3354 |
+ * bRequest: FTDI_E2_READ |
3355 |
+ * wValue: 0 |
3356 |
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c |
3357 |
+index 2620bf6..9c4c700 100644 |
3358 |
+--- a/drivers/usb/serial/ti_usb_3410_5052.c |
3359 |
++++ b/drivers/usb/serial/ti_usb_3410_5052.c |
3360 |
+@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb) |
3361 |
+ } |
3362 |
+ |
3363 |
+ tty = tty_port_tty_get(&port->port); |
3364 |
+- if (tty && urb->actual_length) { |
3365 |
+- usb_serial_debug_data(debug, dev, __func__, |
3366 |
+- urb->actual_length, urb->transfer_buffer); |
3367 |
+- |
3368 |
+- if (!tport->tp_is_open) |
3369 |
+- dbg("%s - port closed, dropping data", __func__); |
3370 |
+- else |
3371 |
+- ti_recv(&urb->dev->dev, tty, |
3372 |
++ if (tty) { |
3373 |
++ if (urb->actual_length) { |
3374 |
++ usb_serial_debug_data(debug, dev, __func__, |
3375 |
++ urb->actual_length, urb->transfer_buffer); |
3376 |
++ |
3377 |
++ if (!tport->tp_is_open) |
3378 |
++ dbg("%s - port closed, dropping data", |
3379 |
++ __func__); |
3380 |
++ else |
3381 |
++ ti_recv(&urb->dev->dev, tty, |
3382 |
+ urb->transfer_buffer, |
3383 |
+ urb->actual_length); |
3384 |
+- |
3385 |
+- spin_lock(&tport->tp_lock); |
3386 |
+- tport->tp_icount.rx += urb->actual_length; |
3387 |
+- spin_unlock(&tport->tp_lock); |
3388 |
++ spin_lock(&tport->tp_lock); |
3389 |
++ tport->tp_icount.rx += urb->actual_length; |
3390 |
++ spin_unlock(&tport->tp_lock); |
3391 |
++ } |
3392 |
+ tty_kref_put(tty); |
3393 |
+ } |
3394 |
+ |
3395 |
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h |
3396 |
+index cfde74a..0f54399 100644 |
3397 |
+--- a/drivers/usb/storage/unusual_devs.h |
3398 |
++++ b/drivers/usb/storage/unusual_devs.h |
3399 |
+@@ -1218,12 +1218,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff, |
3400 |
+ US_SC_DEVICE, US_PR_DEVICE, NULL, |
3401 |
+ US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ), |
3402 |
+ |
3403 |
+-/* Reported by Rauch Wolke <rauchwolke@×××.net> */ |
3404 |
++/* Reported by Rauch Wolke <rauchwolke@×××.net> |
3405 |
++ * and augmented by binbin <binbinsh@×××××.com> (Bugzilla #12882) |
3406 |
++ */ |
3407 |
+ UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff, |
3408 |
+ "Simple Tech/Datafab", |
3409 |
+ "CF+SM Reader", |
3410 |
+ US_SC_DEVICE, US_PR_DEVICE, NULL, |
3411 |
+- US_FL_IGNORE_RESIDUE ), |
3412 |
++ US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ), |
3413 |
+ |
3414 |
+ /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant |
3415 |
+ * to the USB storage specification in two ways: |
3416 |
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c |
3417 |
+index 1657b96..471a9a6 100644 |
3418 |
+--- a/drivers/video/console/fbcon.c |
3419 |
++++ b/drivers/video/console/fbcon.c |
3420 |
+@@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, |
3421 |
+ } |
3422 |
+ |
3423 |
+ |
3424 |
++ if (!lock_fb_info(info)) |
3425 |
++ return; |
3426 |
+ event.info = info; |
3427 |
+ event.data = &blank;
3428 |
+ fb_notifier_call_chain(FB_EVENT_CONBLANK, &event); |
3429 |
++ unlock_fb_info(info); |
3430 |
+ } |
3431 |
+ |
3432 |
+ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) |
3433 |
+@@ -2954,8 +2957,9 @@ static int fbcon_fb_unbind(int idx) |
3434 |
+ |
3435 |
+ static int fbcon_fb_unregistered(struct fb_info *info) |
3436 |
+ { |
3437 |
+- int i, idx = info->node; |
3438 |
++ int i, idx; |
3439 |
+ |
3440 |
++ idx = info->node; |
3441 |
+ for (i = first_fb_vc; i <= last_fb_vc; i++) { |
3442 |
+ if (con2fb_map[i] == idx) |
3443 |
+ con2fb_map[i] = -1; |
3444 |
+@@ -2979,13 +2983,12 @@ static int fbcon_fb_unregistered(struct fb_info *info) |
3445 |
+ } |
3446 |
+ } |
3447 |
+ |
3448 |
+- if (!num_registered_fb) |
3449 |
+- unregister_con_driver(&fb_con); |
3450 |
+- |
3451 |
+- |
3452 |
+ if (primary_device == idx) |
3453 |
+ primary_device = -1; |
3454 |
+ |
3455 |
++ if (!num_registered_fb) |
3456 |
++ unregister_con_driver(&fb_con); |
3457 |
++ |
3458 |
+ return 0; |
3459 |
+ } |
3460 |
+ |
3461 |
+@@ -3021,8 +3024,9 @@ static inline void fbcon_select_primary(struct fb_info *info) |
3462 |
+ |
3463 |
+ static int fbcon_fb_registered(struct fb_info *info) |
3464 |
+ { |
3465 |
+- int ret = 0, i, idx = info->node; |
3466 |
++ int ret = 0, i, idx; |
3467 |
+ |
3468 |
++ idx = info->node; |
3469 |
+ fbcon_select_primary(info); |
3470 |
+ |
3471 |
+ if (info_idx == -1) { |
3472 |
+@@ -3124,7 +3128,7 @@ static void fbcon_get_requirement(struct fb_info *info, |
3473 |
+ } |
3474 |
+ } |
3475 |
+ |
3476 |
+-static int fbcon_event_notify(struct notifier_block *self, |
3477 |
++static int fbcon_event_notify(struct notifier_block *self, |
3478 |
+ unsigned long action, void *data) |
3479 |
+ { |
3480 |
+ struct fb_event *event = data; |
3481 |
+@@ -3132,7 +3136,7 @@ static int fbcon_event_notify(struct notifier_block *self, |
3482 |
+ struct fb_videomode *mode; |
3483 |
+ struct fb_con2fbmap *con2fb; |
3484 |
+ struct fb_blit_caps *caps; |
3485 |
+- int ret = 0; |
3486 |
++ int idx, ret = 0; |
3487 |
+ |
3488 |
+ /* |
3489 |
+ * ignore all events except driver registration and deregistration |
3490 |
+@@ -3160,7 +3164,8 @@ static int fbcon_event_notify(struct notifier_block *self, |
3491 |
+ ret = fbcon_mode_deleted(info, mode); |
3492 |
+ break; |
3493 |
+ case FB_EVENT_FB_UNBIND: |
3494 |
+- ret = fbcon_fb_unbind(info->node); |
3495 |
++ idx = info->node; |
3496 |
++ ret = fbcon_fb_unbind(idx); |
3497 |
+ break; |
3498 |
+ case FB_EVENT_FB_REGISTERED: |
3499 |
+ ret = fbcon_fb_registered(info); |
3500 |
+@@ -3188,7 +3193,6 @@ static int fbcon_event_notify(struct notifier_block *self, |
3501 |
+ fbcon_get_requirement(info, caps); |
3502 |
+ break; |
3503 |
+ } |
3504 |
+- |
3505 |
+ done: |
3506 |
+ return ret; |
3507 |
+ } |
3508 |
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c |
3509 |
+index cfd9dce..1d6fb41 100644 |
3510 |
+--- a/drivers/video/fbmem.c |
3511 |
++++ b/drivers/video/fbmem.c |
3512 |
+@@ -1086,13 +1086,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, |
3513 |
+ return -EINVAL; |
3514 |
+ con2fb.framebuffer = -1; |
3515 |
+ event.data = &con2fb; |
3516 |
+- |
3517 |
+ if (!lock_fb_info(info)) |
3518 |
+ return -ENODEV; |
3519 |
+ event.info = info; |
3520 |
+ fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event); |
3521 |
+ unlock_fb_info(info); |
3522 |
+- |
3523 |
+ ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0; |
3524 |
+ break; |
3525 |
+ case FBIOPUT_CON2FBMAP: |
3526 |
+@@ -1112,8 +1110,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, |
3527 |
+ if (!lock_fb_info(info)) |
3528 |
+ return -ENODEV; |
3529 |
+ event.info = info; |
3530 |
+- ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, |
3531 |
+- &event); |
3532 |
++ ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); |
3533 |
+ unlock_fb_info(info); |
3534 |
+ break; |
3535 |
+ case FBIOBLANK: |
3536 |
+@@ -1519,7 +1516,10 @@ register_framebuffer(struct fb_info *fb_info) |
3537 |
+ registered_fb[i] = fb_info; |
3538 |
+ |
3539 |
+ event.info = fb_info; |
3540 |
++ if (!lock_fb_info(fb_info)) |
3541 |
++ return -ENODEV; |
3542 |
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); |
3543 |
++ unlock_fb_info(fb_info); |
3544 |
+ return 0; |
3545 |
+ } |
3546 |
+ |
3547 |
+@@ -1553,8 +1553,12 @@ unregister_framebuffer(struct fb_info *fb_info) |
3548 |
+ goto done; |
3549 |
+ } |
3550 |
+ |
3551 |
++ |
3552 |
++ if (!lock_fb_info(fb_info)) |
3553 |
++ return -ENODEV; |
3554 |
+ event.info = fb_info; |
3555 |
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); |
3556 |
++ unlock_fb_info(fb_info); |
3557 |
+ |
3558 |
+ if (ret) { |
3559 |
+ ret = -EINVAL; |
3560 |
+@@ -1588,6 +1592,8 @@ void fb_set_suspend(struct fb_info *info, int state) |
3561 |
+ { |
3562 |
+ struct fb_event event; |
3563 |
+ |
3564 |
++ if (!lock_fb_info(info)) |
3565 |
++ return; |
3566 |
+ event.info = info; |
3567 |
+ if (state) { |
3568 |
+ fb_notifier_call_chain(FB_EVENT_SUSPEND, &event); |
3569 |
+@@ -1596,6 +1602,7 @@ void fb_set_suspend(struct fb_info *info, int state) |
3570 |
+ info->state = FBINFO_STATE_RUNNING; |
3571 |
+ fb_notifier_call_chain(FB_EVENT_RESUME, &event); |
3572 |
+ } |
3573 |
++ unlock_fb_info(info); |
3574 |
+ } |
3575 |
+ |
3576 |
+ /** |
3577 |
+@@ -1665,8 +1672,11 @@ int fb_new_modelist(struct fb_info *info) |
3578 |
+ err = 1; |
3579 |
+ |
3580 |
+ if (!list_empty(&info->modelist)) { |
3581 |
++ if (!lock_fb_info(info)) |
3582 |
++ return -ENODEV; |
3583 |
+ event.info = info; |
3584 |
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); |
3585 |
++ unlock_fb_info(info); |
3586 |
+ } |
3587 |
+ |
3588 |
+ return err; |
3589 |
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
3590 |
+index 5926826..9c76a06 100644 |
3591 |
+--- a/drivers/virtio/virtio_balloon.c |
3592 |
++++ b/drivers/virtio/virtio_balloon.c |
3593 |
+@@ -190,7 +190,8 @@ static int balloon(void *_vballoon) |
3594 |
+ try_to_freeze(); |
3595 |
+ wait_event_interruptible(vb->config_change, |
3596 |
+ (diff = towards_target(vb)) != 0 |
3597 |
+- || kthread_should_stop()); |
3598 |
++ || kthread_should_stop() |
3599 |
++ || freezing(current)); |
3600 |
+ if (diff > 0) |
3601 |
+ fill_balloon(vb, diff); |
3602 |
+ else if (diff < 0) |
3603 |
+diff --git a/fs/dquot.c b/fs/dquot.c |
3604 |
+index bca3cac..5a0059d 100644 |
3605 |
+--- a/fs/dquot.c |
3606 |
++++ b/fs/dquot.c |
3607 |
+@@ -793,7 +793,7 @@ static void add_dquot_ref(struct super_block *sb, int type) |
3608 |
+ continue; |
3609 |
+ if (!dqinit_needed(inode, type)) |
3610 |
+ continue; |
3611 |
+- if (inode->i_state & (I_FREEING|I_WILL_FREE)) |
3612 |
++ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) |
3613 |
+ continue; |
3614 |
+ |
3615 |
+ __iget(inode); |
3616 |
+diff --git a/fs/drop_caches.c b/fs/drop_caches.c |
3617 |
+index 3e5637f..f7e66c0 100644 |
3618 |
+--- a/fs/drop_caches.c |
3619 |
++++ b/fs/drop_caches.c |
3620 |
+@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb) |
3621 |
+ |
3622 |
+ spin_lock(&inode_lock); |
3623 |
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
3624 |
+- if (inode->i_state & (I_FREEING|I_WILL_FREE)) |
3625 |
++ if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) |
3626 |
+ continue; |
3627 |
+ if (inode->i_mapping->nrpages == 0) |
3628 |
+ continue; |
3629 |
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
3630 |
+index 9f61e62..27b3741 100644 |
3631 |
+--- a/fs/ext4/mballoc.c |
3632 |
++++ b/fs/ext4/mballoc.c |
3633 |
+@@ -2693,7 +2693,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) |
3634 |
+ i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int); |
3635 |
+ sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); |
3636 |
+ if (sbi->s_mb_maxs == NULL) { |
3637 |
+- kfree(sbi->s_mb_maxs); |
3638 |
++ kfree(sbi->s_mb_offsets); |
3639 |
+ return -ENOMEM; |
3640 |
+ } |
3641 |
+ |
3642 |
+@@ -4439,7 +4439,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) |
3643 |
+ pa_inode_list) { |
3644 |
+ spin_lock(&tmp_pa->pa_lock); |
3645 |
+ if (tmp_pa->pa_deleted) { |
3646 |
+- spin_unlock(&pa->pa_lock); |
3647 |
++ spin_unlock(&tmp_pa->pa_lock); |
3648 |
+ continue; |
3649 |
+ } |
3650 |
+ if (!added && pa->pa_free < tmp_pa->pa_free) { |
3651 |
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
3652 |
+index e3fe991..f81f9e7 100644 |
3653 |
+--- a/fs/fs-writeback.c |
3654 |
++++ b/fs/fs-writeback.c |
3655 |
+@@ -538,7 +538,8 @@ void generic_sync_sb_inodes(struct super_block *sb, |
3656 |
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
3657 |
+ struct address_space *mapping; |
3658 |
+ |
3659 |
+- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) |
3660 |
++ if (inode->i_state & |
3661 |
++ (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) |
3662 |
+ continue; |
3663 |
+ mapping = inode->i_mapping; |
3664 |
+ if (mapping->nrpages == 0) |
3665 |
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
3666 |
+index 9b800d9..c91a818 100644 |
3667 |
+--- a/fs/hugetlbfs/inode.c |
3668 |
++++ b/fs/hugetlbfs/inode.c |
3669 |
+@@ -26,7 +26,6 @@ |
3670 |
+ #include <linux/pagevec.h> |
3671 |
+ #include <linux/parser.h> |
3672 |
+ #include <linux/mman.h> |
3673 |
+-#include <linux/quotaops.h> |
3674 |
+ #include <linux/slab.h> |
3675 |
+ #include <linux/dnotify.h> |
3676 |
+ #include <linux/statfs.h> |
3677 |
+@@ -842,7 +841,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) |
3678 |
+ bad_val: |
3679 |
+ printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n", |
3680 |
+ args[0].from, p); |
3681 |
+- return 1; |
3682 |
++ return -EINVAL; |
3683 |
+ } |
3684 |
+ |
3685 |
+ static int |
3686 |
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c |
3687 |
+index 6cdeacf..4bd49c1 100644 |
3688 |
+--- a/fs/nfs/nfs3xdr.c |
3689 |
++++ b/fs/nfs/nfs3xdr.c |
3690 |
+@@ -716,7 +716,8 @@ nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p, |
3691 |
+ if (args->npages != 0) |
3692 |
+ xdr_encode_pages(buf, args->pages, 0, args->len); |
3693 |
+ else |
3694 |
+- req->rq_slen += args->len; |
3695 |
++ req->rq_slen = xdr_adjust_iovec(req->rq_svec, |
3696 |
++ p + XDR_QUADLEN(args->len)); |
3697 |
+ |
3698 |
+ err = nfsacl_encode(buf, base, args->inode, |
3699 |
+ (args->mask & NFS_ACL) ? |
3700 |
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c |
3701 |
+index a5887df..8672b95 100644 |
3702 |
+--- a/fs/ocfs2/file.c |
3703 |
++++ b/fs/ocfs2/file.c |
3704 |
+@@ -1926,7 +1926,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, |
3705 |
+ out->f_path.dentry->d_name.len, |
3706 |
+ out->f_path.dentry->d_name.name); |
3707 |
+ |
3708 |
+- inode_double_lock(inode, pipe->inode); |
3709 |
++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); |
3710 |
+ |
3711 |
+ ret = ocfs2_rw_lock(inode, 1); |
3712 |
+ if (ret < 0) { |
3713 |
+@@ -1941,12 +1941,16 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, |
3714 |
+ goto out_unlock; |
3715 |
+ } |
3716 |
+ |
3717 |
++ if (pipe->inode) |
3718 |
++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); |
3719 |
+ ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags); |
3720 |
++ if (pipe->inode) |
3721 |
++ mutex_unlock(&pipe->inode->i_mutex); |
3722 |
+ |
3723 |
+ out_unlock: |
3724 |
+ ocfs2_rw_unlock(inode, 1); |
3725 |
+ out: |
3726 |
+- inode_double_unlock(inode, pipe->inode); |
3727 |
++ mutex_unlock(&inode->i_mutex); |
3728 |
+ |
3729 |
+ mlog_exit(ret); |
3730 |
+ return ret; |
3731 |
+diff --git a/fs/splice.c b/fs/splice.c |
3732 |
+index 4ed0ba4..4c1029a 100644 |
3733 |
+--- a/fs/splice.c |
3734 |
++++ b/fs/splice.c |
3735 |
+@@ -736,10 +736,19 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, |
3736 |
+ * ->write_end. Most of the time, these expect i_mutex to |
3737 |
+ * be held. Since this may result in an ABBA deadlock with |
3738 |
+ * pipe->inode, we have to order lock acquiry here. |
3739 |
++ * |
3740 |
++ * Outer lock must be inode->i_mutex, as pipe_wait() will |
3741 |
++ * release and reacquire pipe->inode->i_mutex, AND inode must |
3742 |
++ * never be a pipe. |
3743 |
+ */ |
3744 |
+- inode_double_lock(inode, pipe->inode); |
3745 |
++ WARN_ON(S_ISFIFO(inode->i_mode)); |
3746 |
++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); |
3747 |
++ if (pipe->inode) |
3748 |
++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); |
3749 |
+ ret = __splice_from_pipe(pipe, &sd, actor); |
3750 |
+- inode_double_unlock(inode, pipe->inode); |
3751 |
++ if (pipe->inode) |
3752 |
++ mutex_unlock(&pipe->inode->i_mutex); |
3753 |
++ mutex_unlock(&inode->i_mutex); |
3754 |
+ |
3755 |
+ return ret; |
3756 |
+ } |
3757 |
+@@ -830,11 +839,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out, |
3758 |
+ }; |
3759 |
+ ssize_t ret; |
3760 |
+ |
3761 |
+- inode_double_lock(inode, pipe->inode); |
3762 |
++ WARN_ON(S_ISFIFO(inode->i_mode)); |
3763 |
++ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); |
3764 |
+ ret = file_remove_suid(out); |
3765 |
+- if (likely(!ret)) |
3766 |
++ if (likely(!ret)) { |
3767 |
++ if (pipe->inode) |
3768 |
++ mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD); |
3769 |
+ ret = __splice_from_pipe(pipe, &sd, pipe_to_file); |
3770 |
+- inode_double_unlock(inode, pipe->inode); |
3771 |
++ if (pipe->inode) |
3772 |
++ mutex_unlock(&pipe->inode->i_mutex); |
3773 |
++ } |
3774 |
++ mutex_unlock(&inode->i_mutex); |
3775 |
+ if (ret > 0) { |
3776 |
+ unsigned long nr_pages; |
3777 |
+ |
3778 |
+diff --git a/include/linux/capability.h b/include/linux/capability.h |
3779 |
+index 4864a43..c302110 100644 |
3780 |
+--- a/include/linux/capability.h |
3781 |
++++ b/include/linux/capability.h |
3782 |
+@@ -377,7 +377,21 @@ struct cpu_vfs_cap_data { |
3783 |
+ #define CAP_FOR_EACH_U32(__capi) \ |
3784 |
+ for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) |
3785 |
+ |
3786 |
++/* |
3787 |
++ * CAP_FS_MASK and CAP_NFSD_MASKS: |
3788 |
++ * |
3789 |
++ * The fs mask is all the privileges that fsuid==0 historically meant. |
3790 |
++ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. |
3791 |
++ * |
3792 |
++ * It has never meant setting security.* and trusted.* xattrs. |
3793 |
++ * |
3794 |
++ * We could also define fsmask as follows: |
3795 |
++ * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions |
3796 |
++ * 2. The security.* and trusted.* xattrs are fs-related MAC permissions |
3797 |
++ */ |
3798 |
++ |
3799 |
+ # define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ |
3800 |
++ | CAP_TO_MASK(CAP_MKNOD) \ |
3801 |
+ | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ |
3802 |
+ | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ |
3803 |
+ | CAP_TO_MASK(CAP_FOWNER) \ |
3804 |
+@@ -392,11 +406,12 @@ struct cpu_vfs_cap_data { |
3805 |
+ # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) |
3806 |
+ # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) |
3807 |
+ # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}) |
3808 |
+-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }) |
3809 |
++# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ |
3810 |
++ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ |
3811 |
++ CAP_FS_MASK_B1 } }) |
3812 |
+ # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ |
3813 |
+- | CAP_TO_MASK(CAP_SYS_RESOURCE) \ |
3814 |
+- | CAP_TO_MASK(CAP_MKNOD), \ |
3815 |
+- CAP_FS_MASK_B1 } }) |
3816 |
++ | CAP_TO_MASK(CAP_SYS_RESOURCE), \ |
3817 |
++ CAP_FS_MASK_B1 } }) |
3818 |
+ |
3819 |
+ #endif /* _KERNEL_CAPABILITY_U32S != 2 */ |
3820 |
+ |
3821 |
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h |
3822 |
+index bd37078..0d2f7c8 100644 |
3823 |
+--- a/include/linux/hrtimer.h |
3824 |
++++ b/include/linux/hrtimer.h |
3825 |
+@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, |
3826 |
+ const enum hrtimer_mode mode); |
3827 |
+ extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
3828 |
+ unsigned long range_ns, const enum hrtimer_mode mode); |
3829 |
++extern int |
3830 |
++__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
3831 |
++ unsigned long delta_ns, |
3832 |
++ const enum hrtimer_mode mode, int wakeup); |
3833 |
++ |
3834 |
+ extern int hrtimer_cancel(struct hrtimer *timer); |
3835 |
+ extern int hrtimer_try_to_cancel(struct hrtimer *timer); |
3836 |
+ |
3837 |
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h |
3838 |
+index 9127f6b..564d1c0 100644 |
3839 |
+--- a/include/linux/interrupt.h |
3840 |
++++ b/include/linux/interrupt.h |
3841 |
+@@ -274,6 +274,7 @@ extern void softirq_init(void); |
3842 |
+ #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
3843 |
+ extern void raise_softirq_irqoff(unsigned int nr); |
3844 |
+ extern void raise_softirq(unsigned int nr); |
3845 |
++extern void wakeup_softirqd(void); |
3846 |
+ |
3847 |
+ /* This is the worklist that queues up per-cpu softirq work. |
3848 |
+ * |
3849 |
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h |
3850 |
+index bf6f703..552ef4f 100644 |
3851 |
+--- a/include/linux/kvm_host.h |
3852 |
++++ b/include/linux/kvm_host.h |
3853 |
+@@ -127,6 +127,10 @@ struct kvm { |
3854 |
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; |
3855 |
+ #endif |
3856 |
+ |
3857 |
++#ifdef CONFIG_HAVE_KVM_IRQCHIP |
3858 |
++ struct hlist_head mask_notifier_list; |
3859 |
++#endif |
3860 |
++ |
3861 |
+ #ifdef KVM_ARCH_WANT_MMU_NOTIFIER |
3862 |
+ struct mmu_notifier mmu_notifier; |
3863 |
+ unsigned long mmu_notifier_seq; |
3864 |
+@@ -321,6 +325,19 @@ struct kvm_assigned_dev_kernel { |
3865 |
+ struct pci_dev *dev; |
3866 |
+ struct kvm *kvm; |
3867 |
+ }; |
3868 |
++ |
3869 |
++struct kvm_irq_mask_notifier { |
3870 |
++ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); |
3871 |
++ int irq; |
3872 |
++ struct hlist_node link; |
3873 |
++}; |
3874 |
++ |
3875 |
++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, |
3876 |
++ struct kvm_irq_mask_notifier *kimn); |
3877 |
++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, |
3878 |
++ struct kvm_irq_mask_notifier *kimn); |
3879 |
++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); |
3880 |
++ |
3881 |
+ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); |
3882 |
+ void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); |
3883 |
+ void kvm_register_irq_ack_notifier(struct kvm *kvm, |
3884 |
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h |
3885 |
+index 01ca085..076a7dc 100644 |
3886 |
+--- a/include/linux/pagemap.h |
3887 |
++++ b/include/linux/pagemap.h |
3888 |
+@@ -18,9 +18,14 @@ |
3889 |
+ * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page |
3890 |
+ * allocation mode flags. |
3891 |
+ */ |
3892 |
+-#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ |
3893 |
+-#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ |
3894 |
+-#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ |
3895 |
++enum mapping_flags { |
3896 |
++ AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ |
3897 |
++ AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ |
3898 |
++ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ |
3899 |
++#ifdef CONFIG_UNEVICTABLE_LRU |
3900 |
++ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ |
3901 |
++#endif |
3902 |
++}; |
3903 |
+ |
3904 |
+ static inline void mapping_set_error(struct address_space *mapping, int error) |
3905 |
+ { |
3906 |
+@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) |
3907 |
+ } |
3908 |
+ |
3909 |
+ #ifdef CONFIG_UNEVICTABLE_LRU |
3910 |
+-#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */ |
3911 |
+ |
3912 |
+ static inline void mapping_set_unevictable(struct address_space *mapping) |
3913 |
+ { |
3914 |
+diff --git a/include/linux/sched.h b/include/linux/sched.h |
3915 |
+index 011db2f..f8af167 100644 |
3916 |
+--- a/include/linux/sched.h |
3917 |
++++ b/include/linux/sched.h |
3918 |
+@@ -202,7 +202,8 @@ extern unsigned long long time_sync_thresh; |
3919 |
+ #define task_is_stopped_or_traced(task) \ |
3920 |
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
3921 |
+ #define task_contributes_to_load(task) \ |
3922 |
+- ((task->state & TASK_UNINTERRUPTIBLE) != 0) |
3923 |
++ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
3924 |
++ (task->flags & PF_FROZEN) == 0) |
3925 |
+ |
3926 |
+ #define __set_task_state(tsk, state_value) \ |
3927 |
+ do { (tsk)->state = (state_value); } while (0) |
3928 |
+diff --git a/kernel/fork.c b/kernel/fork.c |
3929 |
+index 4854c2c..9b51a1b 100644 |
3930 |
+--- a/kernel/fork.c |
3931 |
++++ b/kernel/fork.c |
3932 |
+@@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) |
3933 |
+ sig->cputime_expires.virt_exp = cputime_zero; |
3934 |
+ sig->cputime_expires.sched_exp = 0; |
3935 |
+ |
3936 |
++ if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { |
3937 |
++ sig->cputime_expires.prof_exp = |
3938 |
++ secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); |
3939 |
++ sig->cputimer.running = 1; |
3940 |
++ } |
3941 |
++ |
3942 |
+ /* The timer lists. */ |
3943 |
+ INIT_LIST_HEAD(&sig->cpu_timers[0]); |
3944 |
+ INIT_LIST_HEAD(&sig->cpu_timers[1]); |
3945 |
+@@ -823,11 +829,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
3946 |
+ atomic_inc(&current->signal->live);
3947 |
+ return 0; |
3948 |
+ } |
3949 |
+- sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); |
3950 |
+- |
3951 |
+- if (sig) |
3952 |
+- posix_cpu_timers_init_group(sig); |
3953 |
+ |
3954 |
++ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); |
3955 |
+ tsk->signal = sig; |
3956 |
+ if (!sig) |
3957 |
+ return -ENOMEM; |
3958 |
+@@ -865,6 +868,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
3959 |
+ memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); |
3960 |
+ task_unlock(current->group_leader); |
3961 |
+ |
3962 |
++ posix_cpu_timers_init_group(sig); |
3963 |
++ |
3964 |
+ acct_init_pacct(&sig->pacct); |
3965 |
+ |
3966 |
+ tty_audit_fork(sig); |
3967 |
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c |
3968 |
+index f394d2a..cb8a15c 100644 |
3969 |
+--- a/kernel/hrtimer.c |
3970 |
++++ b/kernel/hrtimer.c |
3971 |
+@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) |
3972 |
+ * and expiry check is done in the hrtimer_interrupt or in the softirq. |
3973 |
+ */ |
3974 |
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
3975 |
+- struct hrtimer_clock_base *base) |
3976 |
++ struct hrtimer_clock_base *base, |
3977 |
++ int wakeup) |
3978 |
+ { |
3979 |
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { |
3980 |
+- spin_unlock(&base->cpu_base->lock); |
3981 |
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
3982 |
+- spin_lock(&base->cpu_base->lock); |
3983 |
++ if (wakeup) { |
3984 |
++ spin_unlock(&base->cpu_base->lock); |
3985 |
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
3986 |
++ spin_lock(&base->cpu_base->lock); |
3987 |
++ } else |
3988 |
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ); |
3989 |
++ |
3990 |
+ return 1; |
3991 |
+ } |
3992 |
++ |
3993 |
+ return 0; |
3994 |
+ } |
3995 |
+ |
3996 |
+@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; } |
3997 |
+ static inline int hrtimer_switch_to_hres(void) { return 0; } |
3998 |
+ static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } |
3999 |
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, |
4000 |
+- struct hrtimer_clock_base *base) |
4001 |
++ struct hrtimer_clock_base *base, |
4002 |
++ int wakeup) |
4003 |
+ { |
4004 |
+ return 0; |
4005 |
+ } |
4006 |
+@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) |
4007 |
+ return 0; |
4008 |
+ } |
4009 |
+ |
4010 |
+-/** |
4011 |
+- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU |
4012 |
+- * @timer: the timer to be added |
4013 |
+- * @tim: expiry time |
4014 |
+- * @delta_ns: "slack" range for the timer |
4015 |
+- * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) |
4016 |
+- * |
4017 |
+- * Returns: |
4018 |
+- * 0 on success |
4019 |
+- * 1 when the timer was active |
4020 |
+- */ |
4021 |
+-int |
4022 |
+-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns, |
4023 |
+- const enum hrtimer_mode mode) |
4024 |
++int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
4025 |
++ unsigned long delta_ns, const enum hrtimer_mode mode, |
4026 |
++ int wakeup) |
4027 |
+ { |
4028 |
+ struct hrtimer_clock_base *base, *new_base; |
4029 |
+ unsigned long flags; |
4030 |
+@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n |
4031 |
+ * XXX send_remote_softirq() ? |
4032 |
+ */ |
4033 |
+ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) |
4034 |
+- hrtimer_enqueue_reprogram(timer, new_base); |
4035 |
++ hrtimer_enqueue_reprogram(timer, new_base, wakeup); |
4036 |
+ |
4037 |
+ unlock_hrtimer_base(timer, &flags); |
4038 |
+ |
4039 |
+ return ret; |
4040 |
+ } |
4041 |
++ |
4042 |
++/** |
4043 |
++ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU |
4044 |
++ * @timer: the timer to be added |
4045 |
++ * @tim: expiry time |
4046 |
++ * @delta_ns: "slack" range for the timer |
4047 |
++ * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL) |
4048 |
++ * |
4049 |
++ * Returns: |
4050 |
++ * 0 on success |
4051 |
++ * 1 when the timer was active |
4052 |
++ */ |
4053 |
++int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
4054 |
++ unsigned long delta_ns, const enum hrtimer_mode mode) |
4055 |
++{ |
4056 |
++ return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1); |
4057 |
++} |
4058 |
+ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); |
4059 |
+ |
4060 |
+ /** |
4061 |
+@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); |
4062 |
+ int |
4063 |
+ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) |
4064 |
+ { |
4065 |
+- return hrtimer_start_range_ns(timer, tim, 0, mode); |
4066 |
++ return __hrtimer_start_range_ns(timer, tim, 0, mode, 1); |
4067 |
+ } |
4068 |
+ EXPORT_SYMBOL_GPL(hrtimer_start); |
4069 |
+ |
4070 |
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
4071 |
+index 7ba8cd9..6589776 100644 |
4072 |
+--- a/kernel/kprobes.c |
4073 |
++++ b/kernel/kprobes.c |
4074 |
+@@ -912,10 +912,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, |
4075 |
+ ri->rp = rp; |
4076 |
+ ri->task = current; |
4077 |
+ |
4078 |
+- if (rp->entry_handler && rp->entry_handler(ri, regs)) { |
4079 |
+- spin_unlock_irqrestore(&rp->lock, flags); |
4080 |
++ if (rp->entry_handler && rp->entry_handler(ri, regs)) |
4081 |
+ return 0; |
4082 |
+- } |
4083 |
+ |
4084 |
+ arch_prepare_kretprobe(ri, regs); |
4085 |
+ |
4086 |
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c |
4087 |
+index e976e50..68647c1 100644 |
4088 |
+--- a/kernel/posix-cpu-timers.c |
4089 |
++++ b/kernel/posix-cpu-timers.c |
4090 |
+@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new) |
4091 |
+ |
4092 |
+ cputime = secs_to_cputime(rlim_new); |
4093 |
+ if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || |
4094 |
+- cputime_lt(current->signal->it_prof_expires, cputime)) { |
4095 |
++ cputime_gt(current->signal->it_prof_expires, cputime)) { |
4096 |
+ spin_lock_irq(¤t->sighand->siglock); |
4097 |
+ set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); |
4098 |
+ spin_unlock_irq(¤t->sighand->siglock); |
4099 |
+@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, |
4100 |
+ cpu->cpu = virt_ticks(p); |
4101 |
+ break; |
4102 |
+ case CPUCLOCK_SCHED: |
4103 |
+- cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p); |
4104 |
++ cpu->sched = task_sched_runtime(p); |
4105 |
+ break; |
4106 |
+ } |
4107 |
+ return 0; |
4108 |
+@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock, |
4109 |
+ { |
4110 |
+ struct task_cputime cputime; |
4111 |
+ |
4112 |
+- thread_group_cputime(p, &cputime); |
4113 |
+ switch (CPUCLOCK_WHICH(which_clock)) { |
4114 |
+ default: |
4115 |
+ return -EINVAL; |
4116 |
+ case CPUCLOCK_PROF: |
4117 |
++ thread_group_cputime(p, &cputime); |
4118 |
+ cpu->cpu = cputime_add(cputime.utime, cputime.stime); |
4119 |
+ break; |
4120 |
+ case CPUCLOCK_VIRT: |
4121 |
++ thread_group_cputime(p, &cputime); |
4122 |
+ cpu->cpu = cputime.utime; |
4123 |
+ break; |
4124 |
+ case CPUCLOCK_SCHED: |
4125 |
+- cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p); |
4126 |
++ cpu->sched = thread_group_sched_runtime(p); |
4127 |
+ break; |
4128 |
+ } |
4129 |
+ return 0; |
4130 |
+diff --git a/kernel/sched.c b/kernel/sched.c |
4131 |
+index 8e2558c..5e80629 100644 |
4132 |
+--- a/kernel/sched.c |
4133 |
++++ b/kernel/sched.c |
4134 |
+@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) |
4135 |
+ |
4136 |
+ spin_lock(&rt_b->rt_runtime_lock); |
4137 |
+ for (;;) { |
4138 |
++ unsigned long delta; |
4139 |
++ ktime_t soft, hard; |
4140 |
++ |
4141 |
+ if (hrtimer_active(&rt_b->rt_period_timer)) |
4142 |
+ break; |
4143 |
+ |
4144 |
+ now = hrtimer_cb_get_time(&rt_b->rt_period_timer); |
4145 |
+ hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); |
4146 |
+- hrtimer_start_expires(&rt_b->rt_period_timer, |
4147 |
+- HRTIMER_MODE_ABS); |
4148 |
++ |
4149 |
++ soft = hrtimer_get_softexpires(&rt_b->rt_period_timer); |
4150 |
++ hard = hrtimer_get_expires(&rt_b->rt_period_timer); |
4151 |
++ delta = ktime_to_ns(ktime_sub(hard, soft)); |
4152 |
++ __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, |
4153 |
++ HRTIMER_MODE_ABS, 0); |
4154 |
+ } |
4155 |
+ spin_unlock(&rt_b->rt_runtime_lock); |
4156 |
+ } |
4157 |
+@@ -1129,7 +1136,8 @@ static __init void init_hrtick(void) |
4158 |
+ */ |
4159 |
+ static void hrtick_start(struct rq *rq, u64 delay) |
4160 |
+ { |
4161 |
+- hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); |
4162 |
++ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0, |
4163 |
++ HRTIMER_MODE_REL, 0); |
4164 |
+ } |
4165 |
+ |
4166 |
+ static inline void init_hrtick(void) |
4167 |
+@@ -4134,9 +4142,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat); |
4168 |
+ EXPORT_PER_CPU_SYMBOL(kstat); |
4169 |
+ |
4170 |
+ /* |
4171 |
+- * Return any ns on the sched_clock that have not yet been banked in |
4172 |
++ * Return any ns on the sched_clock that have not yet been accounted in |
4173 |
+ * @p in case that task is currently running. |
4174 |
++ * |
4175 |
++ * Called with task_rq_lock() held on @rq. |
4176 |
+ */ |
4177 |
++static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) |
4178 |
++{ |
4179 |
++ u64 ns = 0; |
4180 |
++ |
4181 |
++ if (task_current(rq, p)) { |
4182 |
++ update_rq_clock(rq); |
4183 |
++ ns = rq->clock - p->se.exec_start; |
4184 |
++ if ((s64)ns < 0) |
4185 |
++ ns = 0; |
4186 |
++ } |
4187 |
++ |
4188 |
++ return ns; |
4189 |
++} |
4190 |
++ |
4191 |
+ unsigned long long task_delta_exec(struct task_struct *p) |
4192 |
+ { |
4193 |
+ unsigned long flags; |
4194 |
+@@ -4144,16 +4168,49 @@ unsigned long long task_delta_exec(struct task_struct *p) |
4195 |
+ u64 ns = 0; |
4196 |
+ |
4197 |
+ rq = task_rq_lock(p, &flags); |
4198 |
++ ns = do_task_delta_exec(p, rq); |
4199 |
++ task_rq_unlock(rq, &flags); |
4200 |
+ |
4201 |
+- if (task_current(rq, p)) { |
4202 |
+- u64 delta_exec; |
4203 |
++ return ns; |
4204 |
++} |
4205 |
+ |
4206 |
+- update_rq_clock(rq); |
4207 |
+- delta_exec = rq->clock - p->se.exec_start; |
4208 |
+- if ((s64)delta_exec > 0) |
4209 |
+- ns = delta_exec; |
4210 |
+- } |
4211 |
++/* |
4212 |
++ * Return accounted runtime for the task. |
4213 |
++ * In case the task is currently running, return the runtime plus current's |
4214 |
++ * pending runtime that have not been accounted yet. |
4215 |
++ */ |
4216 |
++unsigned long long task_sched_runtime(struct task_struct *p) |
4217 |
++{ |
4218 |
++ unsigned long flags; |
4219 |
++ struct rq *rq; |
4220 |
++ u64 ns = 0; |
4221 |
+ |
4222 |
++ rq = task_rq_lock(p, &flags); |
4223 |
++ ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); |
4224 |
++ task_rq_unlock(rq, &flags); |
4225 |
++ |
4226 |
++ return ns; |
4227 |
++} |
4228 |
++ |
4229 |
++/* |
4230 |
++ * Return sum_exec_runtime for the thread group. |
4231 |
++ * In case the task is currently running, return the sum plus current's |
4232 |
++ * pending runtime that have not been accounted yet. |
4233 |
++ * |
4234 |
++ * Note that the thread group might have other running tasks as well, |
4235 |
++ * so the return value not includes other pending runtime that other |
4236 |
++ * running tasks might have. |
4237 |
++ */ |
4238 |
++unsigned long long thread_group_sched_runtime(struct task_struct *p) |
4239 |
++{ |
4240 |
++ struct task_cputime totals; |
4241 |
++ unsigned long flags; |
4242 |
++ struct rq *rq; |
4243 |
++ u64 ns; |
4244 |
++ |
4245 |
++ rq = task_rq_lock(p, &flags); |
4246 |
++ thread_group_cputime(p, &totals); |
4247 |
++ ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); |
4248 |
+ task_rq_unlock(rq, &flags); |
4249 |
+ |
4250 |
+ return ns; |
4251 |
+diff --git a/kernel/softirq.c b/kernel/softirq.c |
4252 |
+index 9041ea7..d2b183e 100644 |
4253 |
+--- a/kernel/softirq.c |
4254 |
++++ b/kernel/softirq.c |
4255 |
+@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
4256 |
+ * to the pending events, so lets the scheduler to balance |
4257 |
+ * the softirq load for us. |
4258 |
+ */ |
4259 |
+-static inline void wakeup_softirqd(void) |
4260 |
++void wakeup_softirqd(void) |
4261 |
+ { |
4262 |
+ /* Interrupts are disabled: no need to stop preemption */ |
4263 |
+ struct task_struct *tsk = __get_cpu_var(ksoftirqd); |
4264 |
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c |
4265 |
+index c5ef44f..7755ae7 100644 |
4266 |
+--- a/kernel/sysctl.c |
4267 |
++++ b/kernel/sysctl.c |
4268 |
+@@ -95,12 +95,9 @@ static int sixty = 60; |
4269 |
+ static int neg_one = -1; |
4270 |
+ #endif |
4271 |
+ |
4272 |
+-#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING) |
4273 |
+-static int two = 2; |
4274 |
+-#endif |
4275 |
+- |
4276 |
+ static int zero; |
4277 |
+ static int one = 1; |
4278 |
++static int two = 2; |
4279 |
+ static unsigned long one_ul = 1; |
4280 |
+ static int one_hundred = 100; |
4281 |
+ |
4282 |
+@@ -1373,10 +1370,7 @@ static struct ctl_table fs_table[] = { |
4283 |
+ .data = &lease_break_time, |
4284 |
+ .maxlen = sizeof(int), |
4285 |
+ .mode = 0644, |
4286 |
+- .proc_handler = &proc_dointvec_minmax, |
4287 |
+- .strategy = &sysctl_intvec, |
4288 |
+- .extra1 = &zero, |
4289 |
+- .extra2 = &two, |
4290 |
++ .proc_handler = &proc_dointvec, |
4291 |
+ }, |
4292 |
+ #endif |
4293 |
+ #ifdef CONFIG_AIO |
4294 |
+@@ -1417,7 +1411,10 @@ static struct ctl_table fs_table[] = { |
4295 |
+ .data = &suid_dumpable, |
4296 |
+ .maxlen = sizeof(int), |
4297 |
+ .mode = 0644, |
4298 |
+- .proc_handler = &proc_dointvec, |
4299 |
++ .proc_handler = &proc_dointvec_minmax, |
4300 |
++ .strategy = &sysctl_intvec, |
4301 |
++ .extra1 = &zero, |
4302 |
++ .extra2 = &two, |
4303 |
+ }, |
4304 |
+ #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) |
4305 |
+ { |
4306 |
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
4307 |
+index 17bb88d..b2387c0 100644 |
4308 |
+--- a/kernel/trace/trace.c |
4309 |
++++ b/kernel/trace/trace.c |
4310 |
+@@ -3886,7 +3886,8 @@ __init static int tracer_alloc_buffers(void) |
4311 |
+ &trace_panic_notifier); |
4312 |
+ |
4313 |
+ register_die_notifier(&trace_die_notifier); |
4314 |
+- ret = 0; |
4315 |
++ |
4316 |
++ return 0; |
4317 |
+ |
4318 |
+ out_free_cpumask: |
4319 |
+ free_cpumask_var(tracing_cpumask); |
4320 |
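
The kernel/trace/trace.c change replaces "ret = 0;" with an explicit "return 0;" so the success path no longer falls through into the error labels that free the cpumask it has just set up. A minimal sketch of why goto-style cleanup needs that early return (names and resources here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Goto-style cleanup: the success path must leave before the error labels,
 * otherwise it tears down what it just built. */
static int setup(char **out)
{
    char *buf = malloc(64);
    if (!buf)
        goto out_fail;

    *out = buf;
    return 0;            /* correct: skip the cleanup below */
    /* With "ret = 0;" and a fall-through instead of a return, the buffer
     * would be freed on success as well -- the hazard the hunk fixes. */

out_fail:
    free(buf);           /* free(NULL) is a no-op, so this is safe */
    return -1;
}

int main(void)
{
    char *p = NULL;
    printf("setup: %d\n", setup(&p));
    free(p);
    return 0;
}
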
+diff --git a/lib/cpumask.c b/lib/cpumask.c |
4321 |
+index 3389e24..1f71b97 100644 |
4322 |
+--- a/lib/cpumask.c |
4323 |
++++ b/lib/cpumask.c |
4324 |
+@@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) |
4325 |
+ #endif |
4326 |
+ /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */ |
4327 |
+ if (*mask) { |
4328 |
++ unsigned char *ptr = (unsigned char *)cpumask_bits(*mask); |
4329 |
+ unsigned int tail; |
4330 |
+ tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long); |
4331 |
+- memset(cpumask_bits(*mask) + cpumask_size() - tail, |
4332 |
+- 0, tail); |
4333 |
++ memset(ptr + cpumask_size() - tail, 0, tail); |
4334 |
+ } |
4335 |
+ |
4336 |
+ return *mask != NULL; |
4337 |
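
In the lib/cpumask.c fix, cpumask_size() and tail are byte counts, but cpumask_bits() yields an unsigned long *, so adding them advanced the memset() start by sizeof(long) times too much; casting to unsigned char * first makes the arithmetic byte-wise. A self-contained demonstration of the scaling difference (nothing kernel-specific here):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    unsigned long words[64];
    unsigned long *lp = words;
    unsigned char *bp = (unsigned char *)words;

    /* Adding a byte count to an unsigned long * scales by sizeof(long):
     * the two expressions below land at very different addresses. */
    size_t off = 2 * sizeof(unsigned long);   /* intended as a BYTE offset */
    printf("long* + off -> %td bytes past start\n",
           (char *)(lp + off) - (char *)words);
    printf("char* + off -> %td bytes past start\n",
           (char *)(bp + off) - (char *)words);
    return 0;
}
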
+diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c |
4338 |
+index 0c04615..427dfe3 100644 |
4339 |
+--- a/mm/filemap_xip.c |
4340 |
++++ b/mm/filemap_xip.c |
4341 |
+@@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping, |
4342 |
+ } |
4343 |
+ } |
4344 |
+ nr = nr - offset; |
4345 |
+- if (nr > len) |
4346 |
+- nr = len; |
4347 |
++ if (nr > len - copied) |
4348 |
++ nr = len - copied; |
4349 |
+ |
4350 |
+ error = mapping->a_ops->get_xip_mem(mapping, index, 0, |
4351 |
+ &xip_mem, &xip_pfn); |
4352 |
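
The mm/filemap_xip.c fix clamps each chunk against the bytes still outstanding (len - copied) rather than the whole request, so a multi-chunk read cannot copy past what the caller asked for. A small stand-alone loop in the same spirit (the buffer handling below is invented, not the XIP read path):

#include <stdio.h>
#include <string.h>

/* Chunked copy: each pass must clamp against what is still outstanding,
 * not against the original len, or the final chunk can overrun the request. */
static size_t chunked_copy(char *dst, const char *src, size_t len, size_t chunk)
{
    size_t copied = 0;
    while (copied < len) {
        size_t nr = chunk;
        if (nr > len - copied)      /* the corrected clamp */
            nr = len - copied;
        memcpy(dst + copied, src + copied, nr);
        copied += nr;
    }
    return copied;
}

int main(void)
{
    char src[32] = "0123456789abcdefghijklmnopqrstu";
    char dst[32] = { 0 };
    printf("copied %zu bytes\n", chunked_copy(dst, src, 10, 8));
    return 0;
}
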
+diff --git a/mm/mmap.c b/mm/mmap.c |
4353 |
+index 00ced3e..f1aa6f9 100644 |
4354 |
+--- a/mm/mmap.c |
4355 |
++++ b/mm/mmap.c |
4356 |
+@@ -1571,7 +1571,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns |
4357 |
+ * Overcommit.. This must be the final test, as it will |
4358 |
+ * update security statistics. |
4359 |
+ */ |
4360 |
+- if (security_vm_enough_memory(grow)) |
4361 |
++ if (security_vm_enough_memory_mm(mm, grow)) |
4362 |
+ return -ENOMEM; |
4363 |
+ |
4364 |
+ /* Ok, everything looks good - let it rip */ |
4365 |
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
4366 |
+index c6a6b16..eae6954 100644 |
4367 |
+--- a/net/core/skbuff.c |
4368 |
++++ b/net/core/skbuff.c |
4369 |
+@@ -2496,7 +2496,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) |
4370 |
+ skb_network_header_len(skb)); |
4371 |
+ skb_copy_from_linear_data(skb, nskb->data, doffset); |
4372 |
+ |
4373 |
+- if (pos >= offset + len) |
4374 |
++ if (fskb != skb_shinfo(skb)->frag_list) |
4375 |
+ continue; |
4376 |
+ |
4377 |
+ if (!sg) { |
4378 |
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c |
4379 |
+index 7ea88b6..39879ae 100644 |
4380 |
+--- a/net/ipv4/netfilter/arp_tables.c |
4381 |
++++ b/net/ipv4/netfilter/arp_tables.c |
4382 |
+@@ -374,7 +374,9 @@ static int mark_source_chains(struct xt_table_info *newinfo, |
4383 |
+ && unconditional(&e->arp)) || visited) { |
4384 |
+ unsigned int oldpos, size; |
4385 |
+ |
4386 |
+- if (t->verdict < -NF_MAX_VERDICT - 1) { |
4387 |
++ if ((strcmp(t->target.u.user.name, |
4388 |
++ ARPT_STANDARD_TARGET) == 0) && |
4389 |
++ t->verdict < -NF_MAX_VERDICT - 1) { |
4390 |
+ duprintf("mark_source_chains: bad " |
4391 |
+ "negative verdict (%i)\n", |
4392 |
+ t->verdict); |
4393 |
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c |
4394 |
+index ef8b6ca..ec362a3 100644 |
4395 |
+--- a/net/ipv4/netfilter/ip_tables.c |
4396 |
++++ b/net/ipv4/netfilter/ip_tables.c |
4397 |
+@@ -496,7 +496,9 @@ mark_source_chains(struct xt_table_info *newinfo, |
4398 |
+ && unconditional(&e->ip)) || visited) { |
4399 |
+ unsigned int oldpos, size; |
4400 |
+ |
4401 |
+- if (t->verdict < -NF_MAX_VERDICT - 1) { |
4402 |
++ if ((strcmp(t->target.u.user.name, |
4403 |
++ IPT_STANDARD_TARGET) == 0) && |
4404 |
++ t->verdict < -NF_MAX_VERDICT - 1) { |
4405 |
+ duprintf("mark_source_chains: bad " |
4406 |
+ "negative verdict (%i)\n", |
4407 |
+ t->verdict); |
4408 |
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c |
4409 |
+index a33485d..def375b 100644 |
4410 |
+--- a/net/ipv6/netfilter/ip6_tables.c |
4411 |
++++ b/net/ipv6/netfilter/ip6_tables.c |
4412 |
+@@ -525,7 +525,9 @@ mark_source_chains(struct xt_table_info *newinfo, |
4413 |
+ && unconditional(&e->ipv6)) || visited) { |
4414 |
+ unsigned int oldpos, size; |
4415 |
+ |
4416 |
+- if (t->verdict < -NF_MAX_VERDICT - 1) { |
4417 |
++ if ((strcmp(t->target.u.user.name, |
4418 |
++ IP6T_STANDARD_TARGET) == 0) && |
4419 |
++ t->verdict < -NF_MAX_VERDICT - 1) { |
4420 |
+ duprintf("mark_source_chains: bad " |
4421 |
+ "negative verdict (%i)\n", |
4422 |
+ t->verdict); |
4423 |
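
The three netfilter hunks above apply the negative-verdict range check only when the rule's target is the standard target, since only standard targets store a verdict in that field. A toy version of the guard; it models the standard target with an empty name and a made-up verdict limit, which are assumptions for illustration rather than the real uapi definitions:

#include <stdio.h>
#include <string.h>

#define STANDARD_TARGET ""   /* empty name stands in for the standard target */
#define MAX_VERDICT 5        /* stand-in for NF_MAX_VERDICT */

struct entry_target {
    char name[16];           /* target name */
    int  verdict;            /* only meaningful for the standard target */
};

/* Range-check the verdict only for standard targets; for any other target
 * the same bytes are module-private data, not a verdict. */
static int verdict_ok(const struct entry_target *t)
{
    if (strcmp(t->name, STANDARD_TARGET) == 0 && t->verdict < -MAX_VERDICT - 1)
        return 0;            /* bogus negative verdict */
    return 1;
}

int main(void)
{
    struct entry_target std = { "", -7 };        /* standard target, bad verdict */
    struct entry_target rej = { "REJECT", -7 };  /* non-standard: not a verdict */
    printf("standard: %s\n", verdict_ok(&std) ? "ok" : "rejected");
    printf("REJECT:   %s\n", verdict_ok(&rej) ? "ok" : "rejected");
    return 0;
}
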
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c |
4424 |
+index e9c05b8..dcce778 100644 |
4425 |
+--- a/net/netrom/af_netrom.c |
4426 |
++++ b/net/netrom/af_netrom.c |
4427 |
+@@ -1082,7 +1082,13 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock, |
4428 |
+ |
4429 |
+ SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n"); |
4430 |
+ |
4431 |
+- /* Build a packet */ |
4432 |
++ /* Build a packet - the conventional user limit is 236 bytes. We can |
4433 |
++ do ludicrously large NetROM frames but must not overflow */ |
4434 |
++ if (len > 65536) { |
4435 |
++ err = -EMSGSIZE; |
4436 |
++ goto out; |
4437 |
++ } |
4438 |
++ |
4439 |
+ SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n"); |
4440 |
+ size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; |
4441 |
+ |
4442 |
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c |
4443 |
+index 0139264..5e75bbf 100644 |
4444 |
+--- a/net/rose/af_rose.c |
4445 |
++++ b/net/rose/af_rose.c |
4446 |
+@@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, |
4447 |
+ |
4448 |
+ /* Build a packet */ |
4449 |
+ SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n"); |
4450 |
++ /* Sanity check the packet size */ |
4451 |
++ if (len > 65535) |
4452 |
++ return -EMSGSIZE; |
4453 |
++ |
4454 |
+ size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; |
4455 |
+ |
4456 |
+ if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) |
4457 |
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c |
4458 |
+index 9fc5b02..88d80f5 100644 |
4459 |
+--- a/net/x25/af_x25.c |
4460 |
++++ b/net/x25/af_x25.c |
4461 |
+@@ -1037,6 +1037,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, |
4462 |
+ sx25.sx25_addr = x25->dest_addr; |
4463 |
+ } |
4464 |
+ |
4465 |
++ /* Sanity check the packet size */ |
4466 |
++ if (len > 65535) { |
4467 |
++ rc = -EMSGSIZE; |
4468 |
++ goto out; |
4469 |
++ } |
4470 |
++ |
4471 |
+ SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n"); |
4472 |
+ |
4473 |
+ /* Build a packet */ |
4474 |
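
netrom, rose and x25 all gain the same guard: reject oversized sendmsg lengths with -EMSGSIZE before header overhead is added to them and an skb of that size is requested. A compact sketch of checking the caller's length before deriving the allocation size from it (the header size and limit below are placeholders, not the protocol constants):

#include <stdio.h>

#define EMSGSIZE 90
#define HDR_LEN 20               /* placeholder for the protocol header overhead */
#define MAX_PAYLOAD 65535u       /* placeholder cap, in the spirit of the hunks */

/* Validate the caller's length before computing the allocation size. */
static int build_packet(size_t len)
{
    if (len > MAX_PAYLOAD)
        return -EMSGSIZE;        /* reject before len + HDR_LEN is ever formed */
    size_t size = len + HDR_LEN;
    printf("allocating %zu bytes\n", size);
    return 0;
}

int main(void)
{
    printf("small: %d\n", build_packet(200));
    printf("huge:  %d\n", build_packet(1u << 20));
    return 0;
}
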
+diff --git a/security/commoncap.c b/security/commoncap.c |
4475 |
+index 7cd61a5..beac025 100644 |
4476 |
+--- a/security/commoncap.c |
4477 |
++++ b/security/commoncap.c |
4478 |
+@@ -916,7 +916,6 @@ changed: |
4479 |
+ return commit_creds(new); |
4480 |
+ |
4481 |
+ no_change: |
4482 |
+- error = 0; |
4483 |
+ error: |
4484 |
+ abort_creds(new); |
4485 |
+ return error; |
4486 |
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c |
4487 |
+index e7ded13..c1c5f36 100644 |
4488 |
+--- a/security/smack/smack_lsm.c |
4489 |
++++ b/security/smack/smack_lsm.c |
4490 |
+@@ -607,6 +607,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name, |
4491 |
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { |
4492 |
+ if (!capable(CAP_MAC_ADMIN)) |
4493 |
+ rc = -EPERM; |
4494 |
++ if (size == 0) |
4495 |
++ rc = -EINVAL; |
4496 |
+ } else |
4497 |
+ rc = cap_inode_setxattr(dentry, name, value, size, flags); |
4498 |
+ |
4499 |
+@@ -1430,7 +1432,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, |
4500 |
+ struct socket *sock; |
4501 |
+ int rc = 0; |
4502 |
+ |
4503 |
+- if (value == NULL || size > SMK_LABELLEN) |
4504 |
++ if (value == NULL || size > SMK_LABELLEN || size == 0) |
4505 |
+ return -EACCES; |
4506 |
+ |
4507 |
+ sp = smk_import(value, size); |
4508 |
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c |
4509 |
+index d03f992..cef1ce0 100644 |
4510 |
+--- a/sound/pci/hda/hda_codec.c |
4511 |
++++ b/sound/pci/hda/hda_codec.c |
4512 |
+@@ -2003,7 +2003,11 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, |
4513 |
+ err = bus->ops.command(bus, res); |
4514 |
+ if (!err) { |
4515 |
+ struct hda_cache_head *c; |
4516 |
+- u32 key = build_cmd_cache_key(nid, verb); |
4517 |
++ u32 key; |
4518 |
++ /* parm may contain the verb stuff for get/set amp */ |
4519 |
++ verb = verb | (parm >> 8); |
4520 |
++ parm &= 0xff; |
4521 |
++ key = build_cmd_cache_key(nid, verb); |
4522 |
+ c = get_alloc_hash(&codec->cmd_cache, key); |
4523 |
+ if (c) |
4524 |
+ c->val = parm; |
4525 |
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c |
4526 |
+index e486123..5a6d6d8 100644 |
4527 |
+--- a/sound/pci/hda/patch_analog.c |
4528 |
++++ b/sound/pci/hda/patch_analog.c |
4529 |
+@@ -3239,7 +3239,7 @@ static const char *ad1884_slave_vols[] = { |
4530 |
+ "Mic Playback Volume", |
4531 |
+ "CD Playback Volume", |
4532 |
+ "Internal Mic Playback Volume", |
4533 |
+- "Docking Mic Playback Volume" |
4534 |
++ "Docking Mic Playback Volume", |
4535 |
+ "Beep Playback Volume", |
4536 |
+ "IEC958 Playback Volume", |
4537 |
+ NULL |
4538 |
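
The sound/pci/hda/patch_analog.c change only adds a comma, but without it the two adjacent string literals were concatenated by the compiler, so the slave-volume list lost "Beep Playback Volume" and gained one garbled name instead. A runnable demonstration of that C string-literal behaviour:

#include <stdio.h>

int main(void)
{
    /* Without the comma, adjacent literals merge into one string and the
     * array silently ends up one element short -- the bug fixed above. */
    const char *broken[] = {
        "Docking Mic Playback Volume"     /* missing comma */
        "Beep Playback Volume",
        NULL
    };
    const char *fixed[] = {
        "Docking Mic Playback Volume",
        "Beep Playback Volume",
        NULL
    };

    printf("broken[0] = \"%s\"\n", broken[0]);
    printf("fixed[0]  = \"%s\", fixed[1] = \"%s\"\n", fixed[0], fixed[1]);
    return 0;
}
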
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c |
4539 |
+index 23b81cf..e85a2bc 100644 |
4540 |
+--- a/virt/kvm/ioapic.c |
4541 |
++++ b/virt/kvm/ioapic.c |
4542 |
+@@ -101,6 +101,7 @@ static void ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx) |
4543 |
+ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
4544 |
+ { |
4545 |
+ unsigned index; |
4546 |
++ bool mask_before, mask_after; |
4547 |
+ |
4548 |
+ switch (ioapic->ioregsel) { |
4549 |
+ case IOAPIC_REG_VERSION: |
4550 |
+@@ -120,6 +121,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
4551 |
+ ioapic_debug("change redir index %x val %x\n", index, val); |
4552 |
+ if (index >= IOAPIC_NUM_PINS) |
4553 |
+ return; |
4554 |
++ mask_before = ioapic->redirtbl[index].fields.mask; |
4555 |
+ if (ioapic->ioregsel & 1) { |
4556 |
+ ioapic->redirtbl[index].bits &= 0xffffffff; |
4557 |
+ ioapic->redirtbl[index].bits |= (u64) val << 32; |
4558 |
+@@ -128,6 +130,9 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val) |
4559 |
+ ioapic->redirtbl[index].bits |= (u32) val; |
4560 |
+ ioapic->redirtbl[index].fields.remote_irr = 0; |
4561 |
+ } |
4562 |
++ mask_after = ioapic->redirtbl[index].fields.mask; |
4563 |
++ if (mask_before != mask_after) |
4564 |
++ kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after); |
4565 |
+ if (ioapic->irr & (1 << index)) |
4566 |
+ ioapic_service(ioapic, index); |
4567 |
+ break; |
4568 |
+@@ -426,3 +431,4 @@ int kvm_ioapic_init(struct kvm *kvm) |
4569 |
+ kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev); |
4570 |
+ return 0; |
4571 |
+ } |
4572 |
++ |
4573 |
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c |
4574 |
+index aa5d1e5..5162a41 100644 |
4575 |
+--- a/virt/kvm/irq_comm.c |
4576 |
++++ b/virt/kvm/irq_comm.c |
4577 |
+@@ -99,3 +99,27 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) |
4578 |
+ clear_bit(irq_source_id, &kvm->arch.irq_states[i]); |
4579 |
+ clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); |
4580 |
+ } |
4581 |
++ |
4582 |
++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, |
4583 |
++ struct kvm_irq_mask_notifier *kimn) |
4584 |
++{ |
4585 |
++ kimn->irq = irq; |
4586 |
++ hlist_add_head(&kimn->link, &kvm->mask_notifier_list); |
4587 |
++} |
4588 |
++ |
4589 |
++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, |
4590 |
++ struct kvm_irq_mask_notifier *kimn) |
4591 |
++{ |
4592 |
++ hlist_del(&kimn->link); |
4593 |
++} |
4594 |
++ |
4595 |
++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask) |
4596 |
++{ |
4597 |
++ struct kvm_irq_mask_notifier *kimn; |
4598 |
++ struct hlist_node *n; |
4599 |
++ |
4600 |
++ hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link) |
4601 |
++ if (kimn->irq == irq) |
4602 |
++ kimn->func(kimn, mask); |
4603 |
++} |
4604 |
++ |
4605 |
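
virt/kvm/irq_comm.c adds a per-VM list of mask notifiers that ioapic_write_indirect() fires whenever a redirection entry's mask bit changes. A user-space sketch of that observer pattern, assuming nothing beyond what the hunks show (the list handling and names below are invented):

#include <stdbool.h>
#include <stdio.h>

/* Observers register for an irq and are called back when its mask flips. */
struct mask_notifier {
    int irq;
    void (*func)(struct mask_notifier *n, bool masked);
    struct mask_notifier *next;
};

static struct mask_notifier *notifiers;   /* stands in for kvm->mask_notifier_list */

static void register_mask_notifier(struct mask_notifier *n, int irq)
{
    n->irq = irq;
    n->next = notifiers;
    notifiers = n;
}

static void fire_mask_notifiers(int irq, bool masked)
{
    for (struct mask_notifier *n = notifiers; n; n = n->next)
        if (n->irq == irq)
            n->func(n, masked);
}

static void on_mask_change(struct mask_notifier *n, bool masked)
{
    printf("irq %d is now %s\n", n->irq, masked ? "masked" : "unmasked");
}

int main(void)
{
    struct mask_notifier n = { 0, on_mask_change, NULL };
    register_mask_notifier(&n, 4);

    bool mask_before = true, mask_after = false;   /* as tracked in the ioapic hunk */
    if (mask_before != mask_after)
        fire_mask_notifiers(4, mask_after);
    return 0;
}

As in the patch, the caller records the mask bit before and after the register write and only notifies when it actually changed.
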
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
4606 |
+index 29a667c..6723411 100644 |
4607 |
+--- a/virt/kvm/kvm_main.c |
4608 |
++++ b/virt/kvm/kvm_main.c |
4609 |
+@@ -563,7 +563,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm, |
4610 |
+ goto out; |
4611 |
+ } |
4612 |
+ |
4613 |
+- if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) |
4614 |
++ if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) |
4615 |
+ kvm_deassign_device(kvm, match); |
4616 |
+ |
4617 |
+ kvm_free_assigned_device(kvm, match); |
4618 |
+@@ -581,8 +581,10 @@ static inline int valid_vcpu(int n) |
4619 |
+ |
4620 |
+ inline int kvm_is_mmio_pfn(pfn_t pfn) |
4621 |
+ { |
4622 |
+- if (pfn_valid(pfn)) |
4623 |
+- return PageReserved(pfn_to_page(pfn)); |
4624 |
++ if (pfn_valid(pfn)) { |
4625 |
++ struct page *page = compound_head(pfn_to_page(pfn)); |
4626 |
++ return PageReserved(page); |
4627 |
++ } |
4628 |
+ |
4629 |
+ return true; |
4630 |
+ } |
4631 |
+@@ -828,6 +830,9 @@ static struct kvm *kvm_create_vm(void) |
4632 |
+ |
4633 |
+ if (IS_ERR(kvm)) |
4634 |
+ goto out; |
4635 |
++#ifdef CONFIG_HAVE_KVM_IRQCHIP |
4636 |
++ INIT_HLIST_HEAD(&kvm->mask_notifier_list); |
4637 |
++#endif |
4638 |
+ |
4639 |
+ #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
4640 |
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
4641 |
|
4642 |
Deleted: genpatches-2.6/trunk/2.6.29/1700_flush-pending-TLB-entries.patch |
4643 |
=================================================================== |
4644 |
--- genpatches-2.6/trunk/2.6.29/1700_flush-pending-TLB-entries.patch 2009-04-25 01:48:57 UTC (rev 1548) |
4645 |
+++ genpatches-2.6/trunk/2.6.29/1700_flush-pending-TLB-entries.patch 2009-04-29 00:49:18 UTC (rev 1549) |
4646 |
@@ -1,14 +0,0 @@ |
4647 |
---- a/arch/sparc/include/asm/tlb_64.h 2009-04-23 20:58:02.000000000 -0400 |
4648 |
-+++ b/arch/sparc/include/asm/tlb_64.h 2009-04-23 20:58:27.000000000 -0400 |
4649 |
-@@ -57,9 +57,9 @@ static inline struct mmu_gather *tlb_gat |
4650 |
- |
4651 |
- static inline void tlb_flush_mmu(struct mmu_gather *mp) |
4652 |
- { |
4653 |
-+ if (!mp->fullmm) |
4654 |
-+ flush_tlb_pending(); |
4655 |
- if (mp->need_flush) { |
4656 |
-- if (!mp->fullmm) |
4657 |
-- flush_tlb_pending(); |
4658 |
- free_pages_and_swap_cache(mp->pages, mp->pages_nr); |
4659 |
- mp->pages_nr = 0; |
4660 |
- mp->need_flush = 0; |