1 |
Author: mpagano |
2 |
Date: 2009-09-09 12:49:53 +0000 (Wed, 09 Sep 2009) |
3 |
New Revision: 1603 |
4 |
|
5 |
Added: |
6 |
genpatches-2.6/trunk/2.6.30/1005_linux-2.6.30.6.patch |
7 |
Removed: |
8 |
genpatches-2.6/trunk/2.6.30/1700_empty-ipi-check.patch |
9 |
Modified: |
10 |
genpatches-2.6/trunk/2.6.30/0000_README |
11 |
Log: |
12 |
Linux 2.6.30.6 patch and the removal of redundant patch |
13 |
|
14 |
Modified: genpatches-2.6/trunk/2.6.30/0000_README |
15 |
=================================================================== |
16 |
--- genpatches-2.6/trunk/2.6.30/0000_README 2009-09-06 16:23:11 UTC (rev 1602) |
17 |
+++ genpatches-2.6/trunk/2.6.30/0000_README 2009-09-09 12:49:53 UTC (rev 1603) |
18 |
@@ -59,14 +59,14 @@ |
19 |
From: http://www.kernel.org |
20 |
Desc: Linux 2.6.30.5 |
21 |
|
22 |
+Patch: 1005_linux-2.6.30.6.patch |
23 |
+From: http://www.kernel.org |
24 |
+Desc: Linux 2.6.30.6 |
25 |
+ |
26 |
Patch: 1510_hid-move-deref-below-null-test.patch |
27 |
From: http://lwn.net/Articles/342440/ |
28 |
Desc: HID: Move dereferences below a NULL test |
29 |
|
30 |
-Patch: 1700_empty-ipi-check.patch |
31 |
-From: http://bugs.gentoo.org/show_bug.cgi?id=273936 |
32 |
-Desc: Fixes freezes caused by careless IPI sends on some x86 setups |
33 |
- |
34 |
Patch: 2500_ide-cd-handle-fragmented-patckets.patch |
35 |
From: http://bugs.gentoo.org/show_bug.cgi?id=274182 |
36 |
Desc: ide-cd: handle fragmented packet commands gracefully |
37 |
|
38 |
Added: genpatches-2.6/trunk/2.6.30/1005_linux-2.6.30.6.patch |
39 |
=================================================================== |
40 |
--- genpatches-2.6/trunk/2.6.30/1005_linux-2.6.30.6.patch (rev 0) |
41 |
+++ genpatches-2.6/trunk/2.6.30/1005_linux-2.6.30.6.patch 2009-09-09 12:49:53 UTC (rev 1603) |
42 |
@@ -0,0 +1,3878 @@ |
43 |
+diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c |
44 |
+index eb98738..391f637 100644 |
45 |
+--- a/arch/ia64/kernel/pci-dma.c |
46 |
++++ b/arch/ia64/kernel/pci-dma.c |
47 |
+@@ -67,11 +67,6 @@ iommu_dma_init(void) |
48 |
+ |
49 |
+ int iommu_dma_supported(struct device *dev, u64 mask) |
50 |
+ { |
51 |
+- struct dma_map_ops *ops = platform_dma_get_ops(dev); |
52 |
+- |
53 |
+- if (ops->dma_supported) |
54 |
+- return ops->dma_supported(dev, mask); |
55 |
+- |
56 |
+ /* Copied from i386. Doesn't make much sense, because it will |
57 |
+ only work for pci_alloc_coherent. |
58 |
+ The caller just has to use GFP_DMA in this case. */ |
59 |
+diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c |
60 |
+index b178a1e..40b5cb4 100644 |
61 |
+--- a/arch/powerpc/platforms/ps3/time.c |
62 |
++++ b/arch/powerpc/platforms/ps3/time.c |
63 |
+@@ -21,6 +21,7 @@ |
64 |
+ #include <linux/kernel.h> |
65 |
+ #include <linux/platform_device.h> |
66 |
+ |
67 |
++#include <asm/firmware.h> |
68 |
+ #include <asm/rtc.h> |
69 |
+ #include <asm/lv1call.h> |
70 |
+ #include <asm/ps3.h> |
71 |
+@@ -84,6 +85,9 @@ static int __init ps3_rtc_init(void) |
72 |
+ { |
73 |
+ struct platform_device *pdev; |
74 |
+ |
75 |
++ if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) |
76 |
++ return -ENODEV; |
77 |
++ |
78 |
+ pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0); |
79 |
+ if (IS_ERR(pdev)) |
80 |
+ return PTR_ERR(pdev); |
81 |
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
82 |
+index a93d1cc..9a9efb0 100644 |
83 |
+--- a/arch/x86/include/asm/kvm_host.h |
84 |
++++ b/arch/x86/include/asm/kvm_host.h |
85 |
+@@ -185,6 +185,7 @@ union kvm_mmu_page_role { |
86 |
+ unsigned access:3; |
87 |
+ unsigned invalid:1; |
88 |
+ unsigned cr4_pge:1; |
89 |
++ unsigned nxe:1; |
90 |
+ }; |
91 |
+ }; |
92 |
+ |
93 |
+@@ -513,6 +514,8 @@ struct kvm_x86_ops { |
94 |
+ void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run); |
95 |
+ int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); |
96 |
+ void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
97 |
++ void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
98 |
++ u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
99 |
+ void (*patch_hypercall)(struct kvm_vcpu *vcpu, |
100 |
+ unsigned char *hypercall_addr); |
101 |
+ int (*get_irq)(struct kvm_vcpu *vcpu); |
102 |
+diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h |
103 |
+index 6a15973..b7ed2c4 100644 |
104 |
+--- a/arch/x86/include/asm/kvm_x86_emulate.h |
105 |
++++ b/arch/x86/include/asm/kvm_x86_emulate.h |
106 |
+@@ -143,6 +143,9 @@ struct decode_cache { |
107 |
+ struct fetch_cache fetch; |
108 |
+ }; |
109 |
+ |
110 |
++#define X86_SHADOW_INT_MOV_SS 1 |
111 |
++#define X86_SHADOW_INT_STI 2 |
112 |
++ |
113 |
+ struct x86_emulate_ctxt { |
114 |
+ /* Register state before/after emulation. */ |
115 |
+ struct kvm_vcpu *vcpu; |
116 |
+@@ -152,6 +155,9 @@ struct x86_emulate_ctxt { |
117 |
+ int mode; |
118 |
+ u32 cs_base; |
119 |
+ |
120 |
++ /* interruptibility state, as a result of execution of STI or MOV SS */ |
121 |
++ int interruptibility; |
122 |
++ |
123 |
+ /* decode cache */ |
124 |
+ struct decode_cache decode; |
125 |
+ }; |
126 |
+diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c |
127 |
+index dbf5445..6ef00ba 100644 |
128 |
+--- a/arch/x86/kernel/apic/ipi.c |
129 |
++++ b/arch/x86/kernel/apic/ipi.c |
130 |
+@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector) |
131 |
+ unsigned long mask = cpumask_bits(cpumask)[0]; |
132 |
+ unsigned long flags; |
133 |
+ |
134 |
++ if (WARN_ONCE(!mask, "empty IPI mask")) |
135 |
++ return; |
136 |
++ |
137 |
+ local_irq_save(flags); |
138 |
+ WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); |
139 |
+ __default_send_IPI_dest_field(mask, vector, apic->dest_logical); |
140 |
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
141 |
+index d869b3b..61a592e 100644 |
142 |
+--- a/arch/x86/kernel/cpu/amd.c |
143 |
++++ b/arch/x86/kernel/cpu/amd.c |
144 |
+@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) |
145 |
+ #endif |
146 |
+ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) |
147 |
+ /* check CPU config space for extended APIC ID */ |
148 |
+- if (c->x86 >= 0xf) { |
149 |
++ if (cpu_has_apic && c->x86 >= 0xf) { |
150 |
+ unsigned int val; |
151 |
+ val = read_pci_config(0, 24, 0, 0x68); |
152 |
+ if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18))) |
153 |
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
154 |
+index 32cf11e..d7ce26b 100644 |
155 |
+--- a/arch/x86/kvm/mmu.c |
156 |
++++ b/arch/x86/kvm/mmu.c |
157 |
+@@ -490,16 +490,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage) |
158 |
+ * |
159 |
+ * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc |
160 |
+ * containing more mappings. |
161 |
++ * |
162 |
++ * Returns the number of rmap entries before the spte was added or zero if |
163 |
++ * the spte was not added. |
164 |
++ * |
165 |
+ */ |
166 |
+-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) |
167 |
++static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) |
168 |
+ { |
169 |
+ struct kvm_mmu_page *sp; |
170 |
+ struct kvm_rmap_desc *desc; |
171 |
+ unsigned long *rmapp; |
172 |
+- int i; |
173 |
++ int i, count = 0; |
174 |
+ |
175 |
+ if (!is_rmap_pte(*spte)) |
176 |
+- return; |
177 |
++ return count; |
178 |
+ gfn = unalias_gfn(vcpu->kvm, gfn); |
179 |
+ sp = page_header(__pa(spte)); |
180 |
+ sp->gfns[spte - sp->spt] = gfn; |
181 |
+@@ -516,8 +520,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) |
182 |
+ } else { |
183 |
+ rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); |
184 |
+ desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); |
185 |
+- while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) |
186 |
++ while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) { |
187 |
+ desc = desc->more; |
188 |
++ count += RMAP_EXT; |
189 |
++ } |
190 |
+ if (desc->shadow_ptes[RMAP_EXT-1]) { |
191 |
+ desc->more = mmu_alloc_rmap_desc(vcpu); |
192 |
+ desc = desc->more; |
193 |
+@@ -526,6 +532,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) |
194 |
+ ; |
195 |
+ desc->shadow_ptes[i] = spte; |
196 |
+ } |
197 |
++ return count; |
198 |
+ } |
199 |
+ |
200 |
+ static void rmap_desc_remove_entry(unsigned long *rmapp, |
201 |
+@@ -755,6 +762,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) |
202 |
+ return young; |
203 |
+ } |
204 |
+ |
205 |
++#define RMAP_RECYCLE_THRESHOLD 1000 |
206 |
++ |
207 |
++static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage) |
208 |
++{ |
209 |
++ unsigned long *rmapp; |
210 |
++ |
211 |
++ gfn = unalias_gfn(vcpu->kvm, gfn); |
212 |
++ rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage); |
213 |
++ |
214 |
++ kvm_unmap_rmapp(vcpu->kvm, rmapp); |
215 |
++ kvm_flush_remote_tlbs(vcpu->kvm); |
216 |
++} |
217 |
++ |
218 |
+ int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
219 |
+ { |
220 |
+ return kvm_handle_hva(kvm, hva, kvm_age_rmapp); |
221 |
+@@ -1417,24 +1437,25 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
222 |
+ */ |
223 |
+ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) |
224 |
+ { |
225 |
++ int used_pages; |
226 |
++ |
227 |
++ used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages; |
228 |
++ used_pages = max(0, used_pages); |
229 |
++ |
230 |
+ /* |
231 |
+ * If we set the number of mmu pages to be smaller be than the |
232 |
+ * number of actived pages , we must to free some mmu pages before we |
233 |
+ * change the value |
234 |
+ */ |
235 |
+ |
236 |
+- if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) > |
237 |
+- kvm_nr_mmu_pages) { |
238 |
+- int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages |
239 |
+- - kvm->arch.n_free_mmu_pages; |
240 |
+- |
241 |
+- while (n_used_mmu_pages > kvm_nr_mmu_pages) { |
242 |
++ if (used_pages > kvm_nr_mmu_pages) { |
243 |
++ while (used_pages > kvm_nr_mmu_pages) { |
244 |
+ struct kvm_mmu_page *page; |
245 |
+ |
246 |
+ page = container_of(kvm->arch.active_mmu_pages.prev, |
247 |
+ struct kvm_mmu_page, link); |
248 |
+ kvm_mmu_zap_page(kvm, page); |
249 |
+- n_used_mmu_pages--; |
250 |
++ used_pages--; |
251 |
+ } |
252 |
+ kvm->arch.n_free_mmu_pages = 0; |
253 |
+ } |
254 |
+@@ -1770,6 +1791,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, |
255 |
+ { |
256 |
+ int was_rmapped = 0; |
257 |
+ int was_writeble = is_writeble_pte(*shadow_pte); |
258 |
++ int rmap_count; |
259 |
+ |
260 |
+ pgprintk("%s: spte %llx access %x write_fault %d" |
261 |
+ " user_fault %d gfn %lx\n", |
262 |
+@@ -1811,9 +1833,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, |
263 |
+ |
264 |
+ page_header_update_slot(vcpu->kvm, shadow_pte, gfn); |
265 |
+ if (!was_rmapped) { |
266 |
+- rmap_add(vcpu, shadow_pte, gfn, largepage); |
267 |
++ rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage); |
268 |
+ if (!is_rmap_pte(*shadow_pte)) |
269 |
+ kvm_release_pfn_clean(pfn); |
270 |
++ if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
271 |
++ rmap_recycle(vcpu, gfn, largepage); |
272 |
+ } else { |
273 |
+ if (was_writeble) |
274 |
+ kvm_release_pfn_dirty(pfn); |
275 |
+@@ -1942,7 +1966,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu) |
276 |
+ vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
277 |
+ } |
278 |
+ |
279 |
+-static void mmu_alloc_roots(struct kvm_vcpu *vcpu) |
280 |
++static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
281 |
++{ |
282 |
++ int ret = 0; |
283 |
++ |
284 |
++ if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { |
285 |
++ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); |
286 |
++ ret = 1; |
287 |
++ } |
288 |
++ |
289 |
++ return ret; |
290 |
++} |
291 |
++ |
292 |
++static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
293 |
+ { |
294 |
+ int i; |
295 |
+ gfn_t root_gfn; |
296 |
+@@ -1957,13 +1993,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) |
297 |
+ ASSERT(!VALID_PAGE(root)); |
298 |
+ if (tdp_enabled) |
299 |
+ direct = 1; |
300 |
++ if (mmu_check_root(vcpu, root_gfn)) |
301 |
++ return 1; |
302 |
+ sp = kvm_mmu_get_page(vcpu, root_gfn, 0, |
303 |
+ PT64_ROOT_LEVEL, direct, |
304 |
+ ACC_ALL, NULL); |
305 |
+ root = __pa(sp->spt); |
306 |
+ ++sp->root_count; |
307 |
+ vcpu->arch.mmu.root_hpa = root; |
308 |
+- return; |
309 |
++ return 0; |
310 |
+ } |
311 |
+ direct = !is_paging(vcpu); |
312 |
+ if (tdp_enabled) |
313 |
+@@ -1980,6 +2018,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) |
314 |
+ root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT; |
315 |
+ } else if (vcpu->arch.mmu.root_level == 0) |
316 |
+ root_gfn = 0; |
317 |
++ if (mmu_check_root(vcpu, root_gfn)) |
318 |
++ return 1; |
319 |
+ sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
320 |
+ PT32_ROOT_LEVEL, direct, |
321 |
+ ACC_ALL, NULL); |
322 |
+@@ -1988,6 +2028,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) |
323 |
+ vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; |
324 |
+ } |
325 |
+ vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
326 |
++ return 0; |
327 |
+ } |
328 |
+ |
329 |
+ static void mmu_sync_roots(struct kvm_vcpu *vcpu) |
330 |
+@@ -2006,7 +2047,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) |
331 |
+ for (i = 0; i < 4; ++i) { |
332 |
+ hpa_t root = vcpu->arch.mmu.pae_root[i]; |
333 |
+ |
334 |
+- if (root) { |
335 |
++ if (root && VALID_PAGE(root)) { |
336 |
+ root &= PT64_BASE_ADDR_MASK; |
337 |
+ sp = page_header(root); |
338 |
+ mmu_sync_children(vcpu, sp); |
339 |
+@@ -2290,9 +2331,11 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) |
340 |
+ goto out; |
341 |
+ spin_lock(&vcpu->kvm->mmu_lock); |
342 |
+ kvm_mmu_free_some_pages(vcpu); |
343 |
+- mmu_alloc_roots(vcpu); |
344 |
++ r = mmu_alloc_roots(vcpu); |
345 |
+ mmu_sync_roots(vcpu); |
346 |
+ spin_unlock(&vcpu->kvm->mmu_lock); |
347 |
++ if (r) |
348 |
++ goto out; |
349 |
+ kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa); |
350 |
+ kvm_mmu_flush_tlb(vcpu); |
351 |
+ out: |
352 |
+@@ -2638,14 +2681,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp); |
353 |
+ |
354 |
+ static void free_mmu_pages(struct kvm_vcpu *vcpu) |
355 |
+ { |
356 |
+- struct kvm_mmu_page *sp; |
357 |
+- |
358 |
+- while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) { |
359 |
+- sp = container_of(vcpu->kvm->arch.active_mmu_pages.next, |
360 |
+- struct kvm_mmu_page, link); |
361 |
+- kvm_mmu_zap_page(vcpu->kvm, sp); |
362 |
+- cond_resched(); |
363 |
+- } |
364 |
+ free_page((unsigned long)vcpu->arch.mmu.pae_root); |
365 |
+ } |
366 |
+ |
367 |
+@@ -2710,7 +2745,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) |
368 |
+ { |
369 |
+ struct kvm_mmu_page *sp; |
370 |
+ |
371 |
+- spin_lock(&kvm->mmu_lock); |
372 |
+ list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { |
373 |
+ int i; |
374 |
+ u64 *pt; |
375 |
+@@ -2725,7 +2759,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) |
376 |
+ pt[i] &= ~PT_WRITABLE_MASK; |
377 |
+ } |
378 |
+ kvm_flush_remote_tlbs(kvm); |
379 |
+- spin_unlock(&kvm->mmu_lock); |
380 |
+ } |
381 |
+ |
382 |
+ void kvm_mmu_zap_all(struct kvm *kvm) |
383 |
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
384 |
+index 1f8510c..5700009 100644 |
385 |
+--- a/arch/x86/kvm/svm.c |
386 |
++++ b/arch/x86/kvm/svm.c |
387 |
+@@ -227,6 +227,27 @@ static int is_external_interrupt(u32 info) |
388 |
+ return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); |
389 |
+ } |
390 |
+ |
391 |
++static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
392 |
++{ |
393 |
++ struct vcpu_svm *svm = to_svm(vcpu); |
394 |
++ u32 ret = 0; |
395 |
++ |
396 |
++ if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) |
397 |
++ ret |= X86_SHADOW_INT_STI | X86_SHADOW_INT_MOV_SS; |
398 |
++ return ret & mask; |
399 |
++} |
400 |
++ |
401 |
++static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
402 |
++{ |
403 |
++ struct vcpu_svm *svm = to_svm(vcpu); |
404 |
++ |
405 |
++ if (mask == 0) |
406 |
++ svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; |
407 |
++ else |
408 |
++ svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; |
409 |
++ |
410 |
++} |
411 |
++ |
412 |
+ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) |
413 |
+ { |
414 |
+ struct vcpu_svm *svm = to_svm(vcpu); |
415 |
+@@ -240,7 +261,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) |
416 |
+ __func__, kvm_rip_read(vcpu), svm->next_rip); |
417 |
+ |
418 |
+ kvm_rip_write(vcpu, svm->next_rip); |
419 |
+- svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; |
420 |
++ svm_set_interrupt_shadow(vcpu, 0); |
421 |
+ |
422 |
+ vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK); |
423 |
+ } |
424 |
+@@ -715,6 +736,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
425 |
+ svm->vmcb->control.tsc_offset += delta; |
426 |
+ vcpu->cpu = cpu; |
427 |
+ kvm_migrate_timers(vcpu); |
428 |
++ svm->asid_generation = 0; |
429 |
+ } |
430 |
+ |
431 |
+ for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
432 |
+@@ -1025,7 +1047,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data) |
433 |
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
434 |
+ } |
435 |
+ |
436 |
+- svm->vcpu.cpu = svm_data->cpu; |
437 |
+ svm->asid_generation = svm_data->asid_generation; |
438 |
+ svm->vmcb->control.asid = svm_data->next_asid++; |
439 |
+ } |
440 |
+@@ -2237,8 +2258,8 @@ static void pre_svm_run(struct vcpu_svm *svm) |
441 |
+ struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu); |
442 |
+ |
443 |
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
444 |
+- if (svm->vcpu.cpu != cpu || |
445 |
+- svm->asid_generation != svm_data->asid_generation) |
446 |
++ /* FIXME: handle wraparound of asid_generation */ |
447 |
++ if (svm->asid_generation != svm_data->asid_generation) |
448 |
+ new_asid(svm, svm_data); |
449 |
+ } |
450 |
+ |
451 |
+@@ -2667,6 +2688,8 @@ static struct kvm_x86_ops svm_x86_ops = { |
452 |
+ .run = svm_vcpu_run, |
453 |
+ .handle_exit = handle_exit, |
454 |
+ .skip_emulated_instruction = skip_emulated_instruction, |
455 |
++ .set_interrupt_shadow = svm_set_interrupt_shadow, |
456 |
++ .get_interrupt_shadow = svm_get_interrupt_shadow, |
457 |
+ .patch_hypercall = svm_patch_hypercall, |
458 |
+ .get_irq = svm_get_irq, |
459 |
+ .set_irq = svm_set_irq, |
460 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
461 |
+index fa0adcd..1a0d5cd 100644 |
462 |
+--- a/arch/x86/kvm/vmx.c |
463 |
++++ b/arch/x86/kvm/vmx.c |
464 |
+@@ -732,23 +732,45 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
465 |
+ vmcs_writel(GUEST_RFLAGS, rflags); |
466 |
+ } |
467 |
+ |
468 |
++static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
469 |
++{ |
470 |
++ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
471 |
++ int ret = 0; |
472 |
++ |
473 |
++ if (interruptibility & GUEST_INTR_STATE_STI) |
474 |
++ ret |= X86_SHADOW_INT_STI; |
475 |
++ if (interruptibility & GUEST_INTR_STATE_MOV_SS) |
476 |
++ ret |= X86_SHADOW_INT_MOV_SS; |
477 |
++ |
478 |
++ return ret & mask; |
479 |
++} |
480 |
++ |
481 |
++static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
482 |
++{ |
483 |
++ u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
484 |
++ u32 interruptibility = interruptibility_old; |
485 |
++ |
486 |
++ interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); |
487 |
++ |
488 |
++ if (mask & X86_SHADOW_INT_MOV_SS) |
489 |
++ interruptibility |= GUEST_INTR_STATE_MOV_SS; |
490 |
++ if (mask & X86_SHADOW_INT_STI) |
491 |
++ interruptibility |= GUEST_INTR_STATE_STI; |
492 |
++ |
493 |
++ if ((interruptibility != interruptibility_old)) |
494 |
++ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); |
495 |
++} |
496 |
++ |
497 |
+ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) |
498 |
+ { |
499 |
+ unsigned long rip; |
500 |
+- u32 interruptibility; |
501 |
+ |
502 |
+ rip = kvm_rip_read(vcpu); |
503 |
+ rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
504 |
+ kvm_rip_write(vcpu, rip); |
505 |
+ |
506 |
+- /* |
507 |
+- * We emulated an instruction, so temporary interrupt blocking |
508 |
+- * should be removed, if set. |
509 |
+- */ |
510 |
+- interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
511 |
+- if (interruptibility & 3) |
512 |
+- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, |
513 |
+- interruptibility & ~3); |
514 |
++ /* skipping an emulated instruction also counts */ |
515 |
++ vmx_set_interrupt_shadow(vcpu, 0); |
516 |
+ vcpu->arch.interrupt_window_open = 1; |
517 |
+ } |
518 |
+ |
519 |
+@@ -3738,6 +3760,8 @@ static struct kvm_x86_ops vmx_x86_ops = { |
520 |
+ .run = vmx_vcpu_run, |
521 |
+ .handle_exit = kvm_handle_exit, |
522 |
+ .skip_emulated_instruction = skip_emulated_instruction, |
523 |
++ .set_interrupt_shadow = vmx_set_interrupt_shadow, |
524 |
++ .get_interrupt_shadow = vmx_get_interrupt_shadow, |
525 |
+ .patch_hypercall = vmx_patch_hypercall, |
526 |
+ .get_irq = vmx_get_irq, |
527 |
+ .set_irq = vmx_inject_irq, |
528 |
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
529 |
+index ee4714b..0b1bfc6 100644 |
530 |
+--- a/arch/x86/kvm/x86.c |
531 |
++++ b/arch/x86/kvm/x86.c |
532 |
+@@ -523,6 +523,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
533 |
+ efer |= vcpu->arch.shadow_efer & EFER_LMA; |
534 |
+ |
535 |
+ vcpu->arch.shadow_efer = efer; |
536 |
++ |
537 |
++ vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; |
538 |
++ kvm_mmu_reset_context(vcpu); |
539 |
+ } |
540 |
+ |
541 |
+ void kvm_enable_efer_bits(u64 mask) |
542 |
+@@ -703,11 +706,48 @@ static bool msr_mtrr_valid(unsigned msr) |
543 |
+ return false; |
544 |
+ } |
545 |
+ |
546 |
++static bool valid_pat_type(unsigned t) |
547 |
++{ |
548 |
++ return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ |
549 |
++} |
550 |
++ |
551 |
++static bool valid_mtrr_type(unsigned t) |
552 |
++{ |
553 |
++ return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ |
554 |
++} |
555 |
++ |
556 |
++static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
557 |
++{ |
558 |
++ int i; |
559 |
++ |
560 |
++ if (!msr_mtrr_valid(msr)) |
561 |
++ return false; |
562 |
++ |
563 |
++ if (msr == MSR_IA32_CR_PAT) { |
564 |
++ for (i = 0; i < 8; i++) |
565 |
++ if (!valid_pat_type((data >> (i * 8)) & 0xff)) |
566 |
++ return false; |
567 |
++ return true; |
568 |
++ } else if (msr == MSR_MTRRdefType) { |
569 |
++ if (data & ~0xcff) |
570 |
++ return false; |
571 |
++ return valid_mtrr_type(data & 0xff); |
572 |
++ } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { |
573 |
++ for (i = 0; i < 8 ; i++) |
574 |
++ if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) |
575 |
++ return false; |
576 |
++ return true; |
577 |
++ } |
578 |
++ |
579 |
++ /* variable MTRRs */ |
580 |
++ return valid_mtrr_type(data & 0xff); |
581 |
++} |
582 |
++ |
583 |
+ static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
584 |
+ { |
585 |
+ u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; |
586 |
+ |
587 |
+- if (!msr_mtrr_valid(msr)) |
588 |
++ if (!mtrr_valid(vcpu, msr, data)) |
589 |
+ return 1; |
590 |
+ |
591 |
+ if (msr == MSR_MTRRdefType) { |
592 |
+@@ -895,6 +935,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) |
593 |
+ case MSR_IA32_LASTINTFROMIP: |
594 |
+ case MSR_IA32_LASTINTTOIP: |
595 |
+ case MSR_VM_HSAVE_PA: |
596 |
++ case MSR_P6_EVNTSEL0: |
597 |
++ case MSR_P6_EVNTSEL1: |
598 |
++ case MSR_K7_EVNTSEL0: |
599 |
+ data = 0; |
600 |
+ break; |
601 |
+ case MSR_MTRRcap: |
602 |
+@@ -1074,14 +1117,13 @@ long kvm_arch_dev_ioctl(struct file *filp, |
603 |
+ if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) |
604 |
+ goto out; |
605 |
+ r = -E2BIG; |
606 |
+- if (n < num_msrs_to_save) |
607 |
++ if (n < msr_list.nmsrs) |
608 |
+ goto out; |
609 |
+ r = -EFAULT; |
610 |
+ if (copy_to_user(user_msr_list->indices, &msrs_to_save, |
611 |
+ num_msrs_to_save * sizeof(u32))) |
612 |
+ goto out; |
613 |
+- if (copy_to_user(user_msr_list->indices |
614 |
+- + num_msrs_to_save * sizeof(u32), |
615 |
++ if (copy_to_user(user_msr_list->indices + num_msrs_to_save, |
616 |
+ &emulated_msrs, |
617 |
+ ARRAY_SIZE(emulated_msrs) * sizeof(u32))) |
618 |
+ goto out; |
619 |
+@@ -1250,9 +1292,12 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
620 |
+ bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) | |
621 |
+ bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) | |
622 |
+ bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) | |
623 |
++ bit(X86_FEATURE_MCE) | |
624 |
+ bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) | |
625 |
+- bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) | |
626 |
+- bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) | |
627 |
++ bit(X86_FEATURE_SEP) | bit(X86_FEATURE_MTRR) | |
628 |
++ bit(X86_FEATURE_PGE) | bit(X86_FEATURE_MCA) | |
629 |
++ bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PAT) | |
630 |
++ bit(X86_FEATURE_PSE36) | |
631 |
+ bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) | |
632 |
+ bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) | |
633 |
+ bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP); |
634 |
+@@ -1608,10 +1653,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, |
635 |
+ return -EINVAL; |
636 |
+ |
637 |
+ down_write(&kvm->slots_lock); |
638 |
++ spin_lock(&kvm->mmu_lock); |
639 |
+ |
640 |
+ kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); |
641 |
+ kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; |
642 |
+ |
643 |
++ spin_unlock(&kvm->mmu_lock); |
644 |
+ up_write(&kvm->slots_lock); |
645 |
+ return 0; |
646 |
+ } |
647 |
+@@ -1787,7 +1834,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
648 |
+ |
649 |
+ /* If nothing is dirty, don't bother messing with page tables. */ |
650 |
+ if (is_dirty) { |
651 |
++ spin_lock(&kvm->mmu_lock); |
652 |
+ kvm_mmu_slot_remove_write_access(kvm, log->slot); |
653 |
++ spin_unlock(&kvm->mmu_lock); |
654 |
+ kvm_flush_remote_tlbs(kvm); |
655 |
+ memslot = &kvm->memslots[log->slot]; |
656 |
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; |
657 |
+@@ -2362,7 +2411,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu, |
658 |
+ u16 error_code, |
659 |
+ int emulation_type) |
660 |
+ { |
661 |
+- int r; |
662 |
++ int r, shadow_mask; |
663 |
+ struct decode_cache *c; |
664 |
+ |
665 |
+ kvm_clear_exception_queue(vcpu); |
666 |
+@@ -2411,6 +2460,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu, |
667 |
+ } |
668 |
+ |
669 |
+ r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops); |
670 |
++ shadow_mask = vcpu->arch.emulate_ctxt.interruptibility; |
671 |
++ |
672 |
++ if (r == 0) |
673 |
++ kvm_x86_ops->set_interrupt_shadow(vcpu, shadow_mask); |
674 |
+ |
675 |
+ if (vcpu->arch.pio.string) |
676 |
+ return EMULATE_DO_MMIO; |
677 |
+@@ -4419,12 +4472,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm, |
678 |
+ } |
679 |
+ } |
680 |
+ |
681 |
++ spin_lock(&kvm->mmu_lock); |
682 |
+ if (!kvm->arch.n_requested_mmu_pages) { |
683 |
+ unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); |
684 |
+ kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages); |
685 |
+ } |
686 |
+ |
687 |
+ kvm_mmu_slot_remove_write_access(kvm, mem->slot); |
688 |
++ spin_unlock(&kvm->mmu_lock); |
689 |
+ kvm_flush_remote_tlbs(kvm); |
690 |
+ |
691 |
+ return 0; |
692 |
+@@ -4433,6 +4488,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm, |
693 |
+ void kvm_arch_flush_shadow(struct kvm *kvm) |
694 |
+ { |
695 |
+ kvm_mmu_zap_all(kvm); |
696 |
++ kvm_reload_remote_mmus(kvm); |
697 |
+ } |
698 |
+ |
699 |
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
700 |
+diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c |
701 |
+index ca91749..d80126f 100644 |
702 |
+--- a/arch/x86/kvm/x86_emulate.c |
703 |
++++ b/arch/x86/kvm/x86_emulate.c |
704 |
+@@ -1349,6 +1349,20 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, |
705 |
+ return 0; |
706 |
+ } |
707 |
+ |
708 |
++void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask) |
709 |
++{ |
710 |
++ u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask); |
711 |
++ /* |
712 |
++ * an sti; sti; sequence only disable interrupts for the first |
713 |
++ * instruction. So, if the last instruction, be it emulated or |
714 |
++ * not, left the system with the INT_STI flag enabled, it |
715 |
++ * means that the last instruction is an sti. We should not |
716 |
++ * leave the flag on in this case. The same goes for mov ss |
717 |
++ */ |
718 |
++ if (!(int_shadow & mask)) |
719 |
++ ctxt->interruptibility = mask; |
720 |
++} |
721 |
++ |
722 |
+ int |
723 |
+ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) |
724 |
+ { |
725 |
+@@ -1360,6 +1374,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) |
726 |
+ int io_dir_in; |
727 |
+ int rc = 0; |
728 |
+ |
729 |
++ ctxt->interruptibility = 0; |
730 |
++ |
731 |
+ /* Shadow copy of register state. Committed on successful emulation. |
732 |
+ * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't |
733 |
+ * modify them. |
734 |
+@@ -1609,6 +1625,9 @@ special_insn: |
735 |
+ int err; |
736 |
+ |
737 |
+ sel = c->src.val; |
738 |
++ if (c->modrm_reg == VCPU_SREG_SS) |
739 |
++ toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS); |
740 |
++ |
741 |
+ if (c->modrm_reg <= 5) { |
742 |
+ type_bits = (c->modrm_reg == 1) ? 9 : 1; |
743 |
+ err = kvm_load_segment_descriptor(ctxt->vcpu, sel, |
744 |
+@@ -1865,6 +1884,7 @@ special_insn: |
745 |
+ c->dst.type = OP_NONE; /* Disable writeback. */ |
746 |
+ break; |
747 |
+ case 0xfb: /* sti */ |
748 |
++ toggle_interruptibility(ctxt, X86_SHADOW_INT_STI); |
749 |
+ ctxt->eflags |= X86_EFLAGS_IF; |
750 |
+ c->dst.type = OP_NONE; /* Disable writeback. */ |
751 |
+ break; |
752 |
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c |
753 |
+index 821e970..c814e14 100644 |
754 |
+--- a/arch/x86/mm/tlb.c |
755 |
++++ b/arch/x86/mm/tlb.c |
756 |
+@@ -183,18 +183,17 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask, |
757 |
+ |
758 |
+ f->flush_mm = mm; |
759 |
+ f->flush_va = va; |
760 |
+- cpumask_andnot(to_cpumask(f->flush_cpumask), |
761 |
+- cpumask, cpumask_of(smp_processor_id())); |
762 |
+- |
763 |
+- /* |
764 |
+- * We have to send the IPI only to |
765 |
+- * CPUs affected. |
766 |
+- */ |
767 |
+- apic->send_IPI_mask(to_cpumask(f->flush_cpumask), |
768 |
+- INVALIDATE_TLB_VECTOR_START + sender); |
769 |
++ if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) { |
770 |
++ /* |
771 |
++ * We have to send the IPI only to |
772 |
++ * CPUs affected. |
773 |
++ */ |
774 |
++ apic->send_IPI_mask(to_cpumask(f->flush_cpumask), |
775 |
++ INVALIDATE_TLB_VECTOR_START + sender); |
776 |
+ |
777 |
+- while (!cpumask_empty(to_cpumask(f->flush_cpumask))) |
778 |
+- cpu_relax(); |
779 |
++ while (!cpumask_empty(to_cpumask(f->flush_cpumask))) |
780 |
++ cpu_relax(); |
781 |
++ } |
782 |
+ |
783 |
+ f->flush_mm = NULL; |
784 |
+ f->flush_va = 0; |
785 |
+diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c |
786 |
+index 39838c6..31adda1 100644 |
787 |
+--- a/drivers/acpi/processor_thermal.c |
788 |
++++ b/drivers/acpi/processor_thermal.c |
789 |
+@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr) |
790 |
+ if (pr->limit.thermal.tx > tx) |
791 |
+ tx = pr->limit.thermal.tx; |
792 |
+ |
793 |
+- result = acpi_processor_set_throttling(pr, tx); |
794 |
++ result = acpi_processor_set_throttling(pr, tx, false); |
795 |
+ if (result) |
796 |
+ goto end; |
797 |
+ } |
798 |
+@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev, |
799 |
+ |
800 |
+ if (state <= max_pstate) { |
801 |
+ if (pr->flags.throttling && pr->throttling.state) |
802 |
+- result = acpi_processor_set_throttling(pr, 0); |
803 |
++ result = acpi_processor_set_throttling(pr, 0, false); |
804 |
+ cpufreq_set_cur_state(pr->id, state); |
805 |
+ } else { |
806 |
+ cpufreq_set_cur_state(pr->id, max_pstate); |
807 |
+ result = acpi_processor_set_throttling(pr, |
808 |
+- state - max_pstate); |
809 |
++ state - max_pstate, false); |
810 |
+ } |
811 |
+ return result; |
812 |
+ } |
813 |
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c |
814 |
+index 2275437..841be4e 100644 |
815 |
+--- a/drivers/acpi/processor_throttling.c |
816 |
++++ b/drivers/acpi/processor_throttling.c |
817 |
+@@ -62,7 +62,8 @@ struct throttling_tstate { |
818 |
+ #define THROTTLING_POSTCHANGE (2) |
819 |
+ |
820 |
+ static int acpi_processor_get_throttling(struct acpi_processor *pr); |
821 |
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state); |
822 |
++int acpi_processor_set_throttling(struct acpi_processor *pr, |
823 |
++ int state, bool force); |
824 |
+ |
825 |
+ static int acpi_processor_update_tsd_coord(void) |
826 |
+ { |
827 |
+@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr) |
828 |
+ */ |
829 |
+ target_state = throttling_limit; |
830 |
+ } |
831 |
+- return acpi_processor_set_throttling(pr, target_state); |
832 |
++ return acpi_processor_set_throttling(pr, target_state, false); |
833 |
+ } |
834 |
+ |
835 |
+ /* |
836 |
+@@ -842,7 +843,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) |
837 |
+ ACPI_WARNING((AE_INFO, |
838 |
+ "Invalid throttling state, reset")); |
839 |
+ state = 0; |
840 |
+- ret = acpi_processor_set_throttling(pr, state); |
841 |
++ ret = acpi_processor_set_throttling(pr, state, true); |
842 |
+ if (ret) |
843 |
+ return ret; |
844 |
+ } |
845 |
+@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr) |
846 |
+ } |
847 |
+ |
848 |
+ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, |
849 |
+- int state) |
850 |
++ int state, bool force) |
851 |
+ { |
852 |
+ u32 value = 0; |
853 |
+ u32 duty_mask = 0; |
854 |
+@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, |
855 |
+ if (!pr->flags.throttling) |
856 |
+ return -ENODEV; |
857 |
+ |
858 |
+- if (state == pr->throttling.state) |
859 |
++ if (!force && (state == pr->throttling.state)) |
860 |
+ return 0; |
861 |
+ |
862 |
+ if (state < pr->throttling_platform_limit) |
863 |
+@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr, |
864 |
+ } |
865 |
+ |
866 |
+ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, |
867 |
+- int state) |
868 |
++ int state, bool force) |
869 |
+ { |
870 |
+ int ret; |
871 |
+ acpi_integer value; |
872 |
+@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, |
873 |
+ if (!pr->flags.throttling) |
874 |
+ return -ENODEV; |
875 |
+ |
876 |
+- if (state == pr->throttling.state) |
877 |
++ if (!force && (state == pr->throttling.state)) |
878 |
+ return 0; |
879 |
+ |
880 |
+ if (state < pr->throttling_platform_limit) |
881 |
+@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr, |
882 |
+ return 0; |
883 |
+ } |
884 |
+ |
885 |
+-int acpi_processor_set_throttling(struct acpi_processor *pr, int state) |
886 |
++int acpi_processor_set_throttling(struct acpi_processor *pr, |
887 |
++ int state, bool force) |
888 |
+ { |
889 |
+ cpumask_var_t saved_mask; |
890 |
+ int ret = 0; |
891 |
+@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) |
892 |
+ /* FIXME: use work_on_cpu() */ |
893 |
+ set_cpus_allowed_ptr(current, cpumask_of(pr->id)); |
894 |
+ ret = p_throttling->acpi_processor_set_throttling(pr, |
895 |
+- t_state.target_state); |
896 |
++ t_state.target_state, force); |
897 |
+ } else { |
898 |
+ /* |
899 |
+ * When the T-state coordination is SW_ALL or HW_ALL, |
900 |
+@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state) |
901 |
+ set_cpus_allowed_ptr(current, cpumask_of(i)); |
902 |
+ ret = match_pr->throttling. |
903 |
+ acpi_processor_set_throttling( |
904 |
+- match_pr, t_state.target_state); |
905 |
++ match_pr, t_state.target_state, force); |
906 |
+ } |
907 |
+ } |
908 |
+ /* |
909 |
+@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr) |
910 |
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
911 |
+ "Disabling throttling (was T%d)\n", |
912 |
+ pr->throttling.state)); |
913 |
+- result = acpi_processor_set_throttling(pr, 0); |
914 |
++ result = acpi_processor_set_throttling(pr, 0, false); |
915 |
+ if (result) |
916 |
+ goto end; |
917 |
+ } |
918 |
+@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file, |
919 |
+ if (strcmp(tmpbuf, charp) != 0) |
920 |
+ return -EINVAL; |
921 |
+ |
922 |
+- result = acpi_processor_set_throttling(pr, state_val); |
923 |
++ result = acpi_processor_set_throttling(pr, state_val, false); |
924 |
+ if (result) |
925 |
+ return result; |
926 |
+ |
927 |
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
928 |
+index c924230..7b2f499 100644 |
929 |
+--- a/drivers/ata/libata-core.c |
930 |
++++ b/drivers/ata/libata-core.c |
931 |
+@@ -4271,6 +4271,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
932 |
+ { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, |
933 |
+ { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, |
934 |
+ |
935 |
++ /* this one allows HPA unlocking but fails IOs on the area */ |
936 |
++ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, |
937 |
++ |
938 |
+ /* Devices which report 1 sector over size HPA */ |
939 |
+ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, |
940 |
+ { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, |
941 |
+diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c |
942 |
+index 19e0bc6..504f849 100644 |
943 |
+--- a/drivers/media/video/gspca/ov534.c |
944 |
++++ b/drivers/media/video/gspca/ov534.c |
945 |
+@@ -832,9 +832,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame, |
946 |
+ __u32 this_pts; |
947 |
+ u16 this_fid; |
948 |
+ int remaining_len = len; |
949 |
++ int payload_len; |
950 |
+ |
951 |
++ payload_len = (sd->sensor == SENSOR_OV772X) ? 2048 : 2040; |
952 |
+ do { |
953 |
+- len = min(remaining_len, 2040); /*fixme: was 2048*/ |
954 |
++ len = min(remaining_len, payload_len); |
955 |
+ |
956 |
+ /* Payloads are prefixed with a UVC-style header. We |
957 |
+ consider a frame to start when the FID toggles, or the PTS |
958 |
+diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h |
959 |
+index 16a4138..4567e90 100644 |
960 |
+--- a/drivers/net/ehea/ehea.h |
961 |
++++ b/drivers/net/ehea/ehea.h |
962 |
+@@ -40,7 +40,7 @@ |
963 |
+ #include <asm/io.h> |
964 |
+ |
965 |
+ #define DRV_NAME "ehea" |
966 |
+-#define DRV_VERSION "EHEA_0101" |
967 |
++#define DRV_VERSION "EHEA_0102" |
968 |
+ |
969 |
+ /* eHEA capability flags */ |
970 |
+ #define DLPAR_PORT_ADD_REM 1 |
971 |
+diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c |
972 |
+index 0a7a288..9bc4775 100644 |
973 |
+--- a/drivers/net/ehea/ehea_main.c |
974 |
++++ b/drivers/net/ehea/ehea_main.c |
975 |
+@@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) |
976 |
+ { |
977 |
+ int ret, i; |
978 |
+ |
979 |
++ if (pr->qp) |
980 |
++ netif_napi_del(&pr->napi); |
981 |
++ |
982 |
+ ret = ehea_destroy_qp(pr->qp); |
983 |
+ |
984 |
+ if (!ret) { |
985 |
+diff --git a/drivers/net/wireless/ar9170/main.c b/drivers/net/wireless/ar9170/main.c |
986 |
+index 5996ff9..f839c83 100644 |
987 |
+--- a/drivers/net/wireless/ar9170/main.c |
988 |
++++ b/drivers/net/wireless/ar9170/main.c |
989 |
+@@ -1486,13 +1486,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, |
990 |
+ int ret; |
991 |
+ |
992 |
+ mutex_lock(&ar->mutex); |
993 |
+- if ((param) && !(queue > ar->hw->queues)) { |
994 |
++ if (queue < __AR9170_NUM_TXQ) { |
995 |
+ memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], |
996 |
+ param, sizeof(*param)); |
997 |
+ |
998 |
+ ret = ar9170_set_qos(ar); |
999 |
+- } else |
1000 |
++ } else { |
1001 |
+ ret = -EINVAL; |
1002 |
++ } |
1003 |
+ |
1004 |
+ mutex_unlock(&ar->mutex); |
1005 |
+ return ret; |
1006 |
+diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c |
1007 |
+index 2ad9faf..fc3a95f 100644 |
1008 |
+--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c |
1009 |
++++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c |
1010 |
+@@ -53,22 +53,31 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state) |
1011 |
+ switch (state) { |
1012 |
+ case RFKILL_STATE_UNBLOCKED: |
1013 |
+ if (iwl_is_rfkill_hw(priv)) { |
1014 |
++ /* pass error to rfkill core, make it state HARD |
1015 |
++ * BLOCKED (rfkill->mutex taken) and disable |
1016 |
++ * software kill switch */ |
1017 |
+ err = -EBUSY; |
1018 |
+- goto out_unlock; |
1019 |
++ priv->rfkill->state = RFKILL_STATE_HARD_BLOCKED; |
1020 |
+ } |
1021 |
+ iwl_radio_kill_sw_enable_radio(priv); |
1022 |
+ break; |
1023 |
+ case RFKILL_STATE_SOFT_BLOCKED: |
1024 |
+ iwl_radio_kill_sw_disable_radio(priv); |
1025 |
++ /* rfkill->mutex is taken */ |
1026 |
++ if (priv->rfkill->state == RFKILL_STATE_HARD_BLOCKED) { |
1027 |
++ /* force rfkill core state to be SOFT BLOCKED, |
1028 |
++ * otherwise core will be unable to disable software |
1029 |
++ * kill switch */ |
1030 |
++ priv->rfkill->state = RFKILL_STATE_SOFT_BLOCKED; |
1031 |
++ } |
1032 |
+ break; |
1033 |
+ default: |
1034 |
+ IWL_WARN(priv, "we received unexpected RFKILL state %d\n", |
1035 |
+ state); |
1036 |
+ break; |
1037 |
+ } |
1038 |
+-out_unlock: |
1039 |
+- mutex_unlock(&priv->mutex); |
1040 |
+ |
1041 |
++ mutex_unlock(&priv->mutex); |
1042 |
+ return err; |
1043 |
+ } |
1044 |
+ |
1045 |
+@@ -132,14 +141,11 @@ void iwl_rfkill_set_hw_state(struct iwl_priv *priv) |
1046 |
+ if (!priv->rfkill) |
1047 |
+ return; |
1048 |
+ |
1049 |
+- if (iwl_is_rfkill_hw(priv)) { |
1050 |
++ if (iwl_is_rfkill_sw(priv)) |
1051 |
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED); |
1052 |
++ else if (iwl_is_rfkill_hw(priv)) |
1053 |
+ rfkill_force_state(priv->rfkill, RFKILL_STATE_HARD_BLOCKED); |
1054 |
+- return; |
1055 |
+- } |
1056 |
+- |
1057 |
+- if (!iwl_is_rfkill_sw(priv)) |
1058 |
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED); |
1059 |
+ else |
1060 |
+- rfkill_force_state(priv->rfkill, RFKILL_STATE_SOFT_BLOCKED); |
1061 |
++ rfkill_force_state(priv->rfkill, RFKILL_STATE_UNBLOCKED); |
1062 |
+ } |
1063 |
+ EXPORT_SYMBOL(iwl_rfkill_set_hw_state); |
1064 |
+diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c |
1065 |
+index 44ab03a..da2e2d4 100644 |
1066 |
+--- a/drivers/net/wireless/iwlwifi/iwl-sta.c |
1067 |
++++ b/drivers/net/wireless/iwlwifi/iwl-sta.c |
1068 |
+@@ -560,6 +560,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, |
1069 |
+ unsigned long flags; |
1070 |
+ |
1071 |
+ spin_lock_irqsave(&priv->sta_lock, flags); |
1072 |
++ IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", |
1073 |
++ keyconf->keyidx); |
1074 |
+ |
1075 |
+ if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) |
1076 |
+ IWL_ERR(priv, "index %d not used in uCode key table.\n", |
1077 |
+@@ -567,6 +569,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv, |
1078 |
+ |
1079 |
+ priv->default_wep_key--; |
1080 |
+ memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); |
1081 |
++ if (iwl_is_rfkill(priv)) { |
1082 |
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); |
1083 |
++ spin_unlock_irqrestore(&priv->sta_lock, flags); |
1084 |
++ return 0; |
1085 |
++ } |
1086 |
+ ret = iwl_send_static_wepkey_cmd(priv, 1); |
1087 |
+ IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", |
1088 |
+ keyconf->keyidx, ret); |
1089 |
+@@ -847,6 +854,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv, |
1090 |
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; |
1091 |
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; |
1092 |
+ |
1093 |
++ if (iwl_is_rfkill(priv)) { |
1094 |
++ IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); |
1095 |
++ spin_unlock_irqrestore(&priv->sta_lock, flags); |
1096 |
++ return 0; |
1097 |
++ } |
1098 |
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); |
1099 |
+ spin_unlock_irqrestore(&priv->sta_lock, flags); |
1100 |
+ return ret; |
1101 |
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h |
1102 |
+index 84bd6f1..c242b54 100644 |
1103 |
+--- a/drivers/net/wireless/rt2x00/rt2x00.h |
1104 |
++++ b/drivers/net/wireless/rt2x00/rt2x00.h |
1105 |
+@@ -814,13 +814,15 @@ struct rt2x00_dev { |
1106 |
+ static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, |
1107 |
+ const unsigned int word, u32 *data) |
1108 |
+ { |
1109 |
+- *data = rt2x00dev->rf[word]; |
1110 |
++ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); |
1111 |
++ *data = rt2x00dev->rf[word - 1]; |
1112 |
+ } |
1113 |
+ |
1114 |
+ static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, |
1115 |
+ const unsigned int word, u32 data) |
1116 |
+ { |
1117 |
+- rt2x00dev->rf[word] = data; |
1118 |
++ BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32)); |
1119 |
++ rt2x00dev->rf[word - 1] = data; |
1120 |
+ } |
1121 |
+ |
1122 |
+ /* |
1123 |
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c |
1124 |
+index 043b208..f215a59 100644 |
1125 |
+--- a/drivers/platform/x86/wmi.c |
1126 |
++++ b/drivers/platform/x86/wmi.c |
1127 |
+@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) |
1128 |
+ acpi_status status; |
1129 |
+ struct acpi_object_list input; |
1130 |
+ union acpi_object params[3]; |
1131 |
+- char method[4] = "WM"; |
1132 |
++ char method[5] = "WM"; |
1133 |
+ |
1134 |
+ if (!find_guid(guid_string, &wblock)) |
1135 |
+ return AE_ERROR; |
1136 |
+@@ -328,8 +328,8 @@ struct acpi_buffer *out) |
1137 |
+ acpi_status status, wc_status = AE_ERROR; |
1138 |
+ struct acpi_object_list input, wc_input; |
1139 |
+ union acpi_object wc_params[1], wq_params[1]; |
1140 |
+- char method[4]; |
1141 |
+- char wc_method[4] = "WC"; |
1142 |
++ char method[5]; |
1143 |
++ char wc_method[5] = "WC"; |
1144 |
+ |
1145 |
+ if (!guid_string || !out) |
1146 |
+ return AE_BAD_PARAMETER; |
1147 |
+@@ -410,7 +410,7 @@ const struct acpi_buffer *in) |
1148 |
+ acpi_handle handle; |
1149 |
+ struct acpi_object_list input; |
1150 |
+ union acpi_object params[2]; |
1151 |
+- char method[4] = "WS"; |
1152 |
++ char method[5] = "WS"; |
1153 |
+ |
1154 |
+ if (!guid_string || !in) |
1155 |
+ return AE_BAD_DATA; |
1156 |
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c |
1157 |
+index f3da592..35a1386 100644 |
1158 |
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c |
1159 |
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c |
1160 |
+@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work) |
1161 |
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
1162 |
+ } |
1163 |
+ |
1164 |
++/** |
1165 |
++ * mpt2sas_base_start_watchdog - start the fault_reset_work_q |
1166 |
++ * @ioc: pointer to scsi command object |
1167 |
++ * Context: sleep. |
1168 |
++ * |
1169 |
++ * Return nothing. |
1170 |
++ */ |
1171 |
++void |
1172 |
++mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc) |
1173 |
++{ |
1174 |
++ unsigned long flags; |
1175 |
++ |
1176 |
++ if (ioc->fault_reset_work_q) |
1177 |
++ return; |
1178 |
++ |
1179 |
++ /* initialize fault polling */ |
1180 |
++ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); |
1181 |
++ snprintf(ioc->fault_reset_work_q_name, |
1182 |
++ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); |
1183 |
++ ioc->fault_reset_work_q = |
1184 |
++ create_singlethread_workqueue(ioc->fault_reset_work_q_name); |
1185 |
++ if (!ioc->fault_reset_work_q) { |
1186 |
++ printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", |
1187 |
++ ioc->name, __func__, __LINE__); |
1188 |
++ return; |
1189 |
++ } |
1190 |
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
1191 |
++ if (ioc->fault_reset_work_q) |
1192 |
++ queue_delayed_work(ioc->fault_reset_work_q, |
1193 |
++ &ioc->fault_reset_work, |
1194 |
++ msecs_to_jiffies(FAULT_POLLING_INTERVAL)); |
1195 |
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
1196 |
++} |
1197 |
++ |
1198 |
++/** |
1199 |
++ * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q |
1200 |
++ * @ioc: pointer to scsi command object |
1201 |
++ * Context: sleep. |
1202 |
++ * |
1203 |
++ * Return nothing. |
1204 |
++ */ |
1205 |
++void |
1206 |
++mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc) |
1207 |
++{ |
1208 |
++ unsigned long flags; |
1209 |
++ struct workqueue_struct *wq; |
1210 |
++ |
1211 |
++ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
1212 |
++ wq = ioc->fault_reset_work_q; |
1213 |
++ ioc->fault_reset_work_q = NULL; |
1214 |
++ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
1215 |
++ if (wq) { |
1216 |
++ if (!cancel_delayed_work(&ioc->fault_reset_work)) |
1217 |
++ flush_workqueue(wq); |
1218 |
++ destroy_workqueue(wq); |
1219 |
++ } |
1220 |
++} |
1221 |
++ |
1222 |
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
1223 |
+ /** |
1224 |
+ * _base_sas_ioc_info - verbose translation of the ioc status |
1225 |
+@@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info) |
1226 |
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/) |
1227 |
+ return; |
1228 |
+ |
1229 |
++ /* each nexus loss loginfo */ |
1230 |
++ if (log_info == 0x31170000) |
1231 |
++ return; |
1232 |
++ |
1233 |
+ /* eat the loginfos associated with task aborts */ |
1234 |
+ if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == |
1235 |
+ 0x31140000 || log_info == 0x31130000)) |
1236 |
+@@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) |
1237 |
+ } |
1238 |
+ } |
1239 |
+ |
1240 |
+- pci_set_drvdata(pdev, ioc->shost); |
1241 |
+ _base_mask_interrupts(ioc); |
1242 |
+ r = _base_enable_msix(ioc); |
1243 |
+ if (r) |
1244 |
+@@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) |
1245 |
+ ioc->pci_irq = -1; |
1246 |
+ pci_release_selected_regions(ioc->pdev, ioc->bars); |
1247 |
+ pci_disable_device(pdev); |
1248 |
+- pci_set_drvdata(pdev, NULL); |
1249 |
+ return r; |
1250 |
+ } |
1251 |
+ |
1252 |
+@@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) |
1253 |
+ ioc->chip_phys = 0; |
1254 |
+ pci_release_selected_regions(ioc->pdev, ioc->bars); |
1255 |
+ pci_disable_device(pdev); |
1256 |
+- pci_set_drvdata(pdev, NULL); |
1257 |
+ return; |
1258 |
+ } |
1259 |
+ |
1260 |
+@@ -3205,7 +3264,6 @@ int |
1261 |
+ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
1262 |
+ { |
1263 |
+ int r, i; |
1264 |
+- unsigned long flags; |
1265 |
+ |
1266 |
+ dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
1267 |
+ __func__)); |
1268 |
+@@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
1269 |
+ if (r) |
1270 |
+ return r; |
1271 |
+ |
1272 |
++ pci_set_drvdata(ioc->pdev, ioc->shost); |
1273 |
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); |
1274 |
+ if (r) |
1275 |
+ goto out_free_resources; |
1276 |
+@@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
1277 |
+ if (r) |
1278 |
+ goto out_free_resources; |
1279 |
+ |
1280 |
+- /* initialize fault polling */ |
1281 |
+- INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); |
1282 |
+- snprintf(ioc->fault_reset_work_q_name, |
1283 |
+- sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); |
1284 |
+- ioc->fault_reset_work_q = |
1285 |
+- create_singlethread_workqueue(ioc->fault_reset_work_q_name); |
1286 |
+- if (!ioc->fault_reset_work_q) { |
1287 |
+- printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n", |
1288 |
+- ioc->name, __func__, __LINE__); |
1289 |
+- goto out_free_resources; |
1290 |
+- } |
1291 |
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
1292 |
+- if (ioc->fault_reset_work_q) |
1293 |
+- queue_delayed_work(ioc->fault_reset_work_q, |
1294 |
+- &ioc->fault_reset_work, |
1295 |
+- msecs_to_jiffies(FAULT_POLLING_INTERVAL)); |
1296 |
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
1297 |
++ mpt2sas_base_start_watchdog(ioc); |
1298 |
+ return 0; |
1299 |
+ |
1300 |
+ out_free_resources: |
1301 |
+@@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
1302 |
+ ioc->remove_host = 1; |
1303 |
+ mpt2sas_base_free_resources(ioc); |
1304 |
+ _base_release_memory_pools(ioc); |
1305 |
++ pci_set_drvdata(ioc->pdev, NULL); |
1306 |
+ kfree(ioc->tm_cmds.reply); |
1307 |
+ kfree(ioc->transport_cmds.reply); |
1308 |
+ kfree(ioc->config_cmds.reply); |
1309 |
+@@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) |
1310 |
+ void |
1311 |
+ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) |
1312 |
+ { |
1313 |
+- unsigned long flags; |
1314 |
+- struct workqueue_struct *wq; |
1315 |
+ |
1316 |
+ dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, |
1317 |
+ __func__)); |
1318 |
+ |
1319 |
+- spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
1320 |
+- wq = ioc->fault_reset_work_q; |
1321 |
+- ioc->fault_reset_work_q = NULL; |
1322 |
+- spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
1323 |
+- if (!cancel_delayed_work(&ioc->fault_reset_work)) |
1324 |
+- flush_workqueue(wq); |
1325 |
+- destroy_workqueue(wq); |
1326 |
+- |
1327 |
++ mpt2sas_base_stop_watchdog(ioc); |
1328 |
+ mpt2sas_base_free_resources(ioc); |
1329 |
+ _base_release_memory_pools(ioc); |
1330 |
++ pci_set_drvdata(ioc->pdev, NULL); |
1331 |
+ kfree(ioc->pfacts); |
1332 |
+ kfree(ioc->ctl_cmds.reply); |
1333 |
+ kfree(ioc->base_cmds.reply); |
1334 |
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h |
1335 |
+index 36b1d10..1dd7c9a 100644 |
1336 |
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h |
1337 |
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h |
1338 |
+@@ -672,6 +672,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, |
1339 |
+ |
1340 |
+ /* base shared API */ |
1341 |
+ extern struct list_head mpt2sas_ioc_list; |
1342 |
++void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc); |
1343 |
++void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc); |
1344 |
+ |
1345 |
+ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc); |
1346 |
+ void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc); |
1347 |
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
+index 58cfb97..6ddee16 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
+@@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+	Mpi2ConfigRequest_t *config_request;
+	int r;
+	u8 retry_count;
+-	u8 issue_reset;
++	u8 issue_host_reset = 0;
+	u16 wait_state_count;
+
++	mutex_lock(&ioc->config_cmds.mutex);
+	if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
+		printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
+		    ioc->name, __func__);
++		mutex_unlock(&ioc->config_cmds.mutex);
+		return -EAGAIN;
+	}
+	retry_count = 0;
+
+ retry_config:
++	if (retry_count) {
++		if (retry_count > 2) /* attempt only 2 retries */
++			return -EFAULT;
++		printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
++		    ioc->name, __func__, retry_count);
++	}
+	wait_state_count = 0;
+	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+@@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+			printk(MPT2SAS_ERR_FMT
+			    "%s: failed due to ioc not operational\n",
+			    ioc->name, __func__);
+-			ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+-			return -EFAULT;
++			r = -EFAULT;
++			goto out;
+		}
+		ssleep(1);
+		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
+@@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+	if (!smid) {
+		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
+		    ioc->name, __func__);
+-		ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+-		return -EAGAIN;
++		r = -EAGAIN;
++		goto out;
+	}
+
+	r = 0;
+@@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+		    ioc->name, __func__);
+		_debug_dump_mf(mpi_request,
+		    sizeof(Mpi2ConfigRequest_t)/4);
+-		if (!(ioc->config_cmds.status & MPT2_CMD_RESET))
+-			issue_reset = 1;
+-		goto issue_host_reset;
++		retry_count++;
++		if (ioc->config_cmds.smid == smid)
++			mpt2sas_base_free_smid(ioc, smid);
++		if ((ioc->shost_recovery) ||
++		    (ioc->config_cmds.status & MPT2_CMD_RESET))
++			goto retry_config;
++		issue_host_reset = 1;
++		r = -EFAULT;
++		goto out;
+	}
+	if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
+		memcpy(mpi_reply, ioc->config_cmds.reply,
+@@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+	if (retry_count)
+		printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
+		    ioc->name, __func__);
++out:
+	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+-	return r;
+-
+- issue_host_reset:
+-	if (issue_reset)
++	mutex_unlock(&ioc->config_cmds.mutex);
++	if (issue_host_reset)
+		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+		    FORCE_BIG_HAMMER);
+-	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
+-	if (!retry_count) {
+-		printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
+-		    ioc->name, __func__);
+-		retry_count++;
+-		goto retry_config;
+-	}
+-	return -EFAULT;
++	return r;
+ }
+
+ /**
+@@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasIOUnitPage0_t config_page;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+@@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
+	struct config_request mem;
+-
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sz);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sz);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+	struct config_request mem;
+	u16 ioc_status;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	*num_pds = 0;
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	memset(config_page, 0, sz);
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+	int r;
+	struct config_request mem;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+@@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+	struct config_request mem;
+	u16 ioc_status;
+
+-	mutex_lock(&ioc->config_cmds.mutex);
+	*volume_handle = 0;
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+@@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
+		_config_free_config_dma_memory(ioc, &mem);
+
+ out:
+-	mutex_unlock(&ioc->config_cmds.mutex);
+	return r;
+ }
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index e3a7967..7dacc68 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -2560,6 +2560,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+	char *desc_ioc_state = NULL;
+	char *desc_scsi_status = NULL;
+	char *desc_scsi_state = ioc->tmp_string;
++	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
++
++	if (log_info == 0x31170000)
++		return;
+
+	switch (ioc_status) {
+	case MPI2_IOCSTATUS_SUCCESS:
+@@ -3205,7 +3209,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+	__le64 sas_address;
+	int i;
+	unsigned long flags;
+-	struct _sas_port *mpt2sas_port;
++	struct _sas_port *mpt2sas_port = NULL;
+	int rc = 0;
+
+	if (!handle)
+@@ -3297,12 +3301,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+		    &expander_pg1, i, handle))) {
+			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+			    ioc->name, __FILE__, __LINE__, __func__);
+-			continue;
++			rc = -1;
++			goto out_fail;
+		}
+		sas_expander->phy[i].handle = handle;
+		sas_expander->phy[i].phy_id = i;
+-		mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i],
+-		    expander_pg1, sas_expander->parent_dev);
++
++		if ((mpt2sas_transport_add_expander_phy(ioc,
++		    &sas_expander->phy[i], expander_pg1,
++		    sas_expander->parent_dev))) {
++			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++			    ioc->name, __FILE__, __LINE__, __func__);
++			rc = -1;
++			goto out_fail;
++		}
+	}
+
+	if (sas_expander->enclosure_handle) {
+@@ -3319,8 +3331,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+
+ out_fail:
+
+-	if (sas_expander)
+-		kfree(sas_expander->phy);
++	if (mpt2sas_port)
++		mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
++		    sas_expander->parent_handle);
+	kfree(sas_expander);
+	return rc;
+ }
+@@ -3442,12 +3455,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
+	sas_device->hidden_raid_component = is_pd;
+
+	/* get enclosure_logical_id */
+-	if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0,
+-	   MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+-	   sas_device->enclosure_handle))) {
++	if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
++	   ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
++	   sas_device->enclosure_handle)))
+		sas_device->enclosure_logical_id =
+		    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+-	}
+
+	/* get device name */
+	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+@@ -4029,12 +4041,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
+	u16 handle = le16_to_cpu(element->VolDevHandle);
+	int rc;
+
+-#if 0 /* RAID_HACKS */
+-	if (le32_to_cpu(event_data->Flags) &
+-	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+-		return;
+-#endif
+-
+	mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
+	if (!wwid) {
+		printk(MPT2SAS_ERR_FMT
+@@ -4089,12 +4095,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
+	unsigned long flags;
+	struct MPT2SAS_TARGET *sas_target_priv_data;
+
+-#if 0 /* RAID_HACKS */
+-	if (le32_to_cpu(event_data->Flags) &
+-	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+-		return;
+-#endif
+-
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+@@ -4207,14 +4207,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device;
+	unsigned long flags;
+	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
++	Mpi2ConfigReply_t mpi_reply;
++	Mpi2SasDevicePage0_t sas_device_pg0;
++	u32 ioc_status;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+-	if (sas_device)
++	if (sas_device) {
+		sas_device->hidden_raid_component = 1;
+-	else
+-		_scsih_add_device(ioc, handle, 0, 1);
++		return;
++	}
++
++	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
++	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
++		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++		    ioc->name, __FILE__, __LINE__, __func__);
++		return;
++	}
++
++	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++	    MPI2_IOCSTATUS_MASK;
++	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++		    ioc->name, __FILE__, __LINE__, __func__);
++		return;
++	}
++
++	_scsih_link_change(ioc,
++	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
++	    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++	_scsih_add_device(ioc, handle, 0, 1);
+ }
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+@@ -4314,12 +4338,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+ {
+	Mpi2EventIrConfigElement_t *element;
+	int i;
++	u8 foreign_config;
+
+ #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
+	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+ #endif
++	foreign_config = (le32_to_cpu(event_data->Flags) &
++	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+	for (i = 0; i < event_data->NumElements; i++, element++) {
+@@ -4327,11 +4354,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+		switch (element->ReasonCode) {
+		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+-			_scsih_sas_volume_add(ioc, element);
++			if (!foreign_config)
++				_scsih_sas_volume_add(ioc, element);
+			break;
+		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+-			_scsih_sas_volume_delete(ioc, element);
++			if (!foreign_config)
++				_scsih_sas_volume_delete(ioc, element);
+			break;
+		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+			_scsih_sas_pd_hide(ioc, element);
+@@ -4450,6 +4479,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+	u32 state;
+	struct _sas_device *sas_device;
+	unsigned long flags;
++	Mpi2ConfigReply_t mpi_reply;
++	Mpi2SasDevicePage0_t sas_device_pg0;
++	u32 ioc_status;
+
+	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+		return;
+@@ -4466,22 +4498,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	switch (state) {
+-#if 0
+-	case MPI2_RAID_PD_STATE_OFFLINE:
+-		if (sas_device)
+-			_scsih_remove_device(ioc, handle);
+-		break;
+-#endif
+	case MPI2_RAID_PD_STATE_ONLINE:
+	case MPI2_RAID_PD_STATE_DEGRADED:
+	case MPI2_RAID_PD_STATE_REBUILDING:
+	case MPI2_RAID_PD_STATE_OPTIMAL:
+-		if (sas_device)
++		if (sas_device) {
+			sas_device->hidden_raid_component = 1;
+-		else
+-			_scsih_add_device(ioc, handle, 0, 1);
++			return;
++		}
++
++		if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
++		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
++		    handle))) {
++			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++			    ioc->name, __FILE__, __LINE__, __func__);
++			return;
++		}
++
++		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++		    MPI2_IOCSTATUS_MASK;
++		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++			printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
++			    ioc->name, __FILE__, __LINE__, __func__);
++			return;
++		}
++
++		_scsih_link_change(ioc,
++		    le16_to_cpu(sas_device_pg0.ParentDevHandle),
++		    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
++
++		_scsih_add_device(ioc, handle, 0, 1);
++
+		break;
+
++	case MPI2_RAID_PD_STATE_OFFLINE:
+	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+	case MPI2_RAID_PD_STATE_HOT_SPARE:
+@@ -5549,6 +5599,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	u32 device_state;
+
++	mpt2sas_base_stop_watchdog(ioc);
+	flush_scheduled_work();
+	scsi_block_requests(shost);
+	device_state = pci_choose_state(pdev, state);
+@@ -5591,6 +5642,7 @@ scsih_resume(struct pci_dev *pdev)
+
+	mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+	scsi_unblock_requests(shost);
++	mpt2sas_base_start_watchdog(ioc);
+	return 0;
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
+index e7d4479..798f362 100644
+--- a/drivers/usb/core/hcd.h
++++ b/drivers/usb/core/hcd.h
+@@ -224,6 +224,10 @@ struct hc_driver {
+	void	(*relinquish_port)(struct usb_hcd *, int);
+	/* has a port been handed over to a companion? */
+	int	(*port_handed_over)(struct usb_hcd *, int);
++
++	/* CLEAR_TT_BUFFER completion callback */
++	void	(*clear_tt_buffer_complete)(struct usb_hcd *,
++			struct usb_host_endpoint *);
+ };
+
+ extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index be86ae3..2fc5b57 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -448,10 +448,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+ * talking to TTs must queue control transfers (not just bulk and iso), so
+ * both can talk to the same hub concurrently.
+ */
+-static void hub_tt_kevent (struct work_struct *work)
++static void hub_tt_work(struct work_struct *work)
+ {
+	struct usb_hub		*hub =
+-		container_of(work, struct usb_hub, tt.kevent);
++		container_of(work, struct usb_hub, tt.clear_work);
+	unsigned long		flags;
+	int			limit = 100;
+
+@@ -460,6 +460,7 @@ static void hub_tt_kevent (struct work_struct *work)
+		struct list_head	*temp;
+		struct usb_tt_clear	*clear;
+		struct usb_device	*hdev = hub->hdev;
++		const struct hc_driver	*drv;
+		int			status;
+
+		temp = hub->tt.clear_list.next;
+@@ -469,21 +470,25 @@ static void hub_tt_kevent (struct work_struct *work)
+		/* drop lock so HCD can concurrently report other TT errors */
+		spin_unlock_irqrestore (&hub->tt.lock, flags);
+		status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
+-		spin_lock_irqsave (&hub->tt.lock, flags);
+-
+		if (status)
+			dev_err (&hdev->dev,
+				"clear tt %d (%04x) error %d\n",
+				clear->tt, clear->devinfo, status);
++
++		/* Tell the HCD, even if the operation failed */
++		drv = clear->hcd->driver;
++		if (drv->clear_tt_buffer_complete)
++			(drv->clear_tt_buffer_complete)(clear->hcd, clear->ep);
++
+		kfree(clear);
++		spin_lock_irqsave(&hub->tt.lock, flags);
+	}
+	spin_unlock_irqrestore (&hub->tt.lock, flags);
+ }
+
+ /**
+- * usb_hub_tt_clear_buffer - clear control/bulk TT state in high speed hub
+- * @udev: the device whose split transaction failed
+- * @pipe: identifies the endpoint of the failed transaction
++ * usb_hub_clear_tt_buffer - clear control/bulk TT state in high speed hub
++ * @urb: an URB associated with the failed or incomplete split transaction
+ *
+ * High speed HCDs use this to tell the hub driver that some split control or
+ * bulk transaction failed in a way that requires clearing internal state of
+@@ -493,8 +498,10 @@ static void hub_tt_kevent (struct work_struct *work)
+ * It may not be possible for that hub to handle additional full (or low)
+ * speed transactions until that state is fully cleared out.
+ */
+-void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
++int usb_hub_clear_tt_buffer(struct urb *urb)
+ {
++	struct usb_device	*udev = urb->dev;
++	int			pipe = urb->pipe;
+	struct usb_tt		*tt = udev->tt;
+	unsigned long		flags;
+	struct usb_tt_clear	*clear;
+@@ -506,7 +513,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
+	if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
+		dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
+		/* FIXME recover somehow ... RESET_TT? */
+-		return;
++		return -ENOMEM;
+	}
+
+	/* info that CLEAR_TT_BUFFER needs */
+@@ -518,14 +525,19 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
+			: (USB_ENDPOINT_XFER_BULK << 11);
+	if (usb_pipein (pipe))
+		clear->devinfo |= 1 << 15;
+-
++
++	/* info for completion callback */
++	clear->hcd = bus_to_hcd(udev->bus);
++	clear->ep = urb->ep;
++
+	/* tell keventd to clear state for this TT */
+	spin_lock_irqsave (&tt->lock, flags);
+	list_add_tail (&clear->clear_list, &tt->clear_list);
+-	schedule_work (&tt->kevent);
++	schedule_work(&tt->clear_work);
+	spin_unlock_irqrestore (&tt->lock, flags);
++	return 0;
+ }
+-EXPORT_SYMBOL_GPL(usb_hub_tt_clear_buffer);
++EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
+
+ /* If do_delay is false, return the number of milliseconds the caller
+ * needs to delay.
+@@ -816,7 +828,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+	if (hub->has_indicators)
+		cancel_delayed_work_sync(&hub->leds);
+	if (hub->tt.hub)
+-		cancel_work_sync(&hub->tt.kevent);
++		cancel_work_sync(&hub->tt.clear_work);
+ }
+
+ /* caller has locked the hub device */
+@@ -933,7 +945,7 @@ static int hub_configure(struct usb_hub *hub,
+
+	spin_lock_init (&hub->tt.lock);
+	INIT_LIST_HEAD (&hub->tt.clear_list);
+-	INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
++	INIT_WORK(&hub->tt.clear_work, hub_tt_work);
+	switch (hdev->descriptor.bDeviceProtocol) {
+		case 0:
+			break;
+diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
+index 2a116ce..528c411 100644
+--- a/drivers/usb/core/hub.h
++++ b/drivers/usb/core/hub.h
+@@ -185,16 +185,18 @@ struct usb_tt {
+	/* for control/bulk error recovery (CLEAR_TT_BUFFER) */
+	spinlock_t		lock;
+	struct list_head	clear_list;	/* of usb_tt_clear */
+-	struct work_struct			kevent;
++	struct work_struct	clear_work;
+ };
+
+ struct usb_tt_clear {
+	struct list_head	clear_list;
+	unsigned		tt;
+	u16			devinfo;
++	struct usb_hcd		*hcd;
++	struct usb_host_endpoint	*ep;
+ };
+
+-extern void usb_hub_tt_clear_buffer(struct usb_device *dev, int pipe);
++extern int usb_hub_clear_tt_buffer(struct urb *urb);
+ extern void usb_ep0_reinit(struct usb_device *);
+
+ #endif /* __LINUX_HUB_H */
+diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
+index bf69f47..5c25b1a 100644
+--- a/drivers/usb/host/ehci-au1xxx.c
++++ b/drivers/usb/host/ehci-au1xxx.c
+@@ -112,6 +112,8 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
+	.bus_resume		= ehci_bus_resume,
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
++
++	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+ };
+
+ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index 01c3da3..7fb1ef0 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -324,6 +324,8 @@ static const struct hc_driver ehci_fsl_hc_driver = {
+	.bus_resume = ehci_bus_resume,
+	.relinquish_port = ehci_relinquish_port,
+	.port_handed_over = ehci_port_handed_over,
++
++	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static int ehci_fsl_drv_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index c637207..d75b8cf 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -903,7 +903,8 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+			/* already started */
+			break;
+		case QH_STATE_IDLE:
+-			WARN_ON(1);
++			/* QH might be waiting for a Clear-TT-Buffer */
++			qh_completions(ehci, qh);
+			break;
+		}
+		break;
+@@ -1003,6 +1004,8 @@ idle_timeout:
+		schedule_timeout_uninterruptible(1);
+		goto rescan;
+	case QH_STATE_IDLE:		/* fully unlinked */
++		if (qh->clearing_tt)
++			goto idle_timeout;
+		if (list_empty (&qh->qtd_list)) {
+			qh_put (qh);
+			break;
+diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
+index 9c32063..8573b03 100644
+--- a/drivers/usb/host/ehci-ixp4xx.c
++++ b/drivers/usb/host/ehci-ixp4xx.c
+@@ -60,6 +60,8 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
+ #endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
++
++	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+ };
+
+ static int ixp4xx_ehci_probe(struct platform_device *pdev)
+diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
+index 9d48790..64ab30a 100644
+--- a/drivers/usb/host/ehci-orion.c
++++ b/drivers/usb/host/ehci-orion.c
+@@ -164,6 +164,8 @@ static const struct hc_driver ehci_orion_hc_driver = {
+	.bus_resume = ehci_bus_resume,
+	.relinquish_port = ehci_relinquish_port,
+	.port_handed_over = ehci_port_handed_over,
++
++	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+ static void __init
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 5aa8bce..a26b7f7 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -408,6 +408,8 @@ static const struct hc_driver ehci_pci_hc_driver = {
+	.bus_resume =		ehci_bus_resume,
+	.relinquish_port =	ehci_relinquish_port,
+	.port_handed_over =	ehci_port_handed_over,
++
++	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+ };
+
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
+index ef732b7..8b6556e 100644
+--- a/drivers/usb/host/ehci-ppc-of.c
++++ b/drivers/usb/host/ehci-ppc-of.c
+@@ -78,6 +78,8 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
+ #endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
++
++	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+ };
+
+
+diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
+index 1ba9f9a..efefc91 100644
+--- a/drivers/usb/host/ehci-ps3.c
++++ b/drivers/usb/host/ehci-ps3.c
+@@ -74,6 +74,8 @@ static const struct hc_driver ps3_ehci_hc_driver = {
+ #endif
+	.relinquish_port	= ehci_relinquish_port,
+	.port_handed_over	= ehci_port_handed_over,
++
++	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
+ };
+
+ static int ps3_ehci_probe(struct ps3_system_bus_device *dev)
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 1976b1b..a39f2c6 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -139,6 +139,55 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
+
+ /*-------------------------------------------------------------------------*/
+
++static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
++
++static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
++		struct usb_host_endpoint *ep)
++{
++	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
++	struct ehci_qh		*qh = ep->hcpriv;
++	unsigned long		flags;
++
++	spin_lock_irqsave(&ehci->lock, flags);
++	qh->clearing_tt = 0;
++	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
++			&& HC_IS_RUNNING(hcd->state))
++		qh_link_async(ehci, qh);
++	spin_unlock_irqrestore(&ehci->lock, flags);
++}
++
++static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
++		struct urb *urb, u32 token)
++{
++
++	/* If an async split transaction gets an error or is unlinked,
++	 * the TT buffer may be left in an indeterminate state.  We
++	 * have to clear the TT buffer.
++	 *
++	 * Note: this routine is never called for Isochronous transfers.
++	 */
++	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
++#ifdef DEBUG
++		struct usb_device *tt = urb->dev->tt->hub;
++		dev_dbg(&tt->dev,
++			"clear tt buffer port %d, a%d ep%d t%08x\n",
++			urb->dev->ttport, urb->dev->devnum,
++			usb_pipeendpoint(urb->pipe), token);
++#endif /* DEBUG */
++		if (!ehci_is_TDI(ehci)
++				|| urb->dev->tt->hub !=
++				   ehci_to_hcd(ehci)->self.root_hub) {
++			if (usb_hub_clear_tt_buffer(urb) == 0)
++				qh->clearing_tt = 1;
++		} else {
++
++			/* REVISIT ARC-derived cores don't clear the root
++			 * hub TT buffer in this way...
++			 */
++		}
++	}
++}
++
+ static int qtd_copy_status (
+	struct ehci_hcd *ehci,
+	struct urb *urb,
+@@ -195,28 +244,6 @@ static int qtd_copy_status (
+			usb_pipeendpoint (urb->pipe),
+			usb_pipein (urb->pipe) ? "in" : "out",
+			token, status);
+-
+-		/* if async CSPLIT failed, try cleaning out the TT buffer */
+-		if (status != -EPIPE
+-				&& urb->dev->tt
+-				&& !usb_pipeint(urb->pipe)
+-				&& ((token & QTD_STS_MMF) != 0
+-					|| QTD_CERR(token) == 0)
+-				&& (!ehci_is_TDI(ehci)
+-					|| urb->dev->tt->hub !=
+-					   ehci_to_hcd(ehci)->self.root_hub)) {
+-#ifdef DEBUG
+-			struct usb_device *tt = urb->dev->tt->hub;
+-			dev_dbg (&tt->dev,
+-				"clear tt buffer port %d, a%d ep%d t%08x\n",
+-				urb->dev->ttport, urb->dev->devnum,
+-				usb_pipeendpoint (urb->pipe), token);
+-#endif /* DEBUG */
+-			/* REVISIT ARC-derived cores don't clear the root
+-			 * hub TT buffer in this way...
+-			 */
+-			usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
+-		}
+	}
+
+	return status;
+@@ -407,9 +434,16 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
+			/* qh unlinked; token in overlay may be most current */
+			if (state == QH_STATE_IDLE
+					&& cpu_to_hc32(ehci, qtd->qtd_dma)
+-						== qh->hw_current)
++						== qh->hw_current) {
+				token = hc32_to_cpu(ehci, qh->hw_token);
+
++				/* An unlink may leave an incomplete
++				 * async transaction in the TT buffer.
++				 * We have to clear it.
++				 */
++				ehci_clear_tt_buffer(ehci, qh, urb, token);
++			}
++
+			/* force halt for unlinked or blocked qh, so we'll
+			 * patch the qh later and so that completions can't
+			 * activate it while we "know" it's stopped.
+@@ -435,6 +469,13 @@ halt:
+					&& (qtd->hw_alt_next
+						& EHCI_LIST_END(ehci)))
+				last_status = -EINPROGRESS;
++
++			/* As part of low/full-speed endpoint-halt processing
++			 * we must clear the TT buffer (11.17.5).
++			 */
++			if (unlikely(last_status != -EINPROGRESS &&
++					last_status != -EREMOTEIO))
++				ehci_clear_tt_buffer(ehci, qh, urb, token);
+		}
+
+		/* if we're removing something not at the queue head,
+@@ -864,6 +905,10 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+	__hc32		dma = QH_NEXT(ehci, qh->qh_dma);
+	struct ehci_qh	*head;
+
++	/* Don't link a QH if there's a Clear-TT-Buffer pending */
++	if (unlikely(qh->clearing_tt))
++		return;
++
+	/* (re)start the async schedule? */
+	head = ehci->async;
+	timer_action_done (ehci, TIMER_ASYNC_OFF);
+@@ -893,6 +938,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+	head->qh_next.qh = qh;
+	head->hw_next = dma;
+
++	qh_get(qh);
+	qh->xacterrs = QH_XACTERR_MAX;
+	qh->qh_state = QH_STATE_LINKED;
+	/* qtd completions reported later by interrupt */
+@@ -1033,7 +1079,7 @@ submit_async (
+	 * the HC and TT handle it when the TT has a buffer ready.
+	 */
+	if (likely (qh->qh_state == QH_STATE_IDLE))
+-		qh_link_async (ehci, qh_get (qh));
++		qh_link_async(ehci, qh);
+ done:
+	spin_unlock_irqrestore (&ehci->lock, flags);
+	if (unlikely (qh == NULL))
+@@ -1068,8 +1114,6 @@ static void end_unlink_async (struct ehci_hcd *ehci)
+			&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
+		qh_link_async (ehci, qh);
+	else {
+-		qh_put (qh);		// refcount from async list
+-
+		/* it's not free to turn the async schedule on/off; leave it
+		 * active but idle for a while once it empties.
+		 */
+@@ -1077,6 +1121,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
+				&& ehci->async->qh_next.qh == NULL)
+			timer_action (ehci, TIMER_ASYNC_OFF);
+	}
++	qh_put(qh);			/* refcount from async list */
+
+	if (next) {
+		ehci->reclaim = NULL;
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 6cff195..ec5af22 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -353,7 +353,9 @@ struct ehci_qh {
+	unsigned short		period;		/* polling interval */
+	unsigned short		start;		/* where polling starts */
+ #define NO_FRAME ((unsigned short)~0)			/* pick new start */
++
+	struct usb_device	*dev;		/* access to TT */
++	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
+ } __attribute__ ((aligned (32)));
+
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
+index 2493f05..d886bf9 100644
+--- a/drivers/video/xen-fbfront.c
++++ b/drivers/video/xen-fbfront.c
+@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
+
+	xenfb_init_shared_page(info, fb_info);
+
++	ret = xenfb_connect_backend(dev, info);
++	if (ret < 0)
++		goto error;
++
+	ret = register_framebuffer(fb_info);
+	if (ret) {
+		fb_deferred_io_cleanup(fb_info);
+@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
+	}
+	info->fb_info = fb_info;
+
+-	ret = xenfb_connect_backend(dev, info);
+-	if (ret < 0)
+-		goto error;
+-
+	xenfb_make_preferred_console();
+	return 0;
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 4910612..941c78b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1165,8 +1165,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
+
+	if (!test_set_buffer_dirty(bh)) {
+		struct page *page = bh->b_page;
+-		if (!TestSetPageDirty(page))
+-			__set_page_dirty(page, page_mapping(page), 0);
++		if (!TestSetPageDirty(page)) {
++			struct address_space *mapping = page_mapping(page);
++			if (mapping)
++				__set_page_dirty(page, mapping, 0);
++		}
+	}
+ }
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 895823d..42414e5 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -677,8 +677,8 @@ exit:
+ }
+ EXPORT_SYMBOL(open_exec);
+
+-int kernel_read(struct file *file, unsigned long offset,
+-	char *addr, unsigned long count)
++int kernel_read(struct file *file, loff_t offset,
++		char *addr, unsigned long count)
+ {
+	mm_segment_t old_fs;
+	loff_t pos = offset;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index c1462d4..7ae4e4b 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -934,26 +934,28 @@ static int can_do_hugetlb_shm(void)
+	return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
+ }
+
+-struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag)
++struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
++						struct user_struct **user)
+ {
+	int error = -ENOMEM;
+-	int unlock_shm = 0;
+	struct file *file;
+	struct inode *inode;
+	struct dentry *dentry, *root;
+	struct qstr quick_string;
+-	struct user_struct *user = current_user();
+
++	*user = NULL;
+	if (!hugetlbfs_vfsmount)
+		return ERR_PTR(-ENOENT);
+
+	if (!can_do_hugetlb_shm()) {
+-		if (user_shm_lock(size, user)) {
+-			unlock_shm = 1;
++		*user = current_user();
++		if (user_shm_lock(size, *user)) {
+			WARN_ONCE(1,
+			  "Using mlock ulimits for SHM_HUGETLB deprecated\n");
+-		} else
++		} else {
++			*user = NULL;
+			return ERR_PTR(-EPERM);
++		}
+	}
+
+	root = hugetlbfs_vfsmount->mnt_root;
+@@ -994,8 +996,10 @@ out_inode:
+ out_dentry:
+	dput(dentry);
+ out_shm_unlock:
+-	if (unlock_shm)
+-		user_shm_unlock(size, user);
++	if (*user) {
++		user_shm_unlock(size, *user);
++		*user = NULL;
++	}
+	return ERR_PTR(error);
+ }
+
+diff --git a/fs/inode.c b/fs/inode.c
+index bca0c61..a9e8ef0 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -118,12 +118,11 @@ static void wake_up_inode(struct inode *inode)
+ * These are initializations that need to be done on every inode
+ * allocation as the fields are not initialised by slab allocation.
+ */
+-struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
++int inode_init_always(struct super_block *sb, struct inode *inode)
+ {
+	static const struct address_space_operations empty_aops;
+	static struct inode_operations empty_iops;
+	static const struct file_operations empty_fops;
+-
+	struct address_space *const mapping = &inode->i_data;
+
+	inode->i_sb = sb;
+@@ -150,7 +149,7 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+	inode->dirtied_when = 0;
+
+	if (security_inode_alloc(inode))
+-		goto out_free_inode;
++		goto out;
+
+	/* allocate and initialize an i_integrity */
+	if (ima_inode_alloc(inode))
+@@ -189,16 +188,12 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
+	inode->i_private = NULL;
+	inode->i_mapping = mapping;
+
+-	return inode;
++	return 0;
+
+ out_free_security:
+	security_inode_free(inode);
+-out_free_inode:
+-	if (inode->i_sb->s_op->destroy_inode)
+-		inode->i_sb->s_op->destroy_inode(inode);
+-	else
+-		kmem_cache_free(inode_cachep, (inode));
+-	return NULL;
++out:
++	return -ENOMEM;
+ }
+ EXPORT_SYMBOL(inode_init_always);
+
+@@ -211,23 +206,36 @@ static struct inode *alloc_inode(struct super_block *sb)
+	else
+		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
+
+-	if (inode)
+-		return inode_init_always(sb, inode);
+-	return NULL;
++	if (!inode)
++		return NULL;
++
++	if (unlikely(inode_init_always(sb, inode))) {
++		if (inode->i_sb->s_op->destroy_inode)
++			inode->i_sb->s_op->destroy_inode(inode);
++		else
++			kmem_cache_free(inode_cachep, inode);
++		return NULL;
++	}
++
++	return inode;
+ }
+
+-void destroy_inode(struct inode *inode)
++void __destroy_inode(struct inode *inode)
+ {
+	BUG_ON(inode_has_buffers(inode));
+	ima_inode_free(inode);
+	security_inode_free(inode);
++}
++EXPORT_SYMBOL(__destroy_inode);
++
++void destroy_inode(struct inode *inode)
++{
++	__destroy_inode(inode);
+	if (inode->i_sb->s_op->destroy_inode)
+		inode->i_sb->s_op->destroy_inode(inode);
+	else
+		kmem_cache_free(inode_cachep, (inode));
+ }
+-EXPORT_SYMBOL(destroy_inode);
+-
+
+ /*
+ * These are initializations that only need to be done
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index b2c52b3..044990a 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -894,18 +894,17 @@ struct ocfs2_write_cluster_desc {
+	 */
+	unsigned	c_new;
+	unsigned	c_unwritten;
++	unsigned	c_needs_zero;
+ };
+
+-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
+-{
+-	return d->c_new || d->c_unwritten;
+-}
+-
+ struct ocfs2_write_ctxt {
+	/* Logical cluster position / len of write */
+	u32				w_cpos;
+	u32				w_clen;
+
++	/* First cluster allocated in a nonsparse extend */
++	u32				w_first_new_cpos;
++
+	struct ocfs2_write_cluster_desc	w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
+
+	/*
+@@ -983,6 +982,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
+		return -ENOMEM;
+
+	wc->w_cpos = pos >> osb->s_clustersize_bits;
++	wc->w_first_new_cpos = UINT_MAX;
+	cend = (pos + len - 1) >> osb->s_clustersize_bits;
+	wc->w_clen = cend - wc->w_cpos + 1;
+	get_bh(di_bh);
+@@ -1217,20 +1217,18 @@ out:
+ */
+ static int ocfs2_write_cluster(struct address_space *mapping,
+			       u32 phys, unsigned int unwritten,
++			       unsigned int should_zero,
+			       struct ocfs2_alloc_context *data_ac,
+			       struct ocfs2_alloc_context *meta_ac,
+			       struct ocfs2_write_ctxt *wc, u32 cpos,
+			       loff_t user_pos, unsigned user_len)
+ {
+-	int ret, i, new, should_zero = 0;
++	int ret, i, new;
+	u64 v_blkno, p_blkno;
+	struct inode *inode = mapping->host;
+	struct ocfs2_extent_tree et;
+
+	new = phys == 0 ? 1 : 0;
+-	if (new || unwritten)
+-		should_zero = 1;
+-
+	if (new) {
+		u32 tmp_pos;
+
+@@ -1341,7 +1339,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
+			local_len = osb->s_clustersize - cluster_off;
+
+		ret = ocfs2_write_cluster(mapping, desc->c_phys,
+-					  desc->c_unwritten, data_ac, meta_ac,
++					  desc->c_unwritten,
++					  desc->c_needs_zero,
++					  data_ac, meta_ac,
+					  wc, desc->c_cpos, pos, local_len);
+		if (ret) {
+			mlog_errno(ret);
+@@ -1391,14 +1391,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
+	 * newly allocated cluster.
+	 */
+	desc = &wc->w_desc[0];
+-	if (ocfs2_should_zero_cluster(desc))
++	if (desc->c_needs_zero)
+		ocfs2_figure_cluster_boundaries(osb,
+						desc->c_cpos,
+						&wc->w_target_from,
+						NULL);
+
+	desc = &wc->w_desc[wc->w_clen - 1];
+-	if (ocfs2_should_zero_cluster(desc))
++	if (desc->c_needs_zero)
+		ocfs2_figure_cluster_boundaries(osb,
+						desc->c_cpos,
+						NULL,
+@@ -1466,13 +1466,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
+			phys++;
+		}
+
++		/*
++		 * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
++		 * file that got extended.  w_first_new_cpos tells us
++		 * where the newly allocated clusters are so we can
++		 * zero them.
++		 */
++		if (desc->c_cpos >= wc->w_first_new_cpos) {
++			BUG_ON(phys == 0);
++			desc->c_needs_zero = 1;
++		}
++
+		desc->c_phys = phys;
+		if (phys == 0) {
+			desc->c_new = 1;
++			desc->c_needs_zero = 1;
+			*clusters_to_alloc = *clusters_to_alloc + 1;
+		}
+-		if (ext_flags & OCFS2_EXT_UNWRITTEN)
++
++		if (ext_flags & OCFS2_EXT_UNWRITTEN) {
+			desc->c_unwritten = 1;
++			desc->c_needs_zero = 1;
++		}
+
+		num_clusters--;
+	}
+@@ -1632,10 +1647,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
+	if (newsize <= i_size_read(inode))
+		return 0;
+
+-	ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
++	ret = ocfs2_extend_no_holes(inode, newsize, pos);
+	if (ret)
+		mlog_errno(ret);
+
++	wc->w_first_new_cpos =
++		ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
++
+	return ret;
+ }
+
+@@ -1644,7 +1662,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
+			     struct page **pagep, void **fsdata,
+			     struct buffer_head *di_bh, struct page *mmap_page)
+ {
+-	int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
++	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
+	unsigned int clusters_to_alloc, extents_to_split;
+	struct ocfs2_write_ctxt *wc;
+	struct inode *inode = mapping->host;
+@@ -1722,8 +1740,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
+
+	}
+
+-	ocfs2_set_target_boundaries(osb, wc, pos, len,
+-				    clusters_to_alloc + extents_to_split);
++	/*
++	 * We have to zero sparse allocated clusters, unwritten extent clusters,
++	 * and non-sparse clusters we just extended.  For non-sparse writes,
++	 * we know zeros will only be needed in the first and/or last cluster.
++	 */
++	if (clusters_to_alloc || extents_to_split ||
++	    (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
++			    wc->w_desc[wc->w_clen - 1].c_needs_zero)))
++		cluster_of_pages = 1;
++	else
++		cluster_of_pages = 0;
++
++	ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
+
+	handle = ocfs2_start_trans(osb, credits);
+	if (IS_ERR(handle)) {
+@@ -1756,8 +1785,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
2866 |
+ * extent. |
2867 |
+ */ |
2868 |
+ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, |
2869 |
+- clusters_to_alloc + extents_to_split, |
2870 |
+- mmap_page); |
2871 |
++ cluster_of_pages, mmap_page); |
2872 |
+ if (ret) { |
2873 |
+ mlog_errno(ret); |
2874 |
+ goto out_quota; |
2875 |
+diff --git a/fs/select.c b/fs/select.c
+index 0fe0e14..6d76b82 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -110,6 +110,7 @@ void poll_initwait(struct poll_wqueues *pwq)
+ {
+ init_poll_funcptr(&pwq->pt, __pollwait);
+ pwq->polling_task = current;
++ pwq->triggered = 0;
+ pwq->error = 0;
+ pwq->table = NULL;
+ pwq->inline_index = 0;
+diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
+index 89b81ee..1863b0d 100644
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -63,6 +63,10 @@ xfs_inode_alloc(
+ ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ if (!ip)
+ return NULL;
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
++ kmem_zone_free(xfs_inode_zone, ip);
++ return NULL;
++ }
+
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+@@ -104,17 +108,6 @@ xfs_inode_alloc(
+ #ifdef XFS_DIR2_TRACE
+ ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
+ #endif
+- /*
+- * Now initialise the VFS inode. We do this after the xfs_inode
+- * initialisation as internal failures will result in ->destroy_inode
+- * being called and that will pass down through the reclaim path and
+- * free the XFS inode. This path requires the XFS inode to already be
+- * initialised. Hence if this call fails, the xfs_inode has already
+- * been freed and we should not reference it at all in the error
+- * handling.
+- */
+- if (!inode_init_always(mp->m_super, VFS_I(ip)))
+- return NULL;
+
+ /* prevent anyone from using this yet */
+ VFS_I(ip)->i_state = I_NEW|I_LOCK;
+@@ -122,6 +115,71 @@ xfs_inode_alloc(
+ return ip;
+ }
+
++STATIC void
++xfs_inode_free(
++ struct xfs_inode *ip)
++{
++ switch (ip->i_d.di_mode & S_IFMT) {
++ case S_IFREG:
++ case S_IFDIR:
++ case S_IFLNK:
++ xfs_idestroy_fork(ip, XFS_DATA_FORK);
++ break;
++ }
++
++ if (ip->i_afp)
++ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
++
++#ifdef XFS_INODE_TRACE
++ ktrace_free(ip->i_trace);
++#endif
++#ifdef XFS_BMAP_TRACE
++ ktrace_free(ip->i_xtrace);
++#endif
++#ifdef XFS_BTREE_TRACE
++ ktrace_free(ip->i_btrace);
++#endif
++#ifdef XFS_RW_TRACE
++ ktrace_free(ip->i_rwtrace);
++#endif
++#ifdef XFS_ILOCK_TRACE
++ ktrace_free(ip->i_lock_trace);
++#endif
++#ifdef XFS_DIR2_TRACE
++ ktrace_free(ip->i_dir_trace);
++#endif
++
++ if (ip->i_itemp) {
++ /*
++ * Only if we are shutting down the fs will we see an
++ * inode still in the AIL. If it is there, we should remove
++ * it to prevent a use-after-free from occurring.
++ */
++ xfs_log_item_t *lip = &ip->i_itemp->ili_item;
++ struct xfs_ail *ailp = lip->li_ailp;
++
++ ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
++ XFS_FORCED_SHUTDOWN(ip->i_mount));
++ if (lip->li_flags & XFS_LI_IN_AIL) {
++ spin_lock(&ailp->xa_lock);
++ if (lip->li_flags & XFS_LI_IN_AIL)
++ xfs_trans_ail_delete(ailp, lip);
++ else
++ spin_unlock(&ailp->xa_lock);
++ }
++ xfs_inode_item_destroy(ip);
++ ip->i_itemp = NULL;
++ }
++
++ /* asserts to verify all state is correct here */
++ ASSERT(atomic_read(&ip->i_iocount) == 0);
++ ASSERT(atomic_read(&ip->i_pincount) == 0);
++ ASSERT(!spin_is_locked(&ip->i_flags_lock));
++ ASSERT(completion_done(&ip->i_flush));
++
++ kmem_zone_free(xfs_inode_zone, ip);
++}
++
+ /*
+ * Check the validity of the inode we just found it the cache
+ */
+@@ -166,7 +224,7 @@ xfs_iget_cache_hit(
+ * errors cleanly, then tag it so it can be set up correctly
+ * later.
+ */
+- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
++ if (inode_init_always(mp->m_super, VFS_I(ip))) {
+ error = ENOMEM;
+ goto out_error;
+ }
+@@ -298,7 +356,8 @@ out_preload_end:
+ if (lock_flags)
+ xfs_iunlock(ip, lock_flags);
+ out_destroy:
+- xfs_destroy_inode(ip);
++ __destroy_inode(VFS_I(ip));
++ xfs_inode_free(ip);
+ return error;
+ }
+
+@@ -506,62 +565,7 @@ xfs_ireclaim(
+ XFS_QM_DQDETACH(ip->i_mount, ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+- switch (ip->i_d.di_mode & S_IFMT) {
+- case S_IFREG:
+- case S_IFDIR:
+- case S_IFLNK:
+- xfs_idestroy_fork(ip, XFS_DATA_FORK);
+- break;
+- }
+-
+- if (ip->i_afp)
+- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+-
+-#ifdef XFS_INODE_TRACE
+- ktrace_free(ip->i_trace);
+-#endif
+-#ifdef XFS_BMAP_TRACE
+- ktrace_free(ip->i_xtrace);
+-#endif
+-#ifdef XFS_BTREE_TRACE
+- ktrace_free(ip->i_btrace);
+-#endif
+-#ifdef XFS_RW_TRACE
+- ktrace_free(ip->i_rwtrace);
+-#endif
+-#ifdef XFS_ILOCK_TRACE
+- ktrace_free(ip->i_lock_trace);
+-#endif
+-#ifdef XFS_DIR2_TRACE
+- ktrace_free(ip->i_dir_trace);
+-#endif
+- if (ip->i_itemp) {
+- /*
+- * Only if we are shutting down the fs will we see an
+- * inode still in the AIL. If it is there, we should remove
+- * it to prevent a use-after-free from occurring.
+- */
+- xfs_log_item_t *lip = &ip->i_itemp->ili_item;
+- struct xfs_ail *ailp = lip->li_ailp;
+-
+- ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+- XFS_FORCED_SHUTDOWN(ip->i_mount));
+- if (lip->li_flags & XFS_LI_IN_AIL) {
+- spin_lock(&ailp->xa_lock);
+- if (lip->li_flags & XFS_LI_IN_AIL)
+- xfs_trans_ail_delete(ailp, lip);
+- else
+- spin_unlock(&ailp->xa_lock);
+- }
+- xfs_inode_item_destroy(ip);
+- ip->i_itemp = NULL;
+- }
+- /* asserts to verify all state is correct here */
+- ASSERT(atomic_read(&ip->i_iocount) == 0);
+- ASSERT(atomic_read(&ip->i_pincount) == 0);
+- ASSERT(!spin_is_locked(&ip->i_flags_lock));
+- ASSERT(completion_done(&ip->i_flush));
+- kmem_zone_free(xfs_inode_zone, ip);
++ xfs_inode_free(ip);
+ }
+
+ /*
+diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
+index f879c1b..71c20ec 100644
+--- a/fs/xfs/xfs_inode.h
++++ b/fs/xfs/xfs_inode.h
+@@ -309,23 +309,6 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
+ }
+
+ /*
+- * Get rid of a partially initialized inode.
+- *
+- * We have to go through destroy_inode to make sure allocations
+- * from init_inode_always like the security data are undone.
+- *
+- * We mark the inode bad so that it takes the short cut in
+- * the reclaim path instead of going through the flush path
+- * which doesn't make sense for an inode that has never seen the
+- * light of day.
+- */
+-static inline void xfs_destroy_inode(struct xfs_inode *ip)
+-{
+- make_bad_inode(VFS_I(ip));
+- return destroy_inode(VFS_I(ip));
+-}
+-
+-/*
+ * i_flags helper functions
+ */
+ static inline void
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 3750f04..9dbdff3 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -3180,7 +3180,7 @@ try_again:
+ STATIC void
+ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
+ {
+- ASSERT(spin_is_locked(&log->l_icloglock));
++ assert_spin_locked(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ xlog_state_switch_iclogs(log, iclog, 0);
+diff --git a/include/acpi/processor.h b/include/acpi/processor.h
+index 4927c06..e498c79 100644
+--- a/include/acpi/processor.h
++++ b/include/acpi/processor.h
+@@ -174,7 +174,7 @@ struct acpi_processor_throttling {
+ cpumask_var_t shared_cpu_map;
+ int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
+ int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
+- int state);
++ int state, bool force);
+
+ u32 address;
+ u8 duty_offset;
+@@ -320,7 +320,8 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+ /* in processor_throttling.c */
+ int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr);
+-extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
++extern int acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force);
+ extern const struct file_operations acpi_processor_throttling_fops;
+ extern void acpi_processor_throttling_init(void);
+ /* in processor_idle.c */
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 2878811..756d78b 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -94,13 +94,13 @@ extern void __bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+ extern void __bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+-extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
++extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+-extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
++extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+ extern int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+@@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ }
+ }
+
+-static inline void bitmap_and(unsigned long *dst, const unsigned long *src1,
++static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+ {
+ if (small_const_nbits(nbits))
+- *dst = *src1 & *src2;
+- else
+- __bitmap_and(dst, src1, src2, nbits);
++ return (*dst = *src1 & *src2) != 0;
++ return __bitmap_and(dst, src1, src2, nbits);
+ }
+
+ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+@@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ __bitmap_xor(dst, src1, src2, nbits);
+ }
+
+-static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
++static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+ {
+ if (small_const_nbits(nbits))
+- *dst = *src1 & ~(*src2);
+- else
+- __bitmap_andnot(dst, src1, src2, nbits);
++ return (*dst = *src1 & ~(*src2)) != 0;
++ return __bitmap_andnot(dst, src1, src2, nbits);
+ }
+
+ static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index c5ac87c..796df12 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -43,10 +43,10 @@
+ * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask
+ * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask
+ *
+- * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
++ * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void cpus_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void cpus_xor(dst, src1, src2) dst = src1 ^ src2
+- * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2
++ * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void cpus_complement(dst, src) dst = ~src
+ *
+ * int cpus_equal(mask1, mask2) Does mask1 == mask2?
+@@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
+ }
+
+ #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
+-static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
++static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+ {
+- bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
++ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+ }
+
+ #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
+@@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+
+ #define cpus_andnot(dst, src1, src2) \
+ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
+-static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
++static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+ {
+- bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
++ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+ }
+
+ #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
+@@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp)
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+-static inline void cpumask_and(struct cpumask *dstp,
++static inline int cpumask_and(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+ {
+- bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
++ return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+ }
+
+@@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp,
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+-static inline void cpumask_andnot(struct cpumask *dstp,
++static inline int cpumask_andnot(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+ {
+- bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
++ return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+ }
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 3b534e5..53618df 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2121,7 +2121,7 @@ extern struct file *do_filp_open(int dfd, const char *pathname,
+ int open_flag, int mode, int acc_mode);
+ extern int may_open(struct path *, int, int);
+
+-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
++extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+ extern struct file * open_exec(const char *);
+
+ /* fs/dcache.c -- generic fs support functions */
+@@ -2135,7 +2135,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+
+ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+
+-extern struct inode * inode_init_always(struct super_block *, struct inode *);
++extern int inode_init_always(struct super_block *, struct inode *);
+ extern void inode_init_once(struct inode *);
+ extern void inode_add_to_lists(struct super_block *, struct inode *);
+ extern void iput(struct inode *);
+@@ -2162,6 +2162,7 @@ extern void __iget(struct inode * inode);
+ extern void iget_failed(struct inode *);
+ extern void clear_inode(struct inode *);
+ extern void destroy_inode(struct inode *);
++extern void __destroy_inode(struct inode *);
+ extern struct inode *new_inode(struct super_block *);
+ extern int should_remove_suid(struct dentry *);
+ extern int file_remove_suid(struct file *);
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 03be7f2..7e2f1ef 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -10,6 +10,7 @@
+ #include <asm/tlbflush.h>
+
+ struct ctl_table;
++struct user_struct;
+
+ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+ {
+@@ -139,7 +140,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
+
+ extern const struct file_operations hugetlbfs_file_operations;
+ extern struct vm_operations_struct hugetlb_vm_ops;
+-struct file *hugetlb_file_setup(const char *name, size_t, int);
++struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
++ struct user_struct **user);
+ int hugetlb_get_quota(struct address_space *mapping, long delta);
+ void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+@@ -161,7 +163,7 @@ static inline void set_file_hugepages(struct file *file)
+
+ #define is_file_hugepages(file) 0
+ #define set_file_hugepages(file) BUG()
+-#define hugetlb_file_setup(name,size,acctflag) ERR_PTR(-ENOSYS)
++#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS)
+
+ #endif /* !CONFIG_HUGETLBFS */
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 5eed8fa..340e909 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -110,6 +110,7 @@ struct kvm_memory_slot {
+
+ struct kvm_kernel_irq_routing_entry {
+ u32 gsi;
++ u32 type;
+ int (*set)(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int level);
+ union {
+diff --git a/init/main.c b/init/main.c
+index d721dad..303903c 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -702,13 +702,14 @@ asmlinkage void __init start_kernel(void)
+ int initcall_debug;
+ core_param(initcall_debug, initcall_debug, bool, 0644);
+
++static char msgbuf[64];
++static struct boot_trace_call call;
++static struct boot_trace_ret ret;
++
+ int do_one_initcall(initcall_t fn)
+ {
+ int count = preempt_count();
+ ktime_t calltime, delta, rettime;
+- char msgbuf[64];
+- struct boot_trace_call call;
+- struct boot_trace_ret ret;
+
+ if (initcall_debug) {
+ call.caller = task_pid_nr(current);
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 4259716..30b1265 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -174,7 +174,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ shm_unlock(shp);
+ if (!is_file_hugepages(shp->shm_file))
+ shmem_lock(shp->shm_file, 0, shp->mlock_user);
+- else
++ else if (shp->mlock_user)
+ user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
+ shp->mlock_user);
+ fput (shp->shm_file);
+@@ -369,8 +369,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ /* hugetlb_file_setup applies strict accounting */
+ if (shmflg & SHM_NORESERVE)
+ acctflag = VM_NORESERVE;
+- file = hugetlb_file_setup(name, size, acctflag);
+- shp->mlock_user = current_user();
++ file = hugetlb_file_setup(name, size, acctflag,
++ &shp->mlock_user);
+ } else {
+ /*
+ * Do not allow no accounting for OVERCOMMIT_NEVER, even
+@@ -411,6 +411,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ return error;
+
+ no_id:
++ if (shp->mlock_user) /* shmflg & SHM_HUGETLB case */
++ user_shm_unlock(size, shp->mlock_user);
+ fput(file);
+ no_file:
+ security_shm_free(shp);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 9c1f52d..f4be1ee 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -816,11 +816,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ {
+ struct signal_struct *sig;
+
+- if (clone_flags & CLONE_THREAD) {
+- atomic_inc(&current->signal->count);
+- atomic_inc(&current->signal->live);
++ if (clone_flags & CLONE_THREAD)
+ return 0;
+- }
+
+ sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+ tsk->signal = sig;
+@@ -878,16 +875,6 @@ void __cleanup_signal(struct signal_struct *sig)
+ kmem_cache_free(signal_cachep, sig);
+ }
+
+-static void cleanup_signal(struct task_struct *tsk)
+-{
+- struct signal_struct *sig = tsk->signal;
+-
+- atomic_dec(&sig->live);
+-
+- if (atomic_dec_and_test(&sig->count))
+- __cleanup_signal(sig);
+-}
+-
+ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
+ {
+ unsigned long new_flags = p->flags;
+@@ -1237,6 +1224,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ }
+
+ if (clone_flags & CLONE_THREAD) {
++ atomic_inc(&current->signal->count);
++ atomic_inc(&current->signal->live);
+ p->group_leader = current->group_leader;
+ list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
+ }
+@@ -1281,7 +1270,8 @@ bad_fork_cleanup_mm:
+ if (p->mm)
+ mmput(p->mm);
+ bad_fork_cleanup_signal:
+- cleanup_signal(p);
++ if (!(clone_flags & CLONE_THREAD))
++ __cleanup_signal(p->signal);
+ bad_fork_cleanup_sighand:
+ __cleanup_sighand(p->sighand);
+ bad_fork_cleanup_fs:
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 4ebaf85..7fbaa09 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -216,12 +216,12 @@ int kthread_stop(struct task_struct *k)
+ /* Now set kthread_should_stop() to true, and wake it up. */
+ kthread_stop_info.k = k;
+ wake_up_process(k);
+- put_task_struct(k);
+
+ /* Once it dies, reset stop ptr, gather result and we're done. */
+ wait_for_completion(&kthread_stop_info.done);
+ kthread_stop_info.k = NULL;
+ ret = kthread_stop_info.err;
++ put_task_struct(k);
+ mutex_unlock(&kthread_stop_lock);
+
+ trace_sched_kthread_stop_ret(ret);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d803473..2dfc931 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2414,11 +2414,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ stack_t oss;
+ int error;
+
+- if (uoss) {
+- oss.ss_sp = (void __user *) current->sas_ss_sp;
+- oss.ss_size = current->sas_ss_size;
+- oss.ss_flags = sas_ss_flags(sp);
+- }
++ oss.ss_sp = (void __user *) current->sas_ss_sp;
++ oss.ss_size = current->sas_ss_size;
++ oss.ss_flags = sas_ss_flags(sp);
+
+ if (uss) {
+ void __user *ss_sp;
+@@ -2461,13 +2459,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
+ current->sas_ss_size = ss_size;
+ }
+
++ error = 0;
+ if (uoss) {
+ error = -EFAULT;
+- if (copy_to_user(uoss, &oss, sizeof(oss)))
++ if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
+ goto out;
++ error = __put_user(oss.ss_sp, &uoss->ss_sp) |
++ __put_user(oss.ss_size, &uoss->ss_size) |
++ __put_user(oss.ss_flags, &uoss->ss_flags);
+ }
+
+- error = 0;
+ out:
+ return error;
+ }
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 35a1f7f..7025658 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -179,14 +179,16 @@ void __bitmap_shift_left(unsigned long *dst,
+ }
+ EXPORT_SYMBOL(__bitmap_shift_left);
+
+-void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
++int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+ {
+ int k;
+ int nr = BITS_TO_LONGS(bits);
++ unsigned long result = 0;
+
+ for (k = 0; k < nr; k++)
+- dst[k] = bitmap1[k] & bitmap2[k];
++ result |= (dst[k] = bitmap1[k] & bitmap2[k]);
++ return result != 0;
+ }
+ EXPORT_SYMBOL(__bitmap_and);
+
+@@ -212,14 +214,16 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ }
+ EXPORT_SYMBOL(__bitmap_xor);
+
+-void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
++int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+ {
+ int k;
+ int nr = BITS_TO_LONGS(bits);
++ unsigned long result = 0;
+
+ for (k = 0; k < nr; k++)
+- dst[k] = bitmap1[k] & ~bitmap2[k];
++ result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
++ return result != 0;
+ }
+ EXPORT_SYMBOL(__bitmap_andnot);
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6bf3cc4..b91020e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2342,7 +2342,6 @@ static void build_zonelists(pg_data_t *pgdat)
+ prev_node = local_node;
+ nodes_clear(used_mask);
+
+- memset(node_load, 0, sizeof(node_load));
+ memset(node_order, 0, sizeof(node_order));
+ j = 0;
+
+@@ -2451,6 +2450,9 @@ static int __build_all_zonelists(void *dummy)
+ {
+ int nid;
+
++#ifdef CONFIG_NUMA
++ memset(node_load, 0, sizeof(node_load));
++#endif
+ for_each_online_node(nid) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index d6a9243..e8e9bad 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1242,6 +1242,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
+ return -ENOBUFS;
+
+ *uaddr_len = sizeof(struct sockaddr_at);
++ memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
+
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 6aa154e..5df3bf6 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -397,6 +397,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
+ if (peer)
+ return -EOPNOTSUPP;
+
++ memset(addr, 0, sizeof(*addr));
+ addr->can_family = AF_CAN;
+ addr->can_ifindex = ro->ifindex;
+
+diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
+index 6f479fa..3bafb21 100644
+--- a/net/econet/af_econet.c
++++ b/net/econet/af_econet.c
+@@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
+ if (peer)
+ return -EOPNOTSUPP;
+
++ memset(sec, 0, sizeof(*sec));
+ mutex_lock(&econet_mutex);
+
+ sk = sock->sk;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 3e7e910..d1d88e6 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -814,6 +814,8 @@ int ip_append_data(struct sock *sk,
+ inet->cork.addr = ipc->addr;
+ }
+ rt = *rtp;
++ if (unlikely(!rt))
++ return -EFAULT;
+ /*
+ * We steal reference to this route, caller should not release it
+ */
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 61f5538..55e315a 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -294,8 +294,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ v4addr != htonl(INADDR_ANY) &&
+ chk_addr_ret != RTN_LOCAL &&
+ chk_addr_ret != RTN_MULTICAST &&
+- chk_addr_ret != RTN_BROADCAST)
++ chk_addr_ret != RTN_BROADCAST) {
++ err = -EADDRNOTAVAIL;
+ goto out;
++ }
+ } else {
+ if (addr_type != IPV6_ADDR_ANY) {
+ struct net_device *dev = NULL;
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index e0fbcff..b06224b 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sock *sk = sock->sk;
+ struct irda_sock *self = irda_sk(sk);
+
++ memset(&saddr, 0, sizeof(saddr));
+ if (peer) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index febae70..515d556 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct llc_sock *llc = llc_sk(sk);
+ int rc = 0;
+
++ memset(&sllc, 0, sizeof(sllc));
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index 947aaaa..baf0f77 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -376,6 +376,14 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ &local->hw, queue,
+ IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+
++ if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
++ return;
++
++ if (WARN(!sta->ampdu_mlme.tid_tx[tid],
++ "TID %d gone but expected when splicing aggregates from"
++ "the pending queue\n", tid))
++ return;
++
+ if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* mark queue as pending, it is stopped already */
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 3be0e01..0c3e755 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
+ sax->fsa_ax25.sax25_family = AF_NETROM;
+ sax->fsa_ax25.sax25_ndigis = 1;
+ sax->fsa_ax25.sax25_call = nr->user_addr;
++ memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
+ sax->fsa_digipeater[0] = nr->dest_addr;
+ *uaddr_len = sizeof(struct full_sockaddr_ax25);
+ } else {
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 877a7f6..ebe1cc9 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct rose_sock *rose = rose_sk(sk);
+ int n;
+
++ memset(srose, 0, sizeof(*srose));
+ if (peer != 0) {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 5abab09..8d02e05 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -876,6 +876,7 @@ static inline void
+ rpc_task_force_reencode(struct rpc_task *task)
+ {
+ task->tk_rqstp->rq_snd_buf.len = 0;
++ task->tk_rqstp->rq_bytes_sent = 0;
+ }
+
+ static inline void
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 50d572b..2ae3aff 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -45,9 +45,9 @@ int ima_calc_hash(struct file *file, char *digest)
+ {
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+- loff_t i_size;
++ loff_t i_size, offset = 0;
+ char *rbuf;
+- int rc, offset = 0;
++ int rc;
+
+ rc = init_desc(&desc);
+ if (rc != 0)
+@@ -67,6 +67,8 @@ int ima_calc_hash(struct file *file, char *digest)
+ rc = rbuf_len;
+ break;
+ }
++ if (rbuf_len == 0)
++ break;
+ offset += rbuf_len;
+ sg_init_one(sg, rbuf, rbuf_len);
+
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index d659995..2a2c2ca 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -876,47 +876,24 @@ static int snd_interval_ratden(struct snd_interval *i,
+ int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
+ {
+ unsigned int k;
+- int changed = 0;
++ struct snd_interval list_range;
+
+ if (!count) {
+ i->empty = 1;
+ return -EINVAL;
+ }
++ snd_interval_any(&list_range);
++ list_range.min = UINT_MAX;
++ list_range.max = 0;
+ for (k = 0; k < count; k++) {
+ if (mask && !(mask & (1 << k)))
+ continue;
+- if (i->min == list[k] && !i->openmin)
+- goto _l1;
+- if (i->min < list[k]) {
+- i->min = list[k];
+- i->openmin = 0;
+- changed = 1;
+- goto _l1;
+- }
+- }
+- i->empty = 1;
+- return -EINVAL;
+- _l1:
+- for (k = count; k-- > 0;) {
+- if (mask && !(mask & (1 << k)))
++ if (!snd_interval_test(i, list[k]))
+ continue;
+- if (i->max == list[k] && !i->openmax)
+- goto _l2;
+- if (i->max > list[k]) {
+- i->max = list[k];
+- i->openmax = 0;
+- changed = 1;
+- goto _l2;
+- }
++ list_range.min = min(list_range.min, list[k]);
++ list_range.max = max(list_range.max, list[k]);
+ }
+- i->empty = 1;
+- return -EINVAL;
+- _l2:
+- if (snd_interval_checkempty(i)) {
+- i->empty = 1;
+- return -EINVAL;
+- }
+- return changed;
++ return snd_interval_refine(i, &list_range);
+ }
+
+ EXPORT_SYMBOL(snd_interval_list);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1df7692..c734840 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6186,9 +6186,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
+ };
+
+ /*
+- * 6ch mode
++ * 4ch mode
+ */
+-static struct hda_verb alc885_mbp_ch6_init[] = {
++static struct hda_verb alc885_mbp_ch4_init[] = {
+ { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
+ { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
+@@ -6197,9 +6197,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
+ { } /* end */
+ };
+
+-static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
++static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
+ { 2, alc885_mbp_ch2_init },
+- { 6, alc885_mbp_ch6_init },
++ { 4, alc885_mbp_ch4_init },
+ };
+
+
+@@ -6232,10 +6232,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
+ };
+
+ static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
+- HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
+- HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
+- HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
+- HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
++ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
++ HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
++ HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
++ HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
++ HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
+@@ -6481,14 +6482,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+ {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
++ /* HP mixer */
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
++ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+ /* Front Pin: output 0 (0x0c) */
+ {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
+- /* HP Pin: output 0 (0x0d) */
++ /* HP Pin: output 0 (0x0e) */
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
+- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+- {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
++ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
++ {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+ /* Mic (rear) pin: input vref at 80% */
+ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+@@ -6885,10 +6890,11 @@ static struct alc_config_preset alc882_presets[] = {
+ .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
+ .init_verbs = { alc885_mbp3_init_verbs,
+ alc880_gpio1_init_verbs },
+- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
++ .num_dacs = 2,
+ .dac_nids = alc882_dac_nids,
+- .channel_mode = alc885_mbp_6ch_modes,
+- .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
++ .hp_nid = 0x04,
++ .channel_mode = alc885_mbp_4ch_modes,
++ .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
+ .input_mux = &alc882_capture_source,
+ .dig_out_nid = ALC882_DIGOUT_NID,
+ .dig_in_nid = ALC882_DIGIN_NID,
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 864ac54..8f2018a 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -141,7 +141,8 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ unsigned gsi = pin;
+
+ list_for_each_entry(e, &kvm->irq_routing, link)
+- if (e->irqchip.irqchip == irqchip &&
++ if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&
++ e->irqchip.irqchip == irqchip &&
+ e->irqchip.pin == pin) {
+ gsi = e->gsi;
+ break;
+@@ -240,6 +241,7 @@ static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ int delta;
+
+ e->gsi = ue->gsi;
++ e->type = ue->type;
+ switch (ue->type) {
+ case KVM_IRQ_ROUTING_IRQCHIP:
+ delta = 0;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1489829..ad38135 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -881,6 +881,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
+ #endif
+ #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
++#else
++ kvm_arch_flush_shadow(kvm);
+ #endif
+ kvm_arch_destroy_vm(kvm);
+ mmdrop(mm);
+@@ -1055,8 +1057,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
+
+ kvm_free_physmem_slot(&old, npages ? &new : NULL);
+ /* Slot deletion case: we have to update the current slot */
++ spin_lock(&kvm->mmu_lock);
+ if (!npages)
+ *memslot = old;
++ spin_unlock(&kvm->mmu_lock);
+ #ifdef CONFIG_DMAR
+ /* map the pages in iommu page table */
+ r = kvm_iommu_map_pages(kvm, base_gfn, npages);

Deleted: genpatches-2.6/trunk/2.6.30/1700_empty-ipi-check.patch
===================================================================
--- genpatches-2.6/trunk/2.6.30/1700_empty-ipi-check.patch 2009-09-06 16:23:11 UTC (rev 1602)
+++ genpatches-2.6/trunk/2.6.30/1700_empty-ipi-check.patch 2009-09-09 12:49:53 UTC (rev 1603)
@@ -1,12 +0,0 @@
---- a/arch/x86/kernel/apic/ipi.c
-+++ b/arch/x86/kernel/apic/ipi.c
-@@ -106,6 +106,9 @@
- unsigned long mask = cpumask_bits(cpumask)[0];
- unsigned long flags;
-
-+ if (WARN_ONCE(!mask, "empty IPI mask"))
-+ return;
-+
- local_irq_save(flags);
- WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
- __default_send_IPI_dest_field(mask, vector, apic->dest_logical);