commit: b897bcf385333e0aaa97084370db28c16dc589f4
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 1 23:40:12 2017 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sat Apr 1 23:40:12 2017 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=b897bcf3

grsecurity-3.1-4.9.20-201703310823

4.9.18/1016_linux-4.9.17.patch | 6091 --------------------
4.9.18/1017_linux-4.9.18.patch | 876 ---
{4.9.18 => 4.9.20}/0000_README | 10 +-
.../4420_grsecurity-3.1-4.9.20-201703310823.patch | 208 +-
{4.9.18 => 4.9.20}/4425_grsec_remove_EI_PAX.patch | 0
.../4426_default_XATTR_PAX_FLAGS.patch | 0
.../4427_force_XATTR_PAX_tmpfs.patch | 0
.../4430_grsec-remove-localversion-grsec.patch | 0
{4.9.18 => 4.9.20}/4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
{4.9.18 => 4.9.20}/4470_disable-compat_vdso.patch | 0
{4.9.18 => 4.9.20}/4475_emutramp_default_on.patch | 0
14 files changed, 143 insertions(+), 7042 deletions(-)

diff --git a/4.9.18/1016_linux-4.9.17.patch b/4.9.18/1016_linux-4.9.17.patch
deleted file mode 100644
index 1a83496..0000000
--- a/4.9.18/1016_linux-4.9.17.patch
+++ /dev/null
@@ -1,6091 +0,0 @@
-diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
-index 405da11..d11af52 100644
---- a/Documentation/arm64/silicon-errata.txt
-+++ b/Documentation/arm64/silicon-errata.txt
-@@ -42,24 +42,26 @@ file acts as a registry of software workarounds in the Linux Kernel and
- will be updated when new workarounds are committed and backported to
- stable kernels.
-
--| Implementor | Component | Erratum ID | Kconfig |
--+----------------+-----------------+-----------------+-------------------------+
--| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
--| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
--| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
--| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
--| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
--| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
--| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
--| ARM | Cortex-A57 | #852523 | N/A |
--| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
--| ARM | Cortex-A72 | #853709 | N/A |
--| ARM | MMU-500 | #841119,#826419 | N/A |
--| | | | |
--| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
--| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
--| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
--| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
--| Cavium | ThunderX SMMUv2 | #27704 | N/A |
--| | | | |
--| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
-+| Implementor | Component | Erratum ID | Kconfig |
-++----------------+-----------------+-----------------+-----------------------------+
-+| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
-+| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
-+| ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
-+| ARM | Cortex-A53 | #819472 | ARM64_ERRATUM_819472 |
-+| ARM | Cortex-A53 | #845719 | ARM64_ERRATUM_845719 |
-+| ARM | Cortex-A53 | #843419 | ARM64_ERRATUM_843419 |
-+| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
-+| ARM | Cortex-A57 | #852523 | N/A |
-+| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
-+| ARM | Cortex-A72 | #853709 | N/A |
-+| ARM | MMU-500 | #841119,#826419 | N/A |
-+| | | | |
-+| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
-+| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
-+| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
-+| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
-+| Cavium | ThunderX SMMUv2 | #27704 | N/A |
-+| | | | |
-+| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
-+| | | | |
-+| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
-diff --git a/Makefile b/Makefile
-index 4e0f962..004f90a 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 9
--SUBLEVEL = 16
-+SUBLEVEL = 17
- EXTRAVERSION =
- NAME = Roaring Lionus
-
-diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index 969ef88..cf57a77 100644
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -474,6 +474,16 @@ config CAVIUM_ERRATUM_27456
-
- If unsure, say Y.
-
-+config QCOM_QDF2400_ERRATUM_0065
-+ bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
-+ default y
-+ help
-+ On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
-+ ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
-+ been indicated as 16Bytes (0xf), not 8Bytes (0x7).
-+
-+ If unsure, say Y.
-+
- endmenu
-
-
-diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
-index 88e2f2b..55889d0 100644
---- a/arch/arm64/kvm/hyp/tlb.c
-+++ b/arch/arm64/kvm/hyp/tlb.c
-@@ -17,14 +17,62 @@
-
- #include <asm/kvm_hyp.h>
-
-+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
-+{
-+ u64 val;
-+
-+ /*
-+ * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
-+ * most TLB operations target EL2/EL0. In order to affect the
-+ * guest TLBs (EL1/EL0), we need to change one of these two
-+ * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
-+ * let's flip TGE before executing the TLB operation.
-+ */
-+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
-+ val = read_sysreg(hcr_el2);
-+ val &= ~HCR_TGE;
-+ write_sysreg(val, hcr_el2);
-+ isb();
-+}
-+
-+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
-+{
-+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
-+ isb();
-+}
-+
-+static hyp_alternate_select(__tlb_switch_to_guest,
-+ __tlb_switch_to_guest_nvhe,
-+ __tlb_switch_to_guest_vhe,
-+ ARM64_HAS_VIRT_HOST_EXTN);
-+
-+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
-+{
-+ /*
-+ * We're done with the TLB operation, let's restore the host's
-+ * view of HCR_EL2.
-+ */
-+ write_sysreg(0, vttbr_el2);
-+ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
-+}
-+
-+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
-+{
-+ write_sysreg(0, vttbr_el2);
-+}
-+
-+static hyp_alternate_select(__tlb_switch_to_host,
-+ __tlb_switch_to_host_nvhe,
-+ __tlb_switch_to_host_vhe,
-+ ARM64_HAS_VIRT_HOST_EXTN);
-+
- void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
- {
- dsb(ishst);
-
- /* Switch to requested VMID */
- kvm = kern_hyp_va(kvm);
-- write_sysreg(kvm->arch.vttbr, vttbr_el2);
-- isb();
-+ __tlb_switch_to_guest()(kvm);
-
- /*
- * We could do so much better if we had the VA as well.
-@@ -45,7 +93,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
- dsb(ish);
- isb();
-
-- write_sysreg(0, vttbr_el2);
-+ __tlb_switch_to_host()(kvm);
- }
-
- void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
-@@ -54,14 +102,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
-
- /* Switch to requested VMID */
- kvm = kern_hyp_va(kvm);
-- write_sysreg(kvm->arch.vttbr, vttbr_el2);
-- isb();
-+ __tlb_switch_to_guest()(kvm);
-
- asm volatile("tlbi vmalls12e1is" : : );
- dsb(ish);
- isb();
-
-- write_sysreg(0, vttbr_el2);
-+ __tlb_switch_to_host()(kvm);
- }
-
- void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
-@@ -69,14 +116,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
- struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
-
- /* Switch to requested VMID */
-- write_sysreg(kvm->arch.vttbr, vttbr_el2);
-- isb();
-+ __tlb_switch_to_guest()(kvm);
-
- asm volatile("tlbi vmalle1" : : );
- dsb(nsh);
- isb();
-
-- write_sysreg(0, vttbr_el2);
-+ __tlb_switch_to_host()(kvm);
- }
-
- void __hyp_text __kvm_flush_vm_context(void)
-diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
-index 9fa046d..4119945 100644
---- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
-+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
-@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
- {
- u32 *key = crypto_tfm_ctx(tfm);
-
-- *key = 0;
-+ *key = ~0;
-
- return 0;
- }
-diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
-index 5c45114..b9e3f0a 100644
---- a/arch/powerpc/include/asm/mmu_context.h
-+++ b/arch/powerpc/include/asm/mmu_context.h
-@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
- struct mm_iommu_table_group_mem_t;
-
- extern int isolate_lru_page(struct page *page); /* from internal.h */
--extern bool mm_iommu_preregistered(void);
--extern long mm_iommu_get(unsigned long ua, unsigned long entries,
-+extern bool mm_iommu_preregistered(struct mm_struct *mm);
-+extern long mm_iommu_get(struct mm_struct *mm,
-+ unsigned long ua, unsigned long entries,
- struct mm_iommu_table_group_mem_t **pmem);
--extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
--extern void mm_iommu_init(mm_context_t *ctx);
--extern void mm_iommu_cleanup(mm_context_t *ctx);
--extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-- unsigned long size);
--extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-- unsigned long entries);
-+extern long mm_iommu_put(struct mm_struct *mm,
-+ struct mm_iommu_table_group_mem_t *mem);
-+extern void mm_iommu_init(struct mm_struct *mm);
-+extern void mm_iommu_cleanup(struct mm_struct *mm);
-+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
-+ unsigned long ua, unsigned long size);
-+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
-+ unsigned long ua, unsigned long entries);
- extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa);
- extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
-diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
-index 270ee30..f516ac5 100644
---- a/arch/powerpc/kernel/setup-common.c
-+++ b/arch/powerpc/kernel/setup-common.c
-@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
- init_mm.context.pte_frag = NULL;
- #endif
- #ifdef CONFIG_SPAPR_TCE_IOMMU
-- mm_iommu_init(&init_mm.context);
-+ mm_iommu_init(&init_mm);
- #endif
- irqstack_early_init();
- exc_lvl_early_init();
-diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
-index b114f8b..73bf6e1 100644
---- a/arch/powerpc/mm/mmu_context_book3s64.c
-+++ b/arch/powerpc/mm/mmu_context_book3s64.c
-@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
- mm->context.pte_frag = NULL;
- #endif
- #ifdef CONFIG_SPAPR_TCE_IOMMU
-- mm_iommu_init(&mm->context);
-+ mm_iommu_init(mm);
- #endif
- return 0;
- }
-@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
- }
- #endif
-
--
- void destroy_context(struct mm_struct *mm)
- {
- #ifdef CONFIG_SPAPR_TCE_IOMMU
-- mm_iommu_cleanup(&mm->context);
-+ WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
- #endif
--
- #ifdef CONFIG_PPC_ICSWX
- drop_cop(mm->context.acop, mm);
- kfree(mm->context.cop_lockp);
-diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
-index e0f1c33..7de7124 100644
---- a/arch/powerpc/mm/mmu_context_iommu.c
-+++ b/arch/powerpc/mm/mmu_context_iommu.c
-@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
- }
-
- pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-- current->pid,
-+ current ? current->pid : 0,
- incr ? '+' : '-',
- npages << PAGE_SHIFT,
- mm->locked_vm << PAGE_SHIFT,
-@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
- return ret;
- }
-
--bool mm_iommu_preregistered(void)
-+bool mm_iommu_preregistered(struct mm_struct *mm)
- {
-- if (!current || !current->mm)
-- return false;
--
-- return !list_empty(&current->mm->context.iommu_group_mem_list);
-+ return !list_empty(&mm->context.iommu_group_mem_list);
- }
- EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
-
-@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
- return 0;
- }
-
--long mm_iommu_get(unsigned long ua, unsigned long entries,
-+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
- struct mm_iommu_table_group_mem_t **pmem)
- {
- struct mm_iommu_table_group_mem_t *mem;
- long i, j, ret = 0, locked_entries = 0;
- struct page *page = NULL;
-
-- if (!current || !current->mm)
-- return -ESRCH; /* process exited */
--
- mutex_lock(&mem_list_mutex);
-
-- list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
-+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
- next) {
- if ((mem->ua == ua) && (mem->entries == entries)) {
- ++mem->used;
-@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
-
- }
-
-- ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
-+ ret = mm_iommu_adjust_locked_vm(mm, entries, true);
- if (ret)
- goto unlock_exit;
-
-@@ -190,7 +184,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
- * of the CMA zone if possible. NOTE: faulting in + migration
- * can be expensive. Batching can be considered later
- */
-- if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
-+ if (is_migrate_cma_page(page)) {
- if (mm_iommu_move_page_from_cma(page))
- goto populate;
- if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
-@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
- mem->entries = entries;
- *pmem = mem;
-
-- list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
-+ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
-
- unlock_exit:
- if (locked_entries && ret)
-- mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
-+ mm_iommu_adjust_locked_vm(mm, locked_entries, false);
-
- mutex_unlock(&mem_list_mutex);
-
-@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
- static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
- {
- list_del_rcu(&mem->next);
-- mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
- call_rcu(&mem->rcu, mm_iommu_free);
- }
-
--long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
-+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
- {
- long ret = 0;
-
-- if (!current || !current->mm)
-- return -ESRCH; /* process exited */
--
- mutex_lock(&mem_list_mutex);
-
- if (mem->used == 0) {
-@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
- /* @mapped became 0 so now mappings are disabled, release the region */
- mm_iommu_release(mem);
-
-+ mm_iommu_adjust_locked_vm(mm, mem->entries, false);
-+
- unlock_exit:
- mutex_unlock(&mem_list_mutex);
-
-@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
- }
- EXPORT_SYMBOL_GPL(mm_iommu_put);
-
--struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-- unsigned long size)
-+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
-+ unsigned long ua, unsigned long size)
- {
- struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-
-- list_for_each_entry_rcu(mem,
-- &current->mm->context.iommu_group_mem_list,
-- next) {
-+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
- if ((mem->ua <= ua) &&
- (ua + size <= mem->ua +
- (mem->entries << PAGE_SHIFT))) {
-@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
- }
- EXPORT_SYMBOL_GPL(mm_iommu_lookup);
-
--struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-- unsigned long entries)
-+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
-+ unsigned long ua, unsigned long entries)
- {
- struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-
-- list_for_each_entry_rcu(mem,
-- &current->mm->context.iommu_group_mem_list,
-- next) {
-+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
- if ((mem->ua == ua) && (mem->entries == entries)) {
- ret = mem;
- break;
-@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
- }
- EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
-
--void mm_iommu_init(mm_context_t *ctx)
-+void mm_iommu_init(struct mm_struct *mm)
- {
-- INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
--}
--
--void mm_iommu_cleanup(mm_context_t *ctx)
--{
-- struct mm_iommu_table_group_mem_t *mem, *tmp;
--
-- list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
-- list_del_rcu(&mem->next);
-- mm_iommu_do_free(mem);
-- }
-+ INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
- }
-diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 7fe88bb..38623e2 100644
---- a/arch/x86/events/core.c
-+++ b/arch/x86/events/core.c
-@@ -2096,8 +2096,8 @@ static int x86_pmu_event_init(struct perf_event *event)
-
- static void refresh_pce(void *ignored)
- {
-- if (current->mm)
-- load_mm_cr4(current->mm);
-+ if (current->active_mm)
-+ load_mm_cr4(current->active_mm);
- }
-
- static void x86_pmu_event_mapped(struct perf_event *event)
-diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
-index 8f44c5a..f228f74 100644
---- a/arch/x86/kernel/cpu/mshyperv.c
-+++ b/arch/x86/kernel/cpu/mshyperv.c
-@@ -31,6 +31,7 @@
- #include <asm/apic.h>
- #include <asm/timer.h>
- #include <asm/reboot.h>
-+#include <asm/nmi.h>
-
- struct ms_hyperv_info ms_hyperv;
- EXPORT_SYMBOL_GPL(ms_hyperv);
-@@ -158,6 +159,26 @@ static unsigned char hv_get_nmi_reason(void)
- return 0;
- }
-
-+#ifdef CONFIG_X86_LOCAL_APIC
-+/*
-+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
-+ * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
-+ * unknown NMI on the first CPU which gets it.
-+ */
-+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
-+{
-+ static atomic_t nmi_cpu = ATOMIC_INIT(-1);
-+
-+ if (!unknown_nmi_panic)
-+ return NMI_DONE;
-+
-+ if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
-+ return NMI_HANDLED;
-+
-+ return NMI_DONE;
-+}
-+#endif
-+
- static void __init ms_hyperv_init_platform(void)
- {
- /*
-@@ -183,6 +204,9 @@ static void __init ms_hyperv_init_platform(void)
- pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
- lapic_timer_frequency);
- }
-+
-+ register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
-+ "hv_nmi_unknown");
- #endif
-
- if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
-diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
-index 54a2372..b5785c1 100644
---- a/arch/x86/kernel/head64.c
-+++ b/arch/x86/kernel/head64.c
-@@ -4,6 +4,7 @@
- * Copyright (C) 2000 Andrea Arcangeli <andrea@××××.de> SuSE
- */
-
-+#define DISABLE_BRANCH_PROFILING
- #include <linux/init.h>
- #include <linux/linkage.h>
- #include <linux/types.h>
-diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index 46b2f41..eea88fe 100644
---- a/arch/x86/kernel/tsc.c
-+++ b/arch/x86/kernel/tsc.c
-@@ -1287,6 +1287,8 @@ static int __init init_tsc_clocksource(void)
- * exporting a reliable TSC.
- */
- if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-+ if (boot_cpu_has(X86_FEATURE_ART))
-+ art_related_clocksource = &clocksource_tsc;
- clocksource_register_khz(&clocksource_tsc, tsc_khz);
- return 0;
- }
-diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
-index 0493c17..333362f 100644
---- a/arch/x86/mm/kasan_init_64.c
-+++ b/arch/x86/mm/kasan_init_64.c
-@@ -1,3 +1,4 @@
-+#define DISABLE_BRANCH_PROFILING
- #define pr_fmt(fmt) "kasan: " fmt
- #include <linux/bootmem.h>
- #include <linux/kasan.h>
-diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
-index bedfab9..a00a6c0 100644
---- a/arch/x86/pci/xen.c
-+++ b/arch/x86/pci/xen.c
-@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
- return 1;
-
- for_each_pci_msi_entry(msidesc, dev) {
-- __pci_read_msi_msg(msidesc, &msg);
-- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-- if (msg.data != XEN_PIRQ_MSI_DATA ||
-- xen_irq_from_pirq(pirq) < 0) {
-- pirq = xen_allocate_pirq_msi(dev, msidesc);
-- if (pirq < 0) {
-- irq = -ENODEV;
-- goto error;
-- }
-- xen_msi_compose_msg(dev, pirq, &msg);
-- __pci_write_msi_msg(msidesc, &msg);
-- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-- } else {
-- dev_dbg(&dev->dev,
-- "xen: msi already bound to pirq=%d\n", pirq);
-+ pirq = xen_allocate_pirq_msi(dev, msidesc);
-+ if (pirq < 0) {
-+ irq = -ENODEV;
-+ goto error;
- }
-+ xen_msi_compose_msg(dev, pirq, &msg);
-+ __pci_write_msi_msg(msidesc, &msg);
-+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
- (type == PCI_CAP_ID_MSI) ? nvec : 1,
- (type == PCI_CAP_ID_MSIX) ?
-diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 0774799..c6fee74 100644
---- a/block/scsi_ioctl.c
-+++ b/block/scsi_ioctl.c
-@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
- __set_bit(WRITE_16, filter->write_ok);
- __set_bit(WRITE_LONG, filter->write_ok);
- __set_bit(WRITE_LONG_2, filter->write_ok);
-+ __set_bit(WRITE_SAME, filter->write_ok);
-+ __set_bit(WRITE_SAME_16, filter->write_ok);
-+ __set_bit(WRITE_SAME_32, filter->write_ok);
- __set_bit(ERASE, filter->write_ok);
- __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
- __set_bit(MODE_SELECT, filter->write_ok);
-diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index bdc67ba..4421f7c 100644
---- a/drivers/acpi/blacklist.c
-+++ b/drivers/acpi/blacklist.c
-@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
- },
- },
-+ {
-+ .callback = dmi_enable_rev_override,
-+ .ident = "DELL Precision 5520",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
-+ },
-+ },
-+ {
-+ .callback = dmi_enable_rev_override,
-+ .ident = "DELL Precision 3520",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
-+ },
-+ },
-+ /*
-+ * Resolves a quirk with the Dell Latitude 3350 that
-+ * causes the ethernet adapter to not function.
-+ */
-+ {
-+ .callback = dmi_enable_rev_override,
-+ .ident = "DELL Latitude 3350",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
-+ },
-+ },
- #endif
- {}
- };
-diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
-index 3bbd2a5..2acaa77 100644
---- a/drivers/clk/bcm/clk-bcm2835.c
-+++ b/drivers/clk/bcm/clk-bcm2835.c
-@@ -1598,7 +1598,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
- .a2w_reg = A2W_PLLH_AUX,
- .load_mask = CM_PLLH_LOADAUX,
- .hold_mask = 0,
-- .fixed_divider = 10),
-+ .fixed_divider = 1),
- [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
- .name = "pllh_pix",
- .source_pll = "pllh",
-diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
-index 015f711..d235fbe 100644
---- a/drivers/dma/ioat/init.c
-+++ b/drivers/dma/ioat/init.c
-@@ -691,7 +691,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
- /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
- ioat_chan->completion =
- dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-- GFP_KERNEL, &ioat_chan->completion_dma);
-+ GFP_NOWAIT, &ioat_chan->completion_dma);
- if (!ioat_chan->completion)
- return -ENOMEM;
-
-@@ -701,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
- ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
- order = IOAT_MAX_ORDER;
-- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
-+ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
- if (!ring)
- return -ENOMEM;
-
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
-index 77a52b5..70f0344 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
-@@ -95,9 +95,11 @@ nvkm-y += nvkm/engine/disp/cursg84.o
- nvkm-y += nvkm/engine/disp/cursgt215.o
- nvkm-y += nvkm/engine/disp/cursgf119.o
- nvkm-y += nvkm/engine/disp/cursgk104.o
-+nvkm-y += nvkm/engine/disp/cursgp102.o
-
- nvkm-y += nvkm/engine/disp/oimmnv50.o
- nvkm-y += nvkm/engine/disp/oimmg84.o
- nvkm-y += nvkm/engine/disp/oimmgt215.o
- nvkm-y += nvkm/engine/disp/oimmgf119.o
- nvkm-y += nvkm/engine/disp/oimmgk104.o
-+nvkm-y += nvkm/engine/disp/oimmgp102.o
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
-index dd2953b..9d90d8b 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
-@@ -82,7 +82,7 @@ nv50_disp_chan_mthd(struct nv50_disp_chan *chan, int debug)
-
- if (mthd->addr) {
- snprintf(cname_, sizeof(cname_), "%s %d",
-- mthd->name, chan->chid);
-+ mthd->name, chan->chid.user);
- cname = cname_;
- }
-
-@@ -139,7 +139,7 @@ nv50_disp_chan_uevent_ctor(struct nvkm_object *object, void *data, u32 size,
- if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
- notify->size = sizeof(struct nvif_notify_uevent_rep);
- notify->types = 1;
-- notify->index = chan->chid;
-+ notify->index = chan->chid.user;
- return 0;
- }
-
-@@ -159,7 +159,7 @@ nv50_disp_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
- struct nv50_disp_chan *chan = nv50_disp_chan(object);
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_device *device = disp->base.engine.subdev.device;
-- *data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
-+ *data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
- return 0;
- }
-
-@@ -169,7 +169,7 @@ nv50_disp_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
- struct nv50_disp_chan *chan = nv50_disp_chan(object);
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_device *device = disp->base.engine.subdev.device;
-- nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
-+ nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
- return 0;
- }
-
-@@ -196,7 +196,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_device *device = disp->base.engine.subdev.device;
- *addr = device->func->resource_addr(device, 0) +
-- 0x640000 + (chan->chid * 0x1000);
-+ 0x640000 + (chan->chid.user * 0x1000);
- *size = 0x001000;
- return 0;
- }
-@@ -243,8 +243,8 @@ nv50_disp_chan_dtor(struct nvkm_object *object)
- {
- struct nv50_disp_chan *chan = nv50_disp_chan(object);
- struct nv50_disp *disp = chan->root->disp;
-- if (chan->chid >= 0)
-- disp->chan[chan->chid] = NULL;
-+ if (chan->chid.user >= 0)
-+ disp->chan[chan->chid.user] = NULL;
- return chan->func->dtor ? chan->func->dtor(chan) : chan;
- }
-
-@@ -263,7 +263,7 @@ nv50_disp_chan = {
- int
- nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
- const struct nv50_disp_chan_mthd *mthd,
-- struct nv50_disp_root *root, int chid, int head,
-+ struct nv50_disp_root *root, int ctrl, int user, int head,
- const struct nvkm_oclass *oclass,
- struct nv50_disp_chan *chan)
- {
-@@ -273,21 +273,22 @@ nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
- chan->func = func;
- chan->mthd = mthd;
- chan->root = root;
-- chan->chid = chid;
-+ chan->chid.ctrl = ctrl;
-+ chan->chid.user = user;
- chan->head = head;
-
-- if (disp->chan[chan->chid]) {
-- chan->chid = -1;
-+ if (disp->chan[chan->chid.user]) {
-+ chan->chid.user = -1;
- return -EBUSY;
- }
-- disp->chan[chan->chid] = chan;
-+ disp->chan[chan->chid.user] = chan;
- return 0;
- }
-
- int
- nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
- const struct nv50_disp_chan_mthd *mthd,
-- struct nv50_disp_root *root, int chid, int head,
-+ struct nv50_disp_root *root, int ctrl, int user, int head,
- const struct nvkm_oclass *oclass,
- struct nvkm_object **pobject)
- {
-@@ -297,5 +298,6 @@ nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
- return -ENOMEM;
- *pobject = &chan->object;
-
-- return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
-+ return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
-+ head, oclass, chan);
- }
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
-index f5f683d..737b38f 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
-@@ -7,7 +7,11 @@ struct nv50_disp_chan {
- const struct nv50_disp_chan_func *func;
- const struct nv50_disp_chan_mthd *mthd;
- struct nv50_disp_root *root;
-- int chid;
-+
-+ struct {
-+ int ctrl;
-+ int user;
-+ } chid;
- int head;
-
- struct nvkm_object object;
-@@ -25,11 +29,11 @@ struct nv50_disp_chan_func {
-
- int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
- const struct nv50_disp_chan_mthd *,
-- struct nv50_disp_root *, int chid, int head,
-+ struct nv50_disp_root *, int ctrl, int user, int head,
- const struct nvkm_oclass *, struct nv50_disp_chan *);
- int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
- const struct nv50_disp_chan_mthd *,
-- struct nv50_disp_root *, int chid, int head,
-+ struct nv50_disp_root *, int ctrl, int user, int head,
- const struct nvkm_oclass *, struct nvkm_object **);
-
- extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
-@@ -90,13 +94,16 @@ extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
- struct nv50_disp_pioc_oclass {
- int (*ctor)(const struct nv50_disp_chan_func *,
- const struct nv50_disp_chan_mthd *,
-- struct nv50_disp_root *, int chid,
-+ struct nv50_disp_root *, int ctrl, int user,
- const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
- struct nvkm_sclass base;
- const struct nv50_disp_chan_func *func;
- const struct nv50_disp_chan_mthd *mthd;
-- int chid;
-+ struct {
-+ int ctrl;
-+ int user;
-+ } chid;
- };
-
- extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
-@@ -114,15 +121,17 @@ extern const struct nv50_disp_pioc_oclass gf119_disp_curs_oclass;
- extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
- extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
-
-+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
-+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
-
- int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
- const struct nv50_disp_chan_mthd *,
-- struct nv50_disp_root *, int chid,
-+ struct nv50_disp_root *, int ctrl, int user,
- const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
- int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
- const struct nv50_disp_chan_mthd *,
-- struct nv50_disp_root *, int chid,
-+ struct nv50_disp_root *, int ctrl, int user,
- const struct nvkm_oclass *, void *data, u32 size,
- struct nvkm_object **);
- #endif
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
-index dd99fc7..fa781b5 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
-@@ -33,5 +33,5 @@ g84_disp_curs_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_curs_new,
- .func = &nv50_disp_pioc_func,
-- .chid = 7,
-+ .chid = { 7, 7 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
-index 2a1574e..2be6fb0 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
-@@ -33,5 +33,5 @@ gf119_disp_curs_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_curs_new,
- .func = &gf119_disp_pioc_func,
-- .chid = 13,
-+ .chid = { 13, 13 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
-index 28e8f06..2a99db4 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
-@@ -33,5 +33,5 @@ gk104_disp_curs_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_curs_new,
- .func = &gf119_disp_pioc_func,
-- .chid = 13,
-+ .chid = { 13, 13 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
-new file mode 100644
-index 0000000..e958210
---- /dev/null
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
-@@ -0,0 +1,37 @@
-+/*
-+ * Copyright 2016 Red Hat Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Ben Skeggs <bskeggs@××××××.com>
-+ */
-+#include "channv50.h"
-+#include "rootnv50.h"
-+
-+#include <nvif/class.h>
-+
-+const struct nv50_disp_pioc_oclass
-+gp102_disp_curs_oclass = {
-+ .base.oclass = GK104_DISP_CURSOR,
-+ .base.minver = 0,
-+ .base.maxver = 0,
-+ .ctor = nv50_disp_curs_new,
-+ .func = &gf119_disp_pioc_func,
-+ .chid = { 13, 17 },
-+};
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
-index d8a4b9c..00a7f35 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
-@@ -33,5 +33,5 @@ gt215_disp_curs_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_curs_new,
- .func = &nv50_disp_pioc_func,
-- .chid = 7,
-+ .chid = { 7, 7 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
-index 8b13204..82ff82d 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
-@@ -33,7 +33,7 @@
- int
- nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
- const struct nv50_disp_chan_mthd *mthd,
-- struct nv50_disp_root *root, int chid,
-+ struct nv50_disp_root *root, int ctrl, int user,
- const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
- {
-@@ -54,7 +54,7 @@ nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
- } else
- return ret;
-
-- return nv50_disp_chan_new_(func, mthd, root, chid + head,
-+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
- head, oclass, pobject);
- }
-
-@@ -65,5 +65,5 @@ nv50_disp_curs_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_curs_new,
- .func = &nv50_disp_pioc_func,
-- .chid = 7,
-+ .chid = { 7, 7 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c |
1010 |
-index a57f7ce..ce7cd74 100644 |
1011 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c |
1012 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c |
1013 |
-@@ -32,8 +32,8 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, |
1014 |
- struct nvkm_object *object, u32 handle) |
1015 |
- { |
1016 |
- return nvkm_ramht_insert(chan->base.root->ramht, object, |
1017 |
-- chan->base.chid, -9, handle, |
1018 |
-- chan->base.chid << 27 | 0x00000001); |
1019 |
-+ chan->base.chid.user, -9, handle, |
1020 |
-+ chan->base.chid.user << 27 | 0x00000001); |
1021 |
- } |
1022 |
- |
1023 |
- void |
1024 |
-@@ -42,22 +42,23 @@ gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) |
1025 |
- struct nv50_disp *disp = chan->base.root->disp; |
1026 |
- struct nvkm_subdev *subdev = &disp->base.engine.subdev; |
1027 |
- struct nvkm_device *device = subdev->device; |
1028 |
-- int chid = chan->base.chid; |
1029 |
-+ int ctrl = chan->base.chid.ctrl; |
1030 |
-+ int user = chan->base.chid.user; |
1031 |
- |
1032 |
- /* deactivate channel */ |
1033 |
-- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000); |
1034 |
-- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000); |
1035 |
-+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000); |
1036 |
-+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000); |
1037 |
- if (nvkm_msec(device, 2000, |
1038 |
-- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000)) |
1039 |
-+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000)) |
1040 |
- break; |
1041 |
- ) < 0) { |
1042 |
-- nvkm_error(subdev, "ch %d fini: %08x\n", chid, |
1043 |
-- nvkm_rd32(device, 0x610490 + (chid * 0x10))); |
1044 |
-+ nvkm_error(subdev, "ch %d fini: %08x\n", user, |
1045 |
-+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); |
1046 |
- } |
1047 |
- |
1048 |
- /* disable error reporting and completion notification */ |
1049 |
-- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000); |
1050 |
-- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000); |
1051 |
-+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000); |
1052 |
-+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000); |
1053 |
- } |
1054 |
- |
1055 |
- static int |
1056 |
-@@ -66,26 +67,27 @@ gf119_disp_dmac_init(struct nv50_disp_dmac *chan) |
1057 |
- struct nv50_disp *disp = chan->base.root->disp; |
1058 |
- struct nvkm_subdev *subdev = &disp->base.engine.subdev; |
1059 |
- struct nvkm_device *device = subdev->device; |
1060 |
-- int chid = chan->base.chid; |
1061 |
-+ int ctrl = chan->base.chid.ctrl; |
1062 |
-+ int user = chan->base.chid.user; |
1063 |
- |
1064 |
- /* enable error reporting */ |
1065 |
-- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); |
1066 |
-+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); |
1067 |
- |
1068 |
- /* initialise channel for dma command submission */ |
1069 |
-- nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push); |
1070 |
-- nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000); |
1071 |
-- nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001); |
1072 |
-- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); |
1073 |
-- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); |
1074 |
-- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); |
1075 |
-+ nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push); |
1076 |
-+ nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000); |
1077 |
-+ nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001); |
1078 |
-+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); |
1079 |
-+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); |
1080 |
-+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); |
1081 |
- |
1082 |
- /* wait for it to go inactive */ |
1083 |
- if (nvkm_msec(device, 2000, |
1084 |
-- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) |
1085 |
-+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) |
1086 |
- break; |
1087 |
- ) < 0) { |
1088 |
-- nvkm_error(subdev, "ch %d init: %08x\n", chid, |
1089 |
-- nvkm_rd32(device, 0x610490 + (chid * 0x10))); |
1090 |
-+ nvkm_error(subdev, "ch %d init: %08x\n", user, |
1091 |
-+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); |
1092 |
- return -EBUSY; |
1093 |
- } |
1094 |
- |
1095 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c |
1096 |
-index ad24c2c..d26d3b4 100644 |
1097 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c |
1098 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c |
1099 |
-@@ -32,26 +32,27 @@ gp104_disp_dmac_init(struct nv50_disp_dmac *chan) |
1100 |
- struct nv50_disp *disp = chan->base.root->disp; |
1101 |
- struct nvkm_subdev *subdev = &disp->base.engine.subdev; |
1102 |
- struct nvkm_device *device = subdev->device; |
1103 |
-- int chid = chan->base.chid; |
1104 |
-+ int ctrl = chan->base.chid.ctrl; |
1105 |
-+ int user = chan->base.chid.user; |
1106 |
- |
1107 |
- /* enable error reporting */ |
1108 |
-- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); |
1109 |
-+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user); |
1110 |
- |
1111 |
- /* initialise channel for dma command submission */ |
1112 |
-- nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push); |
1113 |
-- nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000); |
1114 |
-- nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001); |
1115 |
-- nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); |
1116 |
-- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); |
1117 |
-- nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); |
1118 |
-+ nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push); |
1119 |
-+ nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000); |
1120 |
-+ nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001); |
1121 |
-+ nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010); |
1122 |
-+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); |
1123 |
-+ nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013); |
1124 |
- |
1125 |
- /* wait for it to go inactive */ |
1126 |
- if (nvkm_msec(device, 2000, |
1127 |
-- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) |
1128 |
-+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000)) |
1129 |
- break; |
1130 |
- ) < 0) { |
1131 |
-- nvkm_error(subdev, "ch %d init: %08x\n", chid, |
1132 |
-- nvkm_rd32(device, 0x610490 + (chid * 0x10))); |
1133 |
-+ nvkm_error(subdev, "ch %d init: %08x\n", user, |
1134 |
-+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10))); |
1135 |
- return -EBUSY; |
1136 |
- } |
1137 |
- |
1138 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c |
1139 |
-index 9c6645a..0a1381a 100644 |
1140 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c |
1141 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c |
1142 |
-@@ -149,7 +149,7 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func, |
1143 |
- chan->func = func; |
1144 |
- |
1145 |
- ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root, |
1146 |
-- chid, head, oclass, &chan->base); |
1147 |
-+ chid, chid, head, oclass, &chan->base); |
1148 |
- if (ret) |
1149 |
- return ret; |
1150 |
- |
1151 |
-@@ -179,9 +179,9 @@ nv50_disp_dmac_bind(struct nv50_disp_dmac *chan, |
1152 |
- struct nvkm_object *object, u32 handle) |
1153 |
- { |
1154 |
- return nvkm_ramht_insert(chan->base.root->ramht, object, |
1155 |
-- chan->base.chid, -10, handle, |
1156 |
-- chan->base.chid << 28 | |
1157 |
-- chan->base.chid); |
1158 |
-+ chan->base.chid.user, -10, handle, |
1159 |
-+ chan->base.chid.user << 28 | |
1160 |
-+ chan->base.chid.user); |
1161 |
- } |
1162 |
- |
1163 |
- static void |
1164 |
-@@ -190,21 +190,22 @@ nv50_disp_dmac_fini(struct nv50_disp_dmac *chan) |
1165 |
- struct nv50_disp *disp = chan->base.root->disp; |
1166 |
- struct nvkm_subdev *subdev = &disp->base.engine.subdev; |
1167 |
- struct nvkm_device *device = subdev->device; |
1168 |
-- int chid = chan->base.chid; |
1169 |
-+ int ctrl = chan->base.chid.ctrl; |
1170 |
-+ int user = chan->base.chid.user; |
1171 |
- |
1172 |
- /* deactivate channel */ |
1173 |
-- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000); |
1174 |
-- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000); |
1175 |
-+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000); |
1176 |
-+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000); |
1177 |
- if (nvkm_msec(device, 2000, |
1178 |
-- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000)) |
1179 |
-+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000)) |
1180 |
- break; |
1181 |
- ) < 0) { |
1182 |
-- nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid, |
1183 |
-- nvkm_rd32(device, 0x610200 + (chid * 0x10))); |
1184 |
-+ nvkm_error(subdev, "ch %d fini timeout, %08x\n", user, |
1185 |
-+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); |
1186 |
- } |
1187 |
- |
1188 |
- /* disable error reporting and completion notifications */ |
1189 |
-- nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid); |
1190 |
-+ nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user); |
1191 |
- } |
1192 |
- |
1193 |
- static int |
1194 |
-@@ -213,26 +214,27 @@ nv50_disp_dmac_init(struct nv50_disp_dmac *chan) |
1195 |
- struct nv50_disp *disp = chan->base.root->disp; |
1196 |
- struct nvkm_subdev *subdev = &disp->base.engine.subdev; |
1197 |
- struct nvkm_device *device = subdev->device; |
1198 |
-- int chid = chan->base.chid; |
1199 |
-+ int ctrl = chan->base.chid.ctrl; |
1200 |
-+ int user = chan->base.chid.user; |
1201 |
- |
1202 |
- /* enable error reporting */ |
1203 |
-- nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid); |
1204 |
-+ nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user); |
1205 |
- |
1206 |
- /* initialise channel for dma command submission */ |
1207 |
-- nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push); |
1208 |
-- nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000); |
1209 |
-- nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid); |
1210 |
-- nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010); |
1211 |
-- nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); |
1212 |
-- nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013); |
1213 |
-+ nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push); |
1214 |
-+ nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000); |
1215 |
-+ nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl); |
1216 |
-+ nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010); |
1217 |
-+ nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000); |
1218 |
-+ nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013); |
1219 |
- |
1220 |
- /* wait for it to go inactive */ |
1221 |
- if (nvkm_msec(device, 2000, |
1222 |
-- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000)) |
1223 |
-+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000)) |
1224 |
- break; |
1225 |
- ) < 0) { |
1226 |
-- nvkm_error(subdev, "ch %d init timeout, %08x\n", chid, |
1227 |
-- nvkm_rd32(device, 0x610200 + (chid * 0x10))); |
1228 |
-+ nvkm_error(subdev, "ch %d init timeout, %08x\n", user, |
1229 |
-+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10))); |
1230 |
- return -EBUSY; |
1231 |
- } |
1232 |
- |
1233 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c |
1234 |
-index 54a4ae8..5ad5d0f 100644 |
1235 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c |
1236 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c |
1237 |
-@@ -33,5 +33,5 @@ g84_disp_oimm_oclass = { |
1238 |
- .base.maxver = 0, |
1239 |
- .ctor = nv50_disp_oimm_new, |
1240 |
- .func = &nv50_disp_pioc_func, |
1241 |
-- .chid = 5, |
1242 |
-+ .chid = { 5, 5 }, |
1243 |
- }; |
1244 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c |
1245 |
-index c658db5..1f9fd34 100644 |
1246 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c |
1247 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c |
1248 |
-@@ -33,5 +33,5 @@ gf119_disp_oimm_oclass = { |
1249 |
- .base.maxver = 0, |
1250 |
- .ctor = nv50_disp_oimm_new, |
1251 |
- .func = &gf119_disp_pioc_func, |
1252 |
-- .chid = 9, |
1253 |
-+ .chid = { 9, 9 }, |
1254 |
- }; |
1255 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c |
1256 |
-index b1fde8c..0c09fe8 100644 |
1257 |
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c |
1258 |
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c |
1259 |
-@@ -33,5 +33,5 @@ gk104_disp_oimm_oclass = { |
1260 |
- .base.maxver = 0, |
1261 |
- .ctor = nv50_disp_oimm_new, |
1262 |
- .func = &gf119_disp_pioc_func, |
1263 |
-- .chid = 9, |
1264 |
-+ .chid = { 9, 9 }, |
1265 |
- }; |
1266 |
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
-new file mode 100644
-index 0000000..abf8236
---- /dev/null
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
-@@ -0,0 +1,37 @@
-+/*
-+ * Copyright 2016 Red Hat Inc.
-+ *
-+ * Permission is hereby granted, free of charge, to any person obtaining a
-+ * copy of this software and associated documentation files (the "Software"),
-+ * to deal in the Software without restriction, including without limitation
-+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
-+ * and/or sell copies of the Software, and to permit persons to whom the
-+ * Software is furnished to do so, subject to the following conditions:
-+ *
-+ * The above copyright notice and this permission notice shall be included in
-+ * all copies or substantial portions of the Software.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-+ * OTHER DEALINGS IN THE SOFTWARE.
-+ *
-+ * Authors: Ben Skeggs <bskeggs@××××××.com>
-+ */
-+#include "channv50.h"
-+#include "rootnv50.h"
-+
-+#include <nvif/class.h>
-+
-+const struct nv50_disp_pioc_oclass
-+gp102_disp_oimm_oclass = {
-+ .base.oclass = GK104_DISP_OVERLAY,
-+ .base.minver = 0,
-+ .base.maxver = 0,
-+ .ctor = nv50_disp_oimm_new,
-+ .func = &gf119_disp_pioc_func,
-+ .chid = { 9, 13 },
-+};
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
-index f4e7eb3..1281db2 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
-@@ -33,5 +33,5 @@ gt215_disp_oimm_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_oimm_new,
- .func = &nv50_disp_pioc_func,
-- .chid = 5,
-+ .chid = { 5, 5 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
-index 3940b9c..07540f3 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
-@@ -33,7 +33,7 @@
- int
- nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
- const struct nv50_disp_chan_mthd *mthd,
-- struct nv50_disp_root *root, int chid,
-+ struct nv50_disp_root *root, int ctrl, int user,
- const struct nvkm_oclass *oclass, void *data, u32 size,
- struct nvkm_object **pobject)
- {
-@@ -54,7 +54,7 @@ nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
- } else
- return ret;
-
-- return nv50_disp_chan_new_(func, mthd, root, chid + head,
-+ return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
- head, oclass, pobject);
- }
-
-@@ -65,5 +65,5 @@ nv50_disp_oimm_oclass = {
- .base.maxver = 0,
- .ctor = nv50_disp_oimm_new,
- .func = &nv50_disp_pioc_func,
-- .chid = 5,
-+ .chid = { 5, 5 },
- };
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
-index a625a98..0abaa64 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
-@@ -32,20 +32,21 @@ gf119_disp_pioc_fini(struct nv50_disp_chan *chan)
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
-- int chid = chan->chid;
-+ int ctrl = chan->chid.ctrl;
-+ int user = chan->chid.user;
-
-- nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
-+ nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
- if (nvkm_msec(device, 2000,
-- if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
-+ if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
- break;
- ) < 0) {
-- nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
-+ nvkm_error(subdev, "ch %d fini: %08x\n", user,
-+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
- }
-
- /* disable error reporting and completion notification */
-- nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
-+ nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
-+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
- }
-
- static int
-@@ -54,20 +55,21 @@ gf119_disp_pioc_init(struct nv50_disp_chan *chan)
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
-- int chid = chan->chid;
-+ int ctrl = chan->chid.ctrl;
-+ int user = chan->chid.user;
-
- /* enable error reporting */
-- nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
-+ nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
-
- /* activate channel */
-- nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
-+ nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
- if (nvkm_msec(device, 2000,
-- u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
-+ u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
- if ((tmp & 0x00030000) == 0x00010000)
- break;
- ) < 0) {
-- nvkm_error(subdev, "ch %d init: %08x\n", chid,
-- nvkm_rd32(device, 0x610490 + (chid * 0x10)));
-+ nvkm_error(subdev, "ch %d init: %08x\n", user,
-+ nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
- return -EBUSY;
- }
-
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
-index 9d2618d..0211e0e 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
-@@ -32,15 +32,16 @@ nv50_disp_pioc_fini(struct nv50_disp_chan *chan)
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
-- int chid = chan->chid;
-+ int ctrl = chan->chid.ctrl;
-+ int user = chan->chid.user;
-
-- nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
-+ nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
- if (nvkm_msec(device, 2000,
-- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
-+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
- break;
- ) < 0) {
-- nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
-- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
-+ nvkm_error(subdev, "ch %d timeout: %08x\n", user,
-+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
- }
- }
-
-@@ -50,26 +51,27 @@ nv50_disp_pioc_init(struct nv50_disp_chan *chan)
- struct nv50_disp *disp = chan->root->disp;
- struct nvkm_subdev *subdev = &disp->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
-- int chid = chan->chid;
-+ int ctrl = chan->chid.ctrl;
-+ int user = chan->chid.user;
-
-- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
-+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
- if (nvkm_msec(device, 2000,
-- if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
-+ if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
- break;
- ) < 0) {
-- nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
-- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
-+ nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
-+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
- return -EBUSY;
- }
-
-- nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
-+ nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
- if (nvkm_msec(device, 2000,
-- u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
-+ u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
- if ((tmp & 0x00030000) == 0x00010000)
- break;
- ) < 0) {
-- nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
-- nvkm_rd32(device, 0x610200 + (chid * 0x10)));
-+ nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
-+ nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
- return -EBUSY;
- }
-
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
-index 8443e04..b053b29 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
-@@ -36,8 +36,8 @@ gp104_disp_root = {
- &gp104_disp_ovly_oclass,
- },
- .pioc = {
-- &gk104_disp_oimm_oclass,
-- &gk104_disp_curs_oclass,
-+ &gp102_disp_oimm_oclass,
-+ &gp102_disp_curs_oclass,
- },
- };
-
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
-index 2f9cecd..05c829a 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
-@@ -207,8 +207,8 @@ nv50_disp_root_pioc_new_(const struct nvkm_oclass *oclass,
- {
- const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
- struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
-- return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
-- oclass, data, size, pobject);
-+ return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
-+ sclass->chid.user, oclass, data, size, pobject);
- }
-
- static int
-diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
-index d544ff9..7aadce1 100644
---- a/drivers/gpu/drm/vc4/vc4_crtc.c
-+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
-@@ -83,8 +83,7 @@ struct vc4_crtc_data {
- /* Which channel of the HVS this pixelvalve sources from. */
- int hvs_channel;
-
-- enum vc4_encoder_type encoder0_type;
-- enum vc4_encoder_type encoder1_type;
-+ enum vc4_encoder_type encoder_types[4];
- };
-
- #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
-@@ -669,6 +668,14 @@ void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
- CRTC_WRITE(PV_INTEN, 0);
- }
-
-+/* Must be called with the event lock held */
-+bool vc4_event_pending(struct drm_crtc *crtc)
-+{
-+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-+
-+ return !!vc4_crtc->event;
-+}
-+
- static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
- {
- struct drm_crtc *crtc = &vc4_crtc->base;
-@@ -859,20 +866,26 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
-
- static const struct vc4_crtc_data pv0_data = {
- .hvs_channel = 0,
-- .encoder0_type = VC4_ENCODER_TYPE_DSI0,
-- .encoder1_type = VC4_ENCODER_TYPE_DPI,
-+ .encoder_types = {
-+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
-+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
-+ },
- };
-
- static const struct vc4_crtc_data pv1_data = {
- .hvs_channel = 2,
-- .encoder0_type = VC4_ENCODER_TYPE_DSI1,
-- .encoder1_type = VC4_ENCODER_TYPE_SMI,
-+ .encoder_types = {
-+ [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
-+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
-+ },
- };
-
- static const struct vc4_crtc_data pv2_data = {
- .hvs_channel = 1,
-- .encoder0_type = VC4_ENCODER_TYPE_VEC,
-- .encoder1_type = VC4_ENCODER_TYPE_HDMI,
-+ .encoder_types = {
-+ [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
-+ [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
-+ },
- };
-
- static const struct of_device_id vc4_crtc_dt_match[] = {
-@@ -886,17 +899,20 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
- struct drm_crtc *crtc)
- {
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-+ const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
-+ const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, drm) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
--
-- if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
-- vc4_encoder->clock_select = 0;
-- encoder->possible_crtcs |= drm_crtc_mask(crtc);
-- } else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
-- vc4_encoder->clock_select = 1;
-- encoder->possible_crtcs |= drm_crtc_mask(crtc);
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
-+ if (vc4_encoder->type == encoder_types[i]) {
-+ vc4_encoder->clock_select = i;
-+ encoder->possible_crtcs |= drm_crtc_mask(crtc);
-+ break;
-+ }
- }
- }
- }
-diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
-index 7c1e4d9..50a55ef 100644
---- a/drivers/gpu/drm/vc4/vc4_drv.h
-+++ b/drivers/gpu/drm/vc4/vc4_drv.h
-@@ -194,6 +194,7 @@ to_vc4_plane(struct drm_plane *plane)
- }
-
- enum vc4_encoder_type {
-+ VC4_ENCODER_TYPE_NONE,
- VC4_ENCODER_TYPE_HDMI,
- VC4_ENCODER_TYPE_VEC,
- VC4_ENCODER_TYPE_DSI0,
-@@ -440,6 +441,7 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
- extern struct platform_driver vc4_crtc_driver;
- int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
- void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
-+bool vc4_event_pending(struct drm_crtc *crtc);
- int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
- int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- unsigned int flags, int *vpos, int *hpos,
-diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
-index c1f65c6..67af2af 100644
---- a/drivers/gpu/drm/vc4/vc4_kms.c
-+++ b/drivers/gpu/drm/vc4/vc4_kms.c
-@@ -119,17 +119,34 @@ static int vc4_atomic_commit(struct drm_device *dev,
-
- /* Make sure that any outstanding modesets have finished. */
- if (nonblock) {
-- ret = down_trylock(&vc4->async_modeset);
-- if (ret) {
-+ struct drm_crtc *crtc;
-+ struct drm_crtc_state *crtc_state;
-+ unsigned long flags;
-+ bool busy = false;
-+
-+ /*
-+ * If there's an undispatched event to send then we're
-+ * obviously still busy. If there isn't, then we can
-+ * unconditionally wait for the semaphore because it
-+ * shouldn't be contended (for long).
-+ *
-+ * This is to prevent a race where queuing a new flip
-+ * from userspace immediately on receipt of an event
-+ * beats our clean-up and returns EBUSY.
-+ */
-+ spin_lock_irqsave(&dev->event_lock, flags);
-+ for_each_crtc_in_state(state, crtc, crtc_state, i)
-+ busy |= vc4_event_pending(crtc);
-+ spin_unlock_irqrestore(&dev->event_lock, flags);
-+ if (busy) {
- kfree(c);
- return -EBUSY;
- }
-- } else {
-- ret = down_interruptible(&vc4->async_modeset);
-- if (ret) {
-- kfree(c);
-- return ret;
-- }
-+ }
-+ ret = down_interruptible(&vc4->async_modeset);
-+ if (ret) {
-+ kfree(c);
-+ return ret;
- }
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
-diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
-index 1aa44c2..39f6886 100644
---- a/drivers/gpu/drm/vc4/vc4_regs.h
-+++ b/drivers/gpu/drm/vc4/vc4_regs.h
-@@ -177,8 +177,9 @@
- # define PV_CONTROL_WAIT_HSTART BIT(12)
- # define PV_CONTROL_PIXEL_REP_MASK VC4_MASK(5, 4)
- # define PV_CONTROL_PIXEL_REP_SHIFT 4
--# define PV_CONTROL_CLK_SELECT_DSI_VEC 0
-+# define PV_CONTROL_CLK_SELECT_DSI 0
- # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI 1
-+# define PV_CONTROL_CLK_SELECT_VEC 2
- # define PV_CONTROL_CLK_SELECT_MASK VC4_MASK(3, 2)
- # define PV_CONTROL_CLK_SELECT_SHIFT 2
- # define PV_CONTROL_FIFO_CLR BIT(1)
-diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
-index c5dee30..acb9d25 100644
---- a/drivers/irqchip/irq-gic-v3-its.c
-+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -1598,6 +1598,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
- its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
- }
-
-+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
-+{
-+ struct its_node *its = data;
-+
-+ /* On QDF2400, the size of the ITE is 16Bytes */
-+ its->ite_size = 16;
-+}
-+
- static const struct gic_quirk its_quirks[] = {
- #ifdef CONFIG_CAVIUM_ERRATUM_22375
- {
-@@ -1615,6 +1623,14 @@ static const struct gic_quirk its_quirks[] = {
- .init = its_enable_quirk_cavium_23144,
- },
- #endif
-+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
-+ {
-+ .desc = "ITS: QDF2400 erratum 0065",
-+ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
-+ .mask = 0xffffffff,
-+ .init = its_enable_quirk_qdf2400_e0065,
-+ },
-+#endif
- {
- }
- };
-diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
-index 302e284..cde43b6 100644
---- a/drivers/media/usb/uvc/uvc_driver.c
-+++ b/drivers/media/usb/uvc/uvc_driver.c
-@@ -1595,6 +1595,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
- return buffer;
- }
-
-+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
-+{
-+ struct uvc_video_chain *chain;
-+
-+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
-+ if (chain == NULL)
-+ return NULL;
-+
-+ INIT_LIST_HEAD(&chain->entities);
-+ mutex_init(&chain->ctrl_mutex);
-+ chain->dev = dev;
-+ v4l2_prio_init(&chain->prio);
-+
-+ return chain;
-+}
-+
-+/*
-+ * Fallback heuristic for devices that don't connect units and terminals in a
-+ * valid chain.
-+ *
-+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
-+ * to fail, but if we just take the entities we can find and put them together
-+ * in the most sensible chain we can think of, turns out they do work anyway.
-+ * Note: This heuristic assumes there is a single chain.
-+ *
-+ * At the time of writing, devices known to have such a broken chain are
-+ * - Acer Integrated Camera (5986:055a)
-+ * - Realtek rtl157a7 (0bda:57a7)
-+ */
-+static int uvc_scan_fallback(struct uvc_device *dev)
-+{
-+ struct uvc_video_chain *chain;
-+ struct uvc_entity *iterm = NULL;
-+ struct uvc_entity *oterm = NULL;
-+ struct uvc_entity *entity;
-+ struct uvc_entity *prev;
-+
-+ /*
-+ * Start by locating the input and output terminals. We only support
-+ * devices with exactly one of each for now.
-+ */
-+ list_for_each_entry(entity, &dev->entities, list) {
-+ if (UVC_ENTITY_IS_ITERM(entity)) {
-+ if (iterm)
-+ return -EINVAL;
-+ iterm = entity;
-+ }
-+
-+ if (UVC_ENTITY_IS_OTERM(entity)) {
-+ if (oterm)
-+ return -EINVAL;
-+ oterm = entity;
-+ }
-+ }
-+
-+ if (iterm == NULL || oterm == NULL)
-+ return -EINVAL;
-+
-+ /* Allocate the chain and fill it. */
-+ chain = uvc_alloc_chain(dev);
-+ if (chain == NULL)
-+ return -ENOMEM;
-+
-+ if (uvc_scan_chain_entity(chain, oterm) < 0)
-+ goto error;
-+
-+ prev = oterm;
-+
-+ /*
-+ * Add all Processing and Extension Units with two pads. The order
-+ * doesn't matter much, use reverse list traversal to connect units in
-+ * UVC descriptor order as we build the chain from output to input. This
-+ * leads to units appearing in the order meant by the manufacturer for
-+ * the cameras known to require this heuristic.
-+ */
-+ list_for_each_entry_reverse(entity, &dev->entities, list) {
-+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
-+ entity->type != UVC_VC_EXTENSION_UNIT)
-+ continue;
-+
-+ if (entity->num_pads != 2)
-+ continue;
-+
-+ if (uvc_scan_chain_entity(chain, entity) < 0)
-+ goto error;
-+
-+ prev->baSourceID[0] = entity->id;
-+ prev = entity;
-+ }
-+
-+ if (uvc_scan_chain_entity(chain, iterm) < 0)
-+ goto error;
-+
-+ prev->baSourceID[0] = iterm->id;
-+
-+ list_add_tail(&chain->list, &dev->chains);
-+
-+ uvc_trace(UVC_TRACE_PROBE,
-+ "Found a video chain by fallback heuristic (%s).\n",
-+ uvc_print_chain(chain));
-+
-+ return 0;
-+
-+error:
-+ kfree(chain);
-+ return -EINVAL;
-+}
-+
- /*
- * Scan the device for video chains and register video devices.
- *
-@@ -1617,15 +1725,10 @@ static int uvc_scan_device(struct uvc_device *dev)
- if (term->chain.next || term->chain.prev)
- continue;
-
-- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
-+ chain = uvc_alloc_chain(dev);
- if (chain == NULL)
- return -ENOMEM;
-
-- INIT_LIST_HEAD(&chain->entities);
-- mutex_init(&chain->ctrl_mutex);
-- chain->dev = dev;
-- v4l2_prio_init(&chain->prio);
--
- term->flags |= UVC_ENTITY_FLAG_DEFAULT;
-
- if (uvc_scan_chain(chain, term) < 0) {
-@@ -1639,6 +1742,9 @@ static int uvc_scan_device(struct uvc_device *dev)
- list_add_tail(&chain->list, &dev->chains);
- }
-
-+ if (list_empty(&dev->chains))
-+ uvc_scan_fallback(dev);
-+
- if (list_empty(&dev->chains)) {
- uvc_printk(KERN_INFO, "No valid video chain found.\n");
- return -1;
-diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
-index a36022b..03dca73 100644
---- a/drivers/net/ethernet/ibm/ibmveth.c
-+++ b/drivers/net/ethernet/ibm/ibmveth.c
-@@ -1181,7 +1181,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
-
- static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
- {
-+ struct tcphdr *tcph;
- int offset = 0;
-+ int hdr_len;
-
- /* only TCP packets will be aggregated */
- if (skb->protocol == htons(ETH_P_IP)) {
-@@ -1208,14 +1210,20 @@ static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
- /* if mss is not set through Large Packet bit/mss in rx buffer,
- * expect that the mss will be written to the tcp header checksum.
- */
-+ tcph = (struct tcphdr *)(skb->data + offset);
- if (lrg_pkt) {
- skb_shinfo(skb)->gso_size = mss;
- } else if (offset) {
-- struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
--
- skb_shinfo(skb)->gso_size = ntohs(tcph->check);
- tcph->check = 0;
- }
-+
-+ if (skb_shinfo(skb)->gso_size) {
-+ hdr_len = offset + tcph->doff * 4;
-+ skb_shinfo(skb)->gso_segs =
-+ DIV_ROUND_UP(skb->len - hdr_len,
-+ skb_shinfo(skb)->gso_size);
-+ }
- }
-
- static int ibmveth_poll(struct napi_struct *napi, int budget)
-diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
-index 5b54254..2788a54 100644
---- a/drivers/net/ethernet/intel/igb/e1000_phy.c
-+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
-@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
- s32 ret_val = 0;
- u16 phy_id;
-
-+ /* ensure PHY page selection to fix misconfigured i210 */
-+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
-+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
-+
- ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
- if (ret_val)
- goto out;
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-index b3067137..d4fa851 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
-@@ -81,6 +81,7 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
- static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
- {
- priv->params.rq_wq_type = rq_type;
-+ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
- switch (priv->params.rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-@@ -92,6 +93,10 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-+
-+ /* Extra room needed for build_skb */
-+ priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
-+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
-@@ -3473,12 +3478,6 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
- mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
-
-- priv->params.lro_wqe_sz =
-- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
-- /* Extra room needed for build_skb */
-- MLX5_RX_HEADROOM -
-- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
--
- /* Initialize pflags */
- MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-@@ -3936,6 +3935,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
- }
- }
-
-+static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
-+{
-+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
-+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
-+ int vport;
-+
-+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
-+ return;
-+
-+ for (vport = 1; vport < total_vfs; vport++)
-+ mlx5_eswitch_unregister_vport_rep(esw, vport);
-+}
-+
- void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
- {
- struct mlx5e_priv *priv = netdev_priv(netdev);
-@@ -3983,6 +3995,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
- return err;
- }
-
-+ mlx5e_register_vport_rep(mdev);
- return 0;
- }
-
-@@ -3994,6 +4007,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
- if (!netif_device_present(netdev))
- return;
-
-+ mlx5e_unregister_vport_rep(mdev);
- mlx5e_detach_netdev(mdev, netdev);
- mlx5e_destroy_mdev_resources(mdev);
- }
-@@ -4012,8 +4026,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
- if (err)
- return NULL;
-
-- mlx5e_register_vport_rep(mdev);
--
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- ppriv = &esw->offloads.vport_reps[0];
-
-@@ -4065,13 +4077,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
-
- static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
- {
-- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-- int total_vfs = MLX5_TOTAL_VPORTS(mdev);
- struct mlx5e_priv *priv = vpriv;
-- int vport;
--
-- for (vport = 1; vport < total_vfs; vport++)
-- mlx5_eswitch_unregister_vport_rep(esw, vport);
-
- unregister_netdev(priv->netdev);
- mlx5e_detach(mdev, vpriv);
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
-index e7b2158..796bdf0 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
-@@ -92,19 +92,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
- static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
- struct mlx5e_cq *cq, u32 cqcc)
- {
-- u16 wqe_cnt_step;
--
- cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
- cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
- cq->title.op_own &= 0xf0;
- cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
- cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
-
-- wqe_cnt_step =
-- rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-- mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
-- cq->decmprs_wqe_counter =
-- (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
-+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-+ cq->decmprs_wqe_counter +=
-+ mpwrq_get_cqe_consumed_strides(&cq->title);
-+ else
-+ cq->decmprs_wqe_counter =
-+ (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
- }
-
- static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
-diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
-index e83072d..6905630 100644
---- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
-+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
-@@ -500,30 +500,40 @@ static int
- mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
- struct mlxsw_sp_prefix_usage *req_prefix_usage)
- {
-- struct mlxsw_sp_lpm_tree *lpm_tree;
-+ struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
-+ struct mlxsw_sp_lpm_tree *new_tree;
-+ int err;
-
-- if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
-- &vr->lpm_tree->prefix_usage))
-+ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
- return 0;
-
-- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
-+ new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
- vr->proto, false);
-- if (IS_ERR(lpm_tree)) {
-+ if (IS_ERR(new_tree)) {
- /* We failed to get a tree according to the required
- * prefix usage. However, the current tree might be still good
- * for us if our requirement is subset of the prefixes used
- * in the tree.
- */
- if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
-- &vr->lpm_tree->prefix_usage))
-+ &lpm_tree->prefix_usage))
- return 0;
-- return PTR_ERR(lpm_tree);
-+ return PTR_ERR(new_tree);
- }
-
-- mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
-- mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
-+ /* Prevent packet loss by overwriting existing binding */
-+ vr->lpm_tree = new_tree;
-+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
-+ if (err)
-+ goto err_tree_bind;
-+ mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
-+
-+ return 0;
-+
-+err_tree_bind:
- vr->lpm_tree = lpm_tree;
-- return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
-+ mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
-+ return err;
- }
-
- static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
-diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
-index 8b4822a..3c1f89a 100644
---- a/drivers/net/geneve.c
-+++ b/drivers/net/geneve.c
-@@ -1039,16 +1039,22 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
- {
- struct geneve_dev *geneve = netdev_priv(dev);
- struct ip_tunnel_info *info = NULL;
-+ int err;
-
- if (geneve->collect_md)
- info = skb_tunnel_info(skb);
-
-+ rcu_read_lock();
- #if IS_ENABLED(CONFIG_IPV6)
- if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
- (!info && geneve->remote.sa.sa_family == AF_INET6))
-- return geneve6_xmit_skb(skb, dev, info);
-+ err = geneve6_xmit_skb(skb, dev, info);
-+ else
- #endif
-- return geneve_xmit_skb(skb, dev, info);
-+ err = geneve_xmit_skb(skb, dev, info);
-+ rcu_read_unlock();
-+
-+ return err;
- }
-
- static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
-diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
-index f424b86..201ffa5 100644
---- a/drivers/net/phy/phy.c
-+++ b/drivers/net/phy/phy.c
-@@ -611,14 +611,18 @@ void phy_start_machine(struct phy_device *phydev)
- * phy_trigger_machine - trigger the state machine to run
- *
- * @phydev: the phy_device struct
-+ * @sync: indicate whether we should wait for the workqueue cancelation
- *
- * Description: There has been a change in state which requires that the
- * state machine runs.
- */
-
--static void phy_trigger_machine(struct phy_device *phydev)
-+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
- {
-- cancel_delayed_work_sync(&phydev->state_queue);
-+ if (sync)
-+ cancel_delayed_work_sync(&phydev->state_queue);
-+ else
-+ cancel_delayed_work(&phydev->state_queue);
- queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
- }
-
-@@ -655,7 +659,7 @@ static void phy_error(struct phy_device *phydev)
- phydev->state = PHY_HALTED;
- mutex_unlock(&phydev->lock);
-
-- phy_trigger_machine(phydev);
-+ phy_trigger_machine(phydev, false);
- }
-
- /**
-@@ -817,7 +821,7 @@ void phy_change(struct work_struct *work)
- }
-
- /* reschedule state queue work to run as soon as possible */
-- phy_trigger_machine(phydev);
-+ phy_trigger_machine(phydev, true);
- return;
-
- ignore:
-@@ -907,7 +911,7 @@ void phy_start(struct phy_device *phydev)
- if (do_resume)
- phy_resume(phydev);
-
-- phy_trigger_machine(phydev);
-+ phy_trigger_machine(phydev, true);
- }
- EXPORT_SYMBOL(phy_start);
-
-diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index b31aca8..a931b73 100644
---- a/drivers/net/tun.c
-+++ b/drivers/net/tun.c
-@@ -819,7 +819,18 @@ static void tun_net_uninit(struct net_device *dev)
- /* Net device open. */
- static int tun_net_open(struct net_device *dev)
- {
-+ struct tun_struct *tun = netdev_priv(dev);
-+ int i;
-+
- netif_tx_start_all_queues(dev);
-+
-+ for (i = 0; i < tun->numqueues; i++) {
-+ struct tun_file *tfile;
-+
-+ tfile = rtnl_dereference(tun->tfiles[i]);
-+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
-+ }
-+
- return 0;
- }
-
-@@ -1116,9 +1127,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
- if (!skb_array_empty(&tfile->tx_array))
- mask |= POLLIN | POLLRDNORM;
-
-- if (sock_writeable(sk) ||
-- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-- sock_writeable(sk)))
-+ if (tun->dev->flags & IFF_UP &&
-+ (sock_writeable(sk) ||
-+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-+ sock_writeable(sk))))
- mask |= POLLOUT | POLLWRNORM;
-
- if (tun->dev->reg_state != NETREG_REGISTERED)
-diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index 95cf1d8..bc744ac 100644
---- a/drivers/net/vrf.c
-+++ b/drivers/net/vrf.c
-@@ -346,6 +346,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
-
- static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
- {
-+ int len = skb->len;
- netdev_tx_t ret = is_ip_tx_frame(skb, dev);
-
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-@@ -353,7 +354,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_pkts++;
-- dstats->tx_bytes += skb->len;
-+ dstats->tx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
- } else {
- this_cpu_inc(dev->dstats->tx_drps);
-diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
-index d4f495b..3c4c2cf 100644
---- a/drivers/net/vxlan.c
-+++ b/drivers/net/vxlan.c
-@@ -1942,7 +1942,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- const struct iphdr *old_iph;
- union vxlan_addr *dst;
- union vxlan_addr remote_ip, local_ip;
-- union vxlan_addr *src;
- struct vxlan_metadata _md;
- struct vxlan_metadata *md = &_md;
- __be16 src_port = 0, dst_port;
-@@ -1956,11 +1955,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
-
- info = skb_tunnel_info(skb);
-
-+ rcu_read_lock();
- if (rdst) {
- dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
- vni = rdst->remote_vni;
- dst = &rdst->remote_ip;
-- src = &vxlan->cfg.saddr;
-+ local_ip = vxlan->cfg.saddr;
- dst_cache = &rdst->dst_cache;
- } else {
- if (!info) {
-@@ -1979,7 +1979,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
- }
- dst = &remote_ip;
-- src = &local_ip;
- dst_cache = &info->dst_cache;
- }
-
-@@ -1987,7 +1986,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- if (did_rsc) {
- /* short-circuited back to local bridge */
- vxlan_encap_bypass(skb, vxlan, vxlan);
-- return;
-+ goto out_unlock;
- }
- goto drop;
- }
-@@ -2028,7 +2027,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- rt = vxlan_get_route(vxlan, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
- dst->sin.sin_addr.s_addr,
-- &src->sin.sin_addr.s_addr,
-+ &local_ip.sin.sin_addr.s_addr,
- dst_cache, info);
- if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n",
-@@ -2056,7 +2055,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-- return;
-+ goto out_unlock;
- }
-
- if (!info)
-@@ -2071,7 +2070,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- if (err < 0)
- goto xmit_tx_error;
-
-- udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
-+ udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
- dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, xnet, !udp_sum);
- #if IS_ENABLED(CONFIG_IPV6)
-@@ -2087,7 +2086,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- ndst = vxlan6_get_route(vxlan, skb,
- rdst ? rdst->remote_ifindex : 0, tos,
- label, &dst->sin6.sin6_addr,
-- &src->sin6.sin6_addr,
-+ &local_ip.sin6.sin6_addr,
- dst_cache, info);
- if (IS_ERR(ndst)) {
- netdev_dbg(dev, "no route to %pI6\n",
-@@ -2117,7 +2116,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-- return;
-+ goto out_unlock;
- }
-
- if (!info)
-@@ -2131,15 +2130,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- if (err < 0) {
- dst_release(ndst);
- dev->stats.tx_errors++;
-- return;
-+ goto out_unlock;
- }
- udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-- &src->sin6.sin6_addr,
-+ &local_ip.sin6.sin6_addr,
- &dst->sin6.sin6_addr, tos, ttl,
- label, src_port, dst_port, !udp_sum);
- #endif
- }
--
-+out_unlock:
-+ rcu_read_unlock();
- return;
-
- drop:
-@@ -2155,6 +2155,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
- dev->stats.tx_errors++;
- tx_free:
- dev_kfree_skb(skb);
-+ rcu_read_unlock();
- }
-
- /* Transmit local packets over Vxlan
-@@ -2637,7 +2638,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
-
- if (data[IFLA_VXLAN_ID]) {
- __u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
-- if (id >= VXLAN_VID_MASK)
-+ if (id >= VXLAN_N_VID)
- return -ERANGE;
- }
-
-diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
-index e30f05c..4722782 100644
---- a/drivers/pci/iov.c
-+++ b/drivers/pci/iov.c
-@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
- return rc;
- }
-
-- pci_iov_set_numvfs(dev, nr_virtfn);
-- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
-- pci_cfg_access_lock(dev);
-- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-- msleep(100);
-- pci_cfg_access_unlock(dev);
--
- iov->initial_VFs = initial;
- if (nr_virtfn < initial)
- initial = nr_virtfn;
-@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
- goto err_pcibios;
- }
-
-+ pci_iov_set_numvfs(dev, nr_virtfn);
-+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
-+ pci_cfg_access_lock(dev);
-+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-+ msleep(100);
-+ pci_cfg_access_unlock(dev);
-+
- for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i, 0);
- if (rc)
-@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
- }
-
- /**
-- * pci_iov_resource_bar - get position of the SR-IOV BAR
-+ * pci_iov_update_resource - update a VF BAR
- * @dev: the PCI device
- * @resno: the resource number
- *
-- * Returns position of the BAR encapsulated in the SR-IOV capability.
-+ * Update a VF BAR in the SR-IOV capability of a PF.
- */
--int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-+void pci_iov_update_resource(struct pci_dev *dev, int resno)
- {
-- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
-- return 0;
-+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
-+ struct resource *res = dev->resource + resno;
-+ int vf_bar = resno - PCI_IOV_RESOURCES;
-+ struct pci_bus_region region;
-+ u16 cmd;
-+ u32 new;
-+ int reg;
-+
-+ /*
-+ * The generic pci_restore_bars() path calls this for all devices,
-+ * including VFs and non-SR-IOV devices. If this is not a PF, we
-+ * have nothing to do.
-+ */
-+ if (!iov)
-+ return;
-+
-+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
-+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
-+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
-+ vf_bar, res);
-+ return;
-+ }
-+
-+ /*
-+ * Ignore unimplemented BARs, unused resource slots for 64-bit
-+ * BARs, and non-movable resources, e.g., those described via
-+ * Enhanced Allocation.
-+ */
-+ if (!res->flags)
-+ return;
-+
-+ if (res->flags & IORESOURCE_UNSET)
-+ return;
-+
-+ if (res->flags & IORESOURCE_PCI_FIXED)
-+ return;
-
-- BUG_ON(!dev->is_physfn);
-+ pcibios_resource_to_bus(dev->bus, &region, res);
-+ new = region.start;
-+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
-
-- return dev->sriov->pos + PCI_SRIOV_BAR +
-- 4 * (resno - PCI_IOV_RESOURCES);
-+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
-+ pci_write_config_dword(dev, reg, new);
-+ if (res->flags & IORESOURCE_MEM_64) {
-+ new = region.start >> 16 >> 16;
-+ pci_write_config_dword(dev, reg + 4, new);
-+ }
- }
-
- resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
-diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
-index eda6a7c..6922964 100644
---- a/drivers/pci/pci.c
-+++ b/drivers/pci/pci.c
-@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
- {
- int i;
-
-- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
-- if (dev->is_virtfn)
-- return;
--
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
- pci_update_resource(dev, i);
- }
-@@ -4835,36 +4831,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
- }
- EXPORT_SYMBOL(pci_select_bars);
-
--/**
-- * pci_resource_bar - get position of the BAR associated with a resource
-- * @dev: the PCI device
-- * @resno: the resource number
-- * @type: the BAR type to be filled in
-- *
-- * Returns BAR position in config space, or 0 if the BAR is invalid.
-- */
--int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
--{
-- int reg;
--
-- if (resno < PCI_ROM_RESOURCE) {
-- *type = pci_bar_unknown;
-- return PCI_BASE_ADDRESS_0 + 4 * resno;
-- } else if (resno == PCI_ROM_RESOURCE) {
-- *type = pci_bar_mem32;
-- return dev->rom_base_reg;
-- } else if (resno < PCI_BRIDGE_RESOURCES) {
-- /* device specific resource */
-- *type = pci_bar_unknown;
-- reg = pci_iov_resource_bar(dev, resno);
-- if (reg)
-- return reg;
-- }
--
-- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
-- return 0;
--}
--
- /* Some architectures require additional programming to enable VGA */
- static arch_set_vga_state_t arch_set_vga_state;
-
-diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
-index 4518562..a5d37f6 100644
---- a/drivers/pci/pci.h
-+++ b/drivers/pci/pci.h
-@@ -245,7 +245,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int pci_setup_device(struct pci_dev *dev);
- int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
--int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
- void pci_configure_ari(struct pci_dev *dev);
- void __pci_bus_size_bridges(struct pci_bus *bus,
- struct list_head *realloc_head);
-@@ -289,7 +288,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
- #ifdef CONFIG_PCI_IOV
- int pci_iov_init(struct pci_dev *dev);
- void pci_iov_release(struct pci_dev *dev);
--int pci_iov_resource_bar(struct pci_dev *dev, int resno);
-+void pci_iov_update_resource(struct pci_dev *dev, int resno);
- resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
- void pci_restore_iov_state(struct pci_dev *dev);
- int pci_iov_bus_range(struct pci_bus *bus);
-@@ -303,10 +302,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
-
- {
- }
--static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
--{
-- return 0;
--}
- static inline void pci_restore_iov_state(struct pci_dev *dev)
- {
- }
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 300770c..d266d80 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
- }
- } else {
-- res->flags |= (l & IORESOURCE_ROM_ENABLE);
-+ if (l & PCI_ROM_ADDRESS_ENABLE)
-+ res->flags |= IORESOURCE_ROM_ENABLE;
- l64 = l & PCI_ROM_ADDRESS_MASK;
- sz64 = sz & PCI_ROM_ADDRESS_MASK;
- mask64 = (u32)PCI_ROM_ADDRESS_MASK;
-diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
-index 06663d3..b6edb18 100644
---- a/drivers/pci/rom.c
-+++ b/drivers/pci/rom.c
-@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
- if (res->flags & IORESOURCE_ROM_SHADOW)
- return 0;
-
-+ /*
-+ * Ideally pci_update_resource() would update the ROM BAR address,
-+ * and we would only set the enable bit here. But apparently some
-+ * devices have buggy ROM BARs that read as zero when disabled.
-+ */
- pcibios_resource_to_bus(pdev->bus, &region, res);
- pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
- rom_addr &= ~PCI_ROM_ADDRESS_MASK;
-diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
-index 9526e34..4bc589e 100644
---- a/drivers/pci/setup-res.c
-+++ b/drivers/pci/setup-res.c
-@@ -25,21 +25,18 @@
- #include <linux/slab.h>
- #include "pci.h"
-
--
--void pci_update_resource(struct pci_dev *dev, int resno)
-+static void pci_std_update_resource(struct pci_dev *dev, int resno)
- {
- struct pci_bus_region region;
- bool disable;
- u16 cmd;
- u32 new, check, mask;
- int reg;
-- enum pci_bar_type type;
- struct resource *res = dev->resource + resno;
-
-- if (dev->is_virtfn) {
-- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
-+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
-+ if (dev->is_virtfn)
- return;
-- }
-
- /*
- * Ignore resources for unimplemented BARs and unused resource slots
-@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
- return;
-
- pcibios_resource_to_bus(dev->bus, &region, res);
-+ new = region.start;
-
-- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
-- if (res->flags & IORESOURCE_IO)
-+ if (res->flags & IORESOURCE_IO) {
- mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
-- else
-+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
-+ } else if (resno == PCI_ROM_RESOURCE) {
-+ mask = (u32)PCI_ROM_ADDRESS_MASK;
-+ } else {
- mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
-+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
-+ }
-
-- reg = pci_resource_bar(dev, resno, &type);
-- if (!reg)
-- return;
-- if (type != pci_bar_unknown) {
-+ if (resno < PCI_ROM_RESOURCE) {
-+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
-+ } else if (resno == PCI_ROM_RESOURCE) {
-+
-+ /*
-+ * Apparently some Matrox devices have ROM BARs that read
-+ * as zero when disabled, so don't update ROM BARs unless
-+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
-+ */
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
- return;
-+
-+ reg = dev->rom_base_reg;
- new |= PCI_ROM_ADDRESS_ENABLE;
-- }
-+ } else
-+ return;
-
- /*
- * We can't update a 64-bit BAR atomically, so when possible,
-@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
-
-+void pci_update_resource(struct pci_dev *dev, int resno)
-+{
-+ if (resno <= PCI_ROM_RESOURCE)
-+ pci_std_update_resource(dev, resno);
-+#ifdef CONFIG_PCI_IOV
-+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
-+ pci_iov_update_resource(dev, resno);
-+#endif
-+}
-+
- int pci_claim_resource(struct pci_dev *dev, int resource)
- {
- struct resource *res = &dev->resource[resource];
-diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c |
2657 |
-index ed92fb0..76b802c 100644 |
2658 |
---- a/drivers/s390/crypto/ap_bus.c |
2659 |
-+++ b/drivers/s390/crypto/ap_bus.c |
2660 |
-@@ -1712,6 +1712,9 @@ static void ap_scan_bus(struct work_struct *unused) |
2661 |
- ap_dev->queue_depth = queue_depth; |
2662 |
- ap_dev->raw_hwtype = device_type; |
2663 |
- ap_dev->device_type = device_type; |
2664 |
-+ /* CEX6 toleration: map to CEX5 */ |
2665 |
-+ if (device_type == AP_DEVICE_TYPE_CEX6) |
2666 |
-+ ap_dev->device_type = AP_DEVICE_TYPE_CEX5; |
2667 |
- ap_dev->functions = device_functions; |
2668 |
- spin_lock_init(&ap_dev->lock); |
2669 |
- INIT_LIST_HEAD(&ap_dev->pendingq); |
2670 |
-diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h |
2671 |
-index d7fdf5c..fd66d2c 100644 |
2672 |
---- a/drivers/s390/crypto/ap_bus.h |
2673 |
-+++ b/drivers/s390/crypto/ap_bus.h |
2674 |
-@@ -105,6 +105,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) |
2675 |
- #define AP_DEVICE_TYPE_CEX3C 9 |
2676 |
- #define AP_DEVICE_TYPE_CEX4 10 |
2677 |
- #define AP_DEVICE_TYPE_CEX5 11 |
2678 |
-+#define AP_DEVICE_TYPE_CEX6 12 |
2679 |
- |
2680 |
- /* |
2681 |
- * Known function facilities |
2682 |
-diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2683 |
-index 91dfd58..c4fe95a 100644 |
2684 |
---- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2685 |
-+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
2686 |
-@@ -22,7 +22,7 @@ |
2687 |
- * |
2688 |
- ****************************************************************************/ |
2689 |
- |
2690 |
--#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2691 |
-+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2692 |
- |
2693 |
- #include <linux/module.h> |
2694 |
- #include <linux/kernel.h> |
2695 |
-@@ -82,7 +82,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, |
2696 |
- } |
2697 |
- } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
2698 |
- if (se_cmd->data_direction == DMA_TO_DEVICE) { |
2699 |
-- /* residual data from an overflow write */ |
2700 |
-+ /* residual data from an overflow write */ |
2701 |
- rsp->flags = SRP_RSP_FLAG_DOOVER; |
2702 |
- rsp->data_out_res_cnt = cpu_to_be32(residual_count); |
2703 |
- } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { |
2704 |
-@@ -102,7 +102,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, |
2705 |
- * and the function returns TRUE. |
2706 |
- * |
2707 |
- * EXECUTION ENVIRONMENT: |
2708 |
-- * Interrupt or Process environment |
2709 |
-+ * Interrupt or Process environment |
2710 |
- */ |
2711 |
- static bool connection_broken(struct scsi_info *vscsi) |
2712 |
- { |
2713 |
-@@ -325,7 +325,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, |
2714 |
- } |
2715 |
- |
2716 |
- /** |
2717 |
-- * ibmvscsis_send_init_message() - send initialize message to the client |
2718 |
-+ * ibmvscsis_send_init_message() - send initialize message to the client |
2719 |
- * @vscsi: Pointer to our adapter structure |
2720 |
- * @format: Which Init Message format to send |
2721 |
- * |
2722 |
-@@ -383,13 +383,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) |
2723 |
- vscsi->cmd_q.base_addr); |
2724 |
- if (crq) { |
2725 |
- *format = (uint)(crq->format); |
2726 |
-- rc = ERROR; |
2727 |
-+ rc = ERROR; |
2728 |
- crq->valid = INVALIDATE_CMD_RESP_EL; |
2729 |
- dma_rmb(); |
2730 |
- } |
2731 |
- } else { |
2732 |
- *format = (uint)(crq->format); |
2733 |
-- rc = ERROR; |
2734 |
-+ rc = ERROR; |
2735 |
- crq->valid = INVALIDATE_CMD_RESP_EL; |
2736 |
- dma_rmb(); |
2737 |
- } |
2738 |
-@@ -398,166 +398,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) |
2739 |
- } |
2740 |
- |
2741 |
- /** |
2742 |
-- * ibmvscsis_establish_new_q() - Establish new CRQ queue |
2743 |
-- * @vscsi: Pointer to our adapter structure |
2744 |
-- * @new_state: New state being established after resetting the queue |
2745 |
-- * |
2746 |
-- * Must be called with interrupt lock held. |
2747 |
-- */ |
2748 |
--static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state) |
2749 |
--{ |
2750 |
-- long rc = ADAPT_SUCCESS; |
2751 |
-- uint format; |
2752 |
-- |
2753 |
-- vscsi->flags &= PRESERVE_FLAG_FIELDS; |
2754 |
-- vscsi->rsp_q_timer.timer_pops = 0; |
2755 |
-- vscsi->debit = 0; |
2756 |
-- vscsi->credit = 0; |
2757 |
-- |
2758 |
-- rc = vio_enable_interrupts(vscsi->dma_dev); |
2759 |
-- if (rc) { |
2760 |
-- pr_warn("reset_queue: failed to enable interrupts, rc %ld\n", |
2761 |
-- rc); |
2762 |
-- return rc; |
2763 |
-- } |
2764 |
-- |
2765 |
-- rc = ibmvscsis_check_init_msg(vscsi, &format); |
2766 |
-- if (rc) { |
2767 |
-- dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n", |
2768 |
-- rc); |
2769 |
-- return rc; |
2770 |
-- } |
2771 |
-- |
2772 |
-- if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) { |
2773 |
-- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
2774 |
-- switch (rc) { |
2775 |
-- case H_SUCCESS: |
2776 |
-- case H_DROPPED: |
2777 |
-- case H_CLOSED: |
2778 |
-- rc = ADAPT_SUCCESS; |
2779 |
-- break; |
2780 |
-- |
2781 |
-- case H_PARAMETER: |
2782 |
-- case H_HARDWARE: |
2783 |
-- break; |
2784 |
-- |
2785 |
-- default: |
2786 |
-- vscsi->state = UNDEFINED; |
2787 |
-- rc = H_HARDWARE; |
2788 |
-- break; |
2789 |
-- } |
2790 |
-- } |
2791 |
-- |
2792 |
-- return rc; |
2793 |
--} |
2794 |
-- |
2795 |
--/** |
2796 |
-- * ibmvscsis_reset_queue() - Reset CRQ Queue |
2797 |
-- * @vscsi: Pointer to our adapter structure |
2798 |
-- * @new_state: New state to establish after resetting the queue |
2799 |
-- * |
2800 |
-- * This function calls h_free_q and then calls h_reg_q and does all |
2801 |
-- * of the bookkeeping to get us back to where we can communicate. |
2802 |
-- * |
2803 |
-- * Actually, we don't always call h_free_crq. A problem was discovered |
2804 |
-- * where one partition would close and reopen his queue, which would |
2805 |
-- * cause his partner to get a transport event, which would cause him to |
2806 |
-- * close and reopen his queue, which would cause the original partition |
2807 |
-- * to get a transport event, etc., etc. To prevent this, we don't |
2808 |
-- * actually close our queue if the client initiated the reset, (i.e. |
2809 |
-- * either we got a transport event or we have detected that the client's |
2810 |
-- * queue is gone) |
2811 |
-- * |
2812 |
-- * EXECUTION ENVIRONMENT: |
2813 |
-- * Process environment, called with interrupt lock held |
2814 |
-- */ |
2815 |
--static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state) |
2816 |
--{ |
2817 |
-- int bytes; |
2818 |
-- long rc = ADAPT_SUCCESS; |
2819 |
-- |
2820 |
-- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); |
2821 |
-- |
2822 |
-- /* don't reset, the client did it for us */ |
2823 |
-- if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { |
2824 |
-- vscsi->flags &= PRESERVE_FLAG_FIELDS; |
2825 |
-- vscsi->rsp_q_timer.timer_pops = 0; |
2826 |
-- vscsi->debit = 0; |
2827 |
-- vscsi->credit = 0; |
2828 |
-- vscsi->state = new_state; |
2829 |
-- vio_enable_interrupts(vscsi->dma_dev); |
2830 |
-- } else { |
2831 |
-- rc = ibmvscsis_free_command_q(vscsi); |
2832 |
-- if (rc == ADAPT_SUCCESS) { |
2833 |
-- vscsi->state = new_state; |
2834 |
-- |
2835 |
-- bytes = vscsi->cmd_q.size * PAGE_SIZE; |
2836 |
-- rc = h_reg_crq(vscsi->dds.unit_id, |
2837 |
-- vscsi->cmd_q.crq_token, bytes); |
2838 |
-- if (rc == H_CLOSED || rc == H_SUCCESS) { |
2839 |
-- rc = ibmvscsis_establish_new_q(vscsi, |
2840 |
-- new_state); |
2841 |
-- } |
2842 |
-- |
2843 |
-- if (rc != ADAPT_SUCCESS) { |
2844 |
-- pr_debug("reset_queue: reg_crq rc %ld\n", rc); |
2845 |
-- |
2846 |
-- vscsi->state = ERR_DISCONNECTED; |
2847 |
-- vscsi->flags |= RESPONSE_Q_DOWN; |
2848 |
-- ibmvscsis_free_command_q(vscsi); |
2849 |
-- } |
2850 |
-- } else { |
2851 |
-- vscsi->state = ERR_DISCONNECTED; |
2852 |
-- vscsi->flags |= RESPONSE_Q_DOWN; |
2853 |
-- } |
2854 |
-- } |
2855 |
--} |
2856 |
-- |
2857 |
--/** |
2858 |
-- * ibmvscsis_free_cmd_resources() - Free command resources |
2859 |
-- * @vscsi: Pointer to our adapter structure |
2860 |
-- * @cmd: Command which is not longer in use |
2861 |
-- * |
2862 |
-- * Must be called with interrupt lock held. |
2863 |
-- */ |
2864 |
--static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, |
2865 |
-- struct ibmvscsis_cmd *cmd) |
2866 |
--{ |
2867 |
-- struct iu_entry *iue = cmd->iue; |
2868 |
-- |
2869 |
-- switch (cmd->type) { |
2870 |
-- case TASK_MANAGEMENT: |
2871 |
-- case SCSI_CDB: |
2872 |
-- /* |
2873 |
-- * When the queue goes down this value is cleared, so it |
2874 |
-- * cannot be cleared in this general purpose function. |
2875 |
-- */ |
2876 |
-- if (vscsi->debit) |
2877 |
-- vscsi->debit -= 1; |
2878 |
-- break; |
2879 |
-- case ADAPTER_MAD: |
2880 |
-- vscsi->flags &= ~PROCESSING_MAD; |
2881 |
-- break; |
2882 |
-- case UNSET_TYPE: |
2883 |
-- break; |
2884 |
-- default: |
2885 |
-- dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", |
2886 |
-- cmd->type); |
2887 |
-- break; |
2888 |
-- } |
2889 |
-- |
2890 |
-- cmd->iue = NULL; |
2891 |
-- list_add_tail(&cmd->list, &vscsi->free_cmd); |
2892 |
-- srp_iu_put(iue); |
2893 |
-- |
2894 |
-- if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && |
2895 |
-- list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { |
2896 |
-- vscsi->flags &= ~WAIT_FOR_IDLE; |
2897 |
-- complete(&vscsi->wait_idle); |
2898 |
-- } |
2899 |
--} |
2900 |
-- |
2901 |
--/** |
2902 |
- * ibmvscsis_disconnect() - Helper function to disconnect |
2903 |
- * @work: Pointer to work_struct, gives access to our adapter structure |
2904 |
- * |
2905 |
-@@ -576,7 +416,6 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
2906 |
- proc_work); |
2907 |
- u16 new_state; |
2908 |
- bool wait_idle = false; |
2909 |
-- long rc = ADAPT_SUCCESS; |
2910 |
- |
2911 |
- spin_lock_bh(&vscsi->intr_lock); |
2912 |
- new_state = vscsi->new_state; |
2913 |
-@@ -590,7 +429,7 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
2914 |
- * should transitition to the new state |
2915 |
- */ |
2916 |
- switch (vscsi->state) { |
2917 |
-- /* Should never be called while in this state. */ |
2918 |
-+ /* Should never be called while in this state. */ |
2919 |
- case NO_QUEUE: |
2920 |
- /* |
2921 |
- * Can never transition from this state; |
2922 |
-@@ -629,30 +468,24 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
2923 |
- vscsi->state = new_state; |
2924 |
- break; |
2925 |
- |
2926 |
-- /* |
2927 |
-- * If this is a transition into an error state. |
2928 |
-- * a client is attempting to establish a connection |
2929 |
-- * and has violated the RPA protocol. |
2930 |
-- * There can be nothing pending on the adapter although |
2931 |
-- * there can be requests in the command queue. |
2932 |
-- */ |
2933 |
- case WAIT_ENABLED: |
2934 |
-- case PART_UP_WAIT_ENAB: |
2935 |
- switch (new_state) { |
2936 |
-- case ERR_DISCONNECT: |
2937 |
-- vscsi->flags |= RESPONSE_Q_DOWN; |
2938 |
-+ case UNCONFIGURING: |
2939 |
- vscsi->state = new_state; |
2940 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
2941 |
- vscsi->flags &= ~(SCHEDULE_DISCONNECT | |
2942 |
- DISCONNECT_SCHEDULED); |
2943 |
-- ibmvscsis_free_command_q(vscsi); |
2944 |
-- break; |
2945 |
-- case ERR_DISCONNECT_RECONNECT: |
2946 |
-- ibmvscsis_reset_queue(vscsi, WAIT_ENABLED); |
2947 |
-+ dma_rmb(); |
2948 |
-+ if (vscsi->flags & CFG_SLEEPING) { |
2949 |
-+ vscsi->flags &= ~CFG_SLEEPING; |
2950 |
-+ complete(&vscsi->unconfig); |
2951 |
-+ } |
2952 |
- break; |
2953 |
- |
2954 |
- /* should never happen */ |
2955 |
-+ case ERR_DISCONNECT: |
2956 |
-+ case ERR_DISCONNECT_RECONNECT: |
2957 |
- case WAIT_IDLE: |
2958 |
-- rc = ERROR; |
2959 |
- dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", |
2960 |
- vscsi->state); |
2961 |
- break; |
2962 |
-@@ -661,6 +494,13 @@ static void ibmvscsis_disconnect(struct work_struct *work) |
2963 |
- |
2964 |
- case WAIT_IDLE: |
2965 |
- switch (new_state) { |
2966 |
-+ case UNCONFIGURING: |
2967 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
2968 |
-+ vscsi->state = new_state; |
2969 |
-+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | |
2970 |
-+ DISCONNECT_SCHEDULED); |
2971 |
-+ ibmvscsis_free_command_q(vscsi); |
2972 |
-+ break; |
2973 |
- case ERR_DISCONNECT: |
2974 |
- case ERR_DISCONNECT_RECONNECT: |
2975 |
- vscsi->state = new_state; |
2976 |
-@@ -765,45 +605,348 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, |
2977 |
- else |
2978 |
- state = vscsi->state; |
2979 |
- |
2980 |
-- switch (state) { |
2981 |
-- case NO_QUEUE: |
2982 |
-- case UNCONFIGURING: |
2983 |
-- break; |
2984 |
-+ switch (state) { |
2985 |
-+ case NO_QUEUE: |
2986 |
-+ case UNCONFIGURING: |
2987 |
-+ break; |
2988 |
-+ |
2989 |
-+ case ERR_DISCONNECTED: |
2990 |
-+ case ERR_DISCONNECT: |
2991 |
-+ case UNDEFINED: |
2992 |
-+ if (new_state == UNCONFIGURING) |
2993 |
-+ vscsi->new_state = new_state; |
2994 |
-+ break; |
2995 |
-+ |
2996 |
-+ case ERR_DISCONNECT_RECONNECT: |
2997 |
-+ switch (new_state) { |
2998 |
-+ case UNCONFIGURING: |
2999 |
-+ case ERR_DISCONNECT: |
3000 |
-+ vscsi->new_state = new_state; |
3001 |
-+ break; |
3002 |
-+ default: |
3003 |
-+ break; |
3004 |
-+ } |
3005 |
-+ break; |
3006 |
-+ |
3007 |
-+ case WAIT_ENABLED: |
3008 |
-+ case WAIT_IDLE: |
3009 |
-+ case WAIT_CONNECTION: |
3010 |
-+ case CONNECTED: |
3011 |
-+ case SRP_PROCESSING: |
3012 |
-+ vscsi->new_state = new_state; |
3013 |
-+ break; |
3014 |
-+ |
3015 |
-+ default: |
3016 |
-+ break; |
3017 |
-+ } |
3018 |
-+ } |
3019 |
-+ |
3020 |
-+ pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", |
3021 |
-+ vscsi->flags, vscsi->new_state); |
3022 |
-+} |
3023 |
-+ |
3024 |
-+/** |
3025 |
-+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message |
3026 |
-+ * @vscsi: Pointer to our adapter structure |
3027 |
-+ * |
3028 |
-+ * Must be called with interrupt lock held. |
3029 |
-+ */ |
3030 |
-+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) |
3031 |
-+{ |
3032 |
-+ long rc = ADAPT_SUCCESS; |
3033 |
-+ |
3034 |
-+ switch (vscsi->state) { |
3035 |
-+ case NO_QUEUE: |
3036 |
-+ case ERR_DISCONNECT: |
3037 |
-+ case ERR_DISCONNECT_RECONNECT: |
3038 |
-+ case ERR_DISCONNECTED: |
3039 |
-+ case UNCONFIGURING: |
3040 |
-+ case UNDEFINED: |
3041 |
-+ rc = ERROR; |
3042 |
-+ break; |
3043 |
-+ |
3044 |
-+ case WAIT_CONNECTION: |
3045 |
-+ vscsi->state = CONNECTED; |
3046 |
-+ break; |
3047 |
-+ |
3048 |
-+ case WAIT_IDLE: |
3049 |
-+ case SRP_PROCESSING: |
3050 |
-+ case CONNECTED: |
3051 |
-+ case WAIT_ENABLED: |
3052 |
-+ default: |
3053 |
-+ rc = ERROR; |
3054 |
-+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", |
3055 |
-+ vscsi->state); |
3056 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3057 |
-+ break; |
3058 |
-+ } |
3059 |
-+ |
3060 |
-+ return rc; |
3061 |
-+} |
3062 |
-+ |
3063 |
-+/** |
3064 |
-+ * ibmvscsis_handle_init_msg() - Respond to an Init Message |
3065 |
-+ * @vscsi: Pointer to our adapter structure |
3066 |
-+ * |
3067 |
-+ * Must be called with interrupt lock held. |
3068 |
-+ */ |
3069 |
-+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) |
3070 |
-+{ |
3071 |
-+ long rc = ADAPT_SUCCESS; |
3072 |
-+ |
3073 |
-+ switch (vscsi->state) { |
3074 |
-+ case WAIT_CONNECTION: |
3075 |
-+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
3076 |
-+ switch (rc) { |
3077 |
-+ case H_SUCCESS: |
3078 |
-+ vscsi->state = CONNECTED; |
3079 |
-+ break; |
3080 |
-+ |
3081 |
-+ case H_PARAMETER: |
3082 |
-+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
3083 |
-+ rc); |
3084 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
3085 |
-+ break; |
3086 |
-+ |
3087 |
-+ case H_DROPPED: |
3088 |
-+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
3089 |
-+ rc); |
3090 |
-+ rc = ERROR; |
3091 |
-+ ibmvscsis_post_disconnect(vscsi, |
3092 |
-+ ERR_DISCONNECT_RECONNECT, 0); |
3093 |
-+ break; |
3094 |
-+ |
3095 |
-+ case H_CLOSED: |
3096 |
-+ pr_warn("init_msg: failed to send, rc %ld\n", rc); |
3097 |
-+ rc = 0; |
3098 |
-+ break; |
3099 |
-+ } |
3100 |
-+ break; |
3101 |
-+ |
3102 |
-+ case UNDEFINED: |
3103 |
-+ rc = ERROR; |
3104 |
-+ break; |
3105 |
-+ |
3106 |
-+ case UNCONFIGURING: |
3107 |
-+ break; |
3108 |
-+ |
3109 |
-+ case WAIT_ENABLED: |
3110 |
-+ case CONNECTED: |
3111 |
-+ case SRP_PROCESSING: |
3112 |
-+ case WAIT_IDLE: |
3113 |
-+ case NO_QUEUE: |
3114 |
-+ case ERR_DISCONNECT: |
3115 |
-+ case ERR_DISCONNECT_RECONNECT: |
3116 |
-+ case ERR_DISCONNECTED: |
3117 |
-+ default: |
3118 |
-+ rc = ERROR; |
3119 |
-+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", |
3120 |
-+ vscsi->state); |
3121 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3122 |
-+ break; |
3123 |
-+ } |
3124 |
-+ |
3125 |
-+ return rc; |
3126 |
-+} |
3127 |
-+ |
3128 |
-+/** |
3129 |
-+ * ibmvscsis_init_msg() - Respond to an init message |
3130 |
-+ * @vscsi: Pointer to our adapter structure |
3131 |
-+ * @crq: Pointer to CRQ element containing the Init Message |
3132 |
-+ * |
3133 |
-+ * EXECUTION ENVIRONMENT: |
3134 |
-+ * Interrupt, interrupt lock held |
3135 |
-+ */ |
3136 |
-+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) |
3137 |
-+{ |
3138 |
-+ long rc = ADAPT_SUCCESS; |
3139 |
-+ |
3140 |
-+ pr_debug("init_msg: state 0x%hx\n", vscsi->state); |
3141 |
-+ |
3142 |
-+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, |
3143 |
-+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, |
3144 |
-+ 0); |
3145 |
-+ if (rc == H_SUCCESS) { |
3146 |
-+ vscsi->client_data.partition_number = |
3147 |
-+ be64_to_cpu(*(u64 *)vscsi->map_buf); |
3148 |
-+ pr_debug("init_msg, part num %d\n", |
3149 |
-+ vscsi->client_data.partition_number); |
3150 |
-+ } else { |
3151 |
-+ pr_debug("init_msg h_vioctl rc %ld\n", rc); |
3152 |
-+ rc = ADAPT_SUCCESS; |
3153 |
-+ } |
3154 |
-+ |
3155 |
-+ if (crq->format == INIT_MSG) { |
3156 |
-+ rc = ibmvscsis_handle_init_msg(vscsi); |
3157 |
-+ } else if (crq->format == INIT_COMPLETE_MSG) { |
3158 |
-+ rc = ibmvscsis_handle_init_compl_msg(vscsi); |
3159 |
-+ } else { |
3160 |
-+ rc = ERROR; |
3161 |
-+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n", |
3162 |
-+ (uint)crq->format); |
3163 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3164 |
-+ } |
3165 |
-+ |
3166 |
-+ return rc; |
3167 |
-+} |
3168 |
-+ |
3169 |
-+/** |
3170 |
-+ * ibmvscsis_establish_new_q() - Establish new CRQ queue |
3171 |
-+ * @vscsi: Pointer to our adapter structure |
3172 |
-+ * |
3173 |
-+ * Must be called with interrupt lock held. |
3174 |
-+ */ |
3175 |
-+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) |
3176 |
-+{ |
3177 |
-+ long rc = ADAPT_SUCCESS; |
3178 |
-+ uint format; |
3179 |
-+ |
3180 |
-+ vscsi->flags &= PRESERVE_FLAG_FIELDS; |
3181 |
-+ vscsi->rsp_q_timer.timer_pops = 0; |
3182 |
-+ vscsi->debit = 0; |
3183 |
-+ vscsi->credit = 0; |
3184 |
-+ |
3185 |
-+ rc = vio_enable_interrupts(vscsi->dma_dev); |
3186 |
-+ if (rc) { |
3187 |
-+ pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", |
3188 |
-+ rc); |
3189 |
-+ return rc; |
3190 |
-+ } |
3191 |
-+ |
3192 |
-+ rc = ibmvscsis_check_init_msg(vscsi, &format); |
3193 |
-+ if (rc) { |
3194 |
-+ dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", |
3195 |
-+ rc); |
3196 |
-+ return rc; |
3197 |
-+ } |
3198 |
-+ |
3199 |
-+ if (format == UNUSED_FORMAT) { |
3200 |
-+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
3201 |
-+ switch (rc) { |
3202 |
-+ case H_SUCCESS: |
3203 |
-+ case H_DROPPED: |
3204 |
-+ case H_CLOSED: |
3205 |
-+ rc = ADAPT_SUCCESS; |
3206 |
-+ break; |
3207 |
-+ |
3208 |
-+ case H_PARAMETER: |
3209 |
-+ case H_HARDWARE: |
3210 |
-+ break; |
3211 |
-+ |
3212 |
-+ default: |
3213 |
-+ vscsi->state = UNDEFINED; |
3214 |
-+ rc = H_HARDWARE; |
3215 |
-+ break; |
3216 |
-+ } |
3217 |
-+ } else if (format == INIT_MSG) { |
3218 |
-+ rc = ibmvscsis_handle_init_msg(vscsi); |
3219 |
-+ } |
3220 |
-+ |
3221 |
-+ return rc; |
3222 |
-+} |
3223 |
-+ |
3224 |
-+/** |
3225 |
-+ * ibmvscsis_reset_queue() - Reset CRQ Queue |
3226 |
-+ * @vscsi: Pointer to our adapter structure |
3227 |
-+ * |
3228 |
-+ * This function calls h_free_q and then calls h_reg_q and does all |
3229 |
-+ * of the bookkeeping to get us back to where we can communicate. |
3230 |
-+ * |
3231 |
-+ * Actually, we don't always call h_free_crq. A problem was discovered |
3232 |
-+ * where one partition would close and reopen his queue, which would |
3233 |
-+ * cause his partner to get a transport event, which would cause him to |
3234 |
-+ * close and reopen his queue, which would cause the original partition |
3235 |
-+ * to get a transport event, etc., etc. To prevent this, we don't |
3236 |
-+ * actually close our queue if the client initiated the reset, (i.e. |
3237 |
-+ * either we got a transport event or we have detected that the client's |
3238 |
-+ * queue is gone) |
3239 |
-+ * |
3240 |
-+ * EXECUTION ENVIRONMENT: |
3241 |
-+ * Process environment, called with interrupt lock held |
3242 |
-+ */ |
3243 |
-+static void ibmvscsis_reset_queue(struct scsi_info *vscsi) |
3244 |
-+{ |
3245 |
-+ int bytes; |
3246 |
-+ long rc = ADAPT_SUCCESS; |
3247 |
-+ |
3248 |
-+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); |
3249 |
-+ |
3250 |
-+ /* don't reset, the client did it for us */ |
3251 |
-+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { |
3252 |
-+ vscsi->flags &= PRESERVE_FLAG_FIELDS; |
3253 |
-+ vscsi->rsp_q_timer.timer_pops = 0; |
3254 |
-+ vscsi->debit = 0; |
3255 |
-+ vscsi->credit = 0; |
3256 |
-+ vscsi->state = WAIT_CONNECTION; |
3257 |
-+ vio_enable_interrupts(vscsi->dma_dev); |
3258 |
-+ } else { |
3259 |
-+ rc = ibmvscsis_free_command_q(vscsi); |
3260 |
-+ if (rc == ADAPT_SUCCESS) { |
3261 |
-+ vscsi->state = WAIT_CONNECTION; |
3262 |
-+ |
3263 |
-+ bytes = vscsi->cmd_q.size * PAGE_SIZE; |
3264 |
-+ rc = h_reg_crq(vscsi->dds.unit_id, |
3265 |
-+ vscsi->cmd_q.crq_token, bytes); |
3266 |
-+ if (rc == H_CLOSED || rc == H_SUCCESS) { |
3267 |
-+ rc = ibmvscsis_establish_new_q(vscsi); |
3268 |
-+ } |
3269 |
- |
3270 |
-- case ERR_DISCONNECTED: |
3271 |
-- case ERR_DISCONNECT: |
3272 |
-- case UNDEFINED: |
3273 |
-- if (new_state == UNCONFIGURING) |
3274 |
-- vscsi->new_state = new_state; |
3275 |
-- break; |
3276 |
-+ if (rc != ADAPT_SUCCESS) { |
3277 |
-+ pr_debug("reset_queue: reg_crq rc %ld\n", rc); |
3278 |
- |
3279 |
-- case ERR_DISCONNECT_RECONNECT: |
3280 |
-- switch (new_state) { |
3281 |
-- case UNCONFIGURING: |
3282 |
-- case ERR_DISCONNECT: |
3283 |
-- vscsi->new_state = new_state; |
3284 |
-- break; |
3285 |
-- default: |
3286 |
-- break; |
3287 |
-+ vscsi->state = ERR_DISCONNECTED; |
3288 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
3289 |
-+ ibmvscsis_free_command_q(vscsi); |
3290 |
- } |
3291 |
-- break; |
3292 |
-+ } else { |
3293 |
-+ vscsi->state = ERR_DISCONNECTED; |
3294 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
3295 |
-+ } |
3296 |
-+ } |
3297 |
-+} |
3298 |
- |
3299 |
-- case WAIT_ENABLED: |
3300 |
-- case PART_UP_WAIT_ENAB: |
3301 |
-- case WAIT_IDLE: |
3302 |
-- case WAIT_CONNECTION: |
3303 |
-- case CONNECTED: |
3304 |
-- case SRP_PROCESSING: |
3305 |
-- vscsi->new_state = new_state; |
3306 |
-- break; |
3307 |
-+/** |
3308 |
-+ * ibmvscsis_free_cmd_resources() - Free command resources |
3309 |
-+ * @vscsi: Pointer to our adapter structure |
3310 |
-+ * @cmd: Command which is not longer in use |
3311 |
-+ * |
3312 |
-+ * Must be called with interrupt lock held. |
3313 |
-+ */ |
3314 |
-+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, |
3315 |
-+ struct ibmvscsis_cmd *cmd) |
3316 |
-+{ |
3317 |
-+ struct iu_entry *iue = cmd->iue; |
3318 |
- |
3319 |
-- default: |
3320 |
-- break; |
3321 |
-- } |
3322 |
-+ switch (cmd->type) { |
3323 |
-+ case TASK_MANAGEMENT: |
3324 |
-+ case SCSI_CDB: |
3325 |
-+ /* |
3326 |
-+ * When the queue goes down this value is cleared, so it |
3327 |
-+ * cannot be cleared in this general purpose function. |
3328 |
-+ */ |
3329 |
-+ if (vscsi->debit) |
3330 |
-+ vscsi->debit -= 1; |
3331 |
-+ break; |
3332 |
-+ case ADAPTER_MAD: |
3333 |
-+ vscsi->flags &= ~PROCESSING_MAD; |
3334 |
-+ break; |
3335 |
-+ case UNSET_TYPE: |
3336 |
-+ break; |
3337 |
-+ default: |
3338 |
-+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", |
3339 |
-+ cmd->type); |
3340 |
-+ break; |
3341 |
- } |
3342 |
- |
3343 |
-- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", |
3344 |
-- vscsi->flags, vscsi->new_state); |
3345 |
-+ cmd->iue = NULL; |
3346 |
-+ list_add_tail(&cmd->list, &vscsi->free_cmd); |
3347 |
-+ srp_iu_put(iue); |
3348 |
-+ |
3349 |
-+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && |
3350 |
-+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { |
3351 |
-+ vscsi->flags &= ~WAIT_FOR_IDLE; |
3352 |
-+ complete(&vscsi->wait_idle); |
3353 |
-+ } |
3354 |
- } |
3355 |
- |
3356 |
- /** |
3357 |
-@@ -864,10 +1007,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, |
3358 |
- TRANS_EVENT)); |
3359 |
- break; |
3360 |
- |
3361 |
-- case PART_UP_WAIT_ENAB: |
3362 |
-- vscsi->state = WAIT_ENABLED; |
3363 |
-- break; |
3364 |
-- |
3365 |
- case SRP_PROCESSING: |
3366 |
- if ((vscsi->debit > 0) || |
3367 |
- !list_empty(&vscsi->schedule_q) || |
3368 |
-@@ -896,7 +1035,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, |
3369 |
- } |
3370 |
- } |
3371 |
- |
3372 |
-- rc = vscsi->flags & SCHEDULE_DISCONNECT; |
3373 |
-+ rc = vscsi->flags & SCHEDULE_DISCONNECT; |
3374 |
- |
3375 |
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", |
3376 |
- vscsi->flags, vscsi->state, rc); |
3377 |
-@@ -1067,16 +1206,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) |
3378 |
- free_qs = true; |
3379 |
- |
3380 |
- switch (vscsi->state) { |
3381 |
-+ case UNCONFIGURING: |
3382 |
-+ ibmvscsis_free_command_q(vscsi); |
3383 |
-+ dma_rmb(); |
3384 |
-+ isync(); |
3385 |
-+ if (vscsi->flags & CFG_SLEEPING) { |
3386 |
-+ vscsi->flags &= ~CFG_SLEEPING; |
3387 |
-+ complete(&vscsi->unconfig); |
3388 |
-+ } |
3389 |
-+ break; |
3390 |
- case ERR_DISCONNECT_RECONNECT: |
3391 |
-- ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION); |
3392 |
-+ ibmvscsis_reset_queue(vscsi); |
3393 |
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); |
3394 |
- break; |
3395 |
- |
3396 |
- case ERR_DISCONNECT: |
3397 |
- ibmvscsis_free_command_q(vscsi); |
3398 |
-- vscsi->flags &= ~DISCONNECT_SCHEDULED; |
3399 |
-+ vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); |
3400 |
- vscsi->flags |= RESPONSE_Q_DOWN; |
3401 |
-- vscsi->state = ERR_DISCONNECTED; |
3402 |
-+ if (vscsi->tport.enabled) |
3403 |
-+ vscsi->state = ERR_DISCONNECTED; |
3404 |
-+ else |
3405 |
-+ vscsi->state = WAIT_ENABLED; |
3406 |
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", |
3407 |
- vscsi->flags, vscsi->state); |
3408 |
- break; |
3409 |
-@@ -1221,7 +1372,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, |
3410 |
- * @iue: Information Unit containing the Adapter Info MAD request |
3411 |
- * |
3412 |
- * EXECUTION ENVIRONMENT: |
3413 |
-- * Interrupt adpater lock is held |
3414 |
-+ * Interrupt adapter lock is held |
3415 |
- */ |
3416 |
- static long ibmvscsis_adapter_info(struct scsi_info *vscsi, |
3417 |
- struct iu_entry *iue) |
3418 |
-@@ -1621,8 +1772,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) |
3419 |
- be64_to_cpu(msg_hi), |
3420 |
- be64_to_cpu(cmd->rsp.tag)); |
3421 |
- |
3422 |
-- pr_debug("send_messages: tag 0x%llx, rc %ld\n", |
3423 |
-- be64_to_cpu(cmd->rsp.tag), rc); |
3424 |
-+ pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", |
3425 |
-+ cmd, be64_to_cpu(cmd->rsp.tag), rc); |
3426 |
- |
3427 |
- /* if all ok free up the command element resources */ |
3428 |
- if (rc == H_SUCCESS) { |
3429 |
-@@ -1692,7 +1843,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, |
3430 |
- * @crq: Pointer to the CRQ entry containing the MAD request |
3431 |
- * |
3432 |
- * EXECUTION ENVIRONMENT: |
3433 |
-- * Interrupt called with adapter lock held |
3434 |
-+ * Interrupt, called with adapter lock held |
3435 |
- */ |
3436 |
- static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) |
3437 |
- { |
3438 |
-@@ -1746,14 +1897,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) |
3439 |
- |
3440 |
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); |
3441 |
- |
3442 |
-- if (be16_to_cpu(mad->length) < 0) { |
3443 |
-- dev_err(&vscsi->dev, "mad: length is < 0\n"); |
3444 |
-- ibmvscsis_post_disconnect(vscsi, |
3445 |
-- ERR_DISCONNECT_RECONNECT, 0); |
3446 |
-- rc = SRP_VIOLATION; |
3447 |
-- } else { |
3448 |
-- rc = ibmvscsis_process_mad(vscsi, iue); |
3449 |
-- } |
3450 |
-+ rc = ibmvscsis_process_mad(vscsi, iue); |
3451 |
- |
3452 |
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), |
3453 |
- rc); |
3454 |
-@@ -1865,7 +2009,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, |
3455 |
- break; |
3456 |
- case H_PERMISSION: |
3457 |
- if (connection_broken(vscsi)) |
3458 |
-- flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; |
3459 |
-+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; |
3460 |
- dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", |
3461 |
- rc); |
3462 |
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, |
3463 |
-@@ -2090,248 +2234,98 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) |
3464 |
- break; |
3465 |
- |
3466 |
- case SRP_TSK_MGMT: |
3467 |
-- tsk = &vio_iu(iue)->srp.tsk_mgmt; |
3468 |
-- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, |
3469 |
-- tsk->tag); |
3470 |
-- cmd->rsp.tag = tsk->tag; |
3471 |
-- vscsi->debit += 1; |
3472 |
-- cmd->type = TASK_MANAGEMENT; |
3473 |
-- list_add_tail(&cmd->list, &vscsi->schedule_q); |
3474 |
-- queue_work(vscsi->work_q, &cmd->work); |
3475 |
-- break; |
3476 |
-- |
3477 |
-- case SRP_CMD: |
3478 |
-- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, |
3479 |
-- srp->tag); |
3480 |
-- cmd->rsp.tag = srp->tag; |
3481 |
-- vscsi->debit += 1; |
3482 |
-- cmd->type = SCSI_CDB; |
3483 |
-- /* |
3484 |
-- * We want to keep track of work waiting for |
3485 |
-- * the workqueue. |
3486 |
-- */ |
3487 |
-- list_add_tail(&cmd->list, &vscsi->schedule_q); |
3488 |
-- queue_work(vscsi->work_q, &cmd->work); |
3489 |
-- break; |
3490 |
-- |
3491 |
-- case SRP_I_LOGOUT: |
3492 |
-- rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); |
3493 |
-- break; |
3494 |
-- |
3495 |
-- case SRP_CRED_RSP: |
3496 |
-- case SRP_AER_RSP: |
3497 |
-- default: |
3498 |
-- ibmvscsis_free_cmd_resources(vscsi, cmd); |
3499 |
-- dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", |
3500 |
-- (uint)srp->opcode); |
3501 |
-- ibmvscsis_post_disconnect(vscsi, |
3502 |
-- ERR_DISCONNECT_RECONNECT, 0); |
3503 |
-- break; |
3504 |
-- } |
3505 |
-- } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { |
3506 |
-- rc = ibmvscsis_srp_login(vscsi, cmd, crq); |
3507 |
-- } else { |
3508 |
-- ibmvscsis_free_cmd_resources(vscsi, cmd); |
3509 |
-- dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", |
3510 |
-- vscsi->state); |
3511 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3512 |
-- } |
3513 |
--} |
3514 |
-- |
3515 |
--/** |
3516 |
-- * ibmvscsis_ping_response() - Respond to a ping request |
3517 |
-- * @vscsi: Pointer to our adapter structure |
3518 |
-- * |
3519 |
-- * Let the client know that the server is alive and waiting on |
3520 |
-- * its native I/O stack. |
3521 |
-- * If any type of error occurs from the call to queue a ping |
3522 |
-- * response then the client is either not accepting or receiving |
3523 |
-- * interrupts. Disconnect with an error. |
3524 |
-- * |
3525 |
-- * EXECUTION ENVIRONMENT: |
3526 |
-- * Interrupt, interrupt lock held |
3527 |
-- */ |
3528 |
--static long ibmvscsis_ping_response(struct scsi_info *vscsi) |
3529 |
--{ |
3530 |
-- struct viosrp_crq *crq; |
3531 |
-- u64 buffer[2] = { 0, 0 }; |
3532 |
-- long rc; |
3533 |
-- |
3534 |
-- crq = (struct viosrp_crq *)&buffer; |
3535 |
-- crq->valid = VALID_CMD_RESP_EL; |
3536 |
-- crq->format = (u8)MESSAGE_IN_CRQ; |
3537 |
-- crq->status = PING_RESPONSE; |
3538 |
-- |
3539 |
-- rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), |
3540 |
-- cpu_to_be64(buffer[MSG_LOW])); |
3541 |
-- |
3542 |
-- switch (rc) { |
3543 |
-- case H_SUCCESS: |
3544 |
-- break; |
3545 |
-- case H_CLOSED: |
3546 |
-- vscsi->flags |= CLIENT_FAILED; |
3547 |
-- case H_DROPPED: |
3548 |
-- vscsi->flags |= RESPONSE_Q_DOWN; |
3549 |
-- case H_REMOTE_PARM: |
3550 |
-- dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", |
3551 |
-- rc); |
3552 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3553 |
-- break; |
3554 |
-- default: |
3555 |
-- dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", |
3556 |
-- rc); |
3557 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
3558 |
-- break; |
3559 |
-- } |
3560 |
-- |
3561 |
-- return rc; |
3562 |
--} |
3563 |
-- |
3564 |
--/** |
3565 |
-- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message |
3566 |
-- * @vscsi: Pointer to our adapter structure |
3567 |
-- * |
3568 |
-- * Must be called with interrupt lock held. |
3569 |
-- */ |
3570 |
--static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) |
3571 |
--{ |
3572 |
-- long rc = ADAPT_SUCCESS; |
3573 |
-- |
3574 |
-- switch (vscsi->state) { |
3575 |
-- case NO_QUEUE: |
3576 |
-- case ERR_DISCONNECT: |
3577 |
-- case ERR_DISCONNECT_RECONNECT: |
3578 |
-- case ERR_DISCONNECTED: |
3579 |
-- case UNCONFIGURING: |
3580 |
-- case UNDEFINED: |
3581 |
-- rc = ERROR; |
3582 |
-- break; |
3583 |
-- |
3584 |
-- case WAIT_CONNECTION: |
3585 |
-- vscsi->state = CONNECTED; |
3586 |
-- break; |
3587 |
-- |
3588 |
-- case WAIT_IDLE: |
3589 |
-- case SRP_PROCESSING: |
3590 |
-- case CONNECTED: |
3591 |
-- case WAIT_ENABLED: |
3592 |
-- case PART_UP_WAIT_ENAB: |
3593 |
-- default: |
3594 |
-- rc = ERROR; |
3595 |
-- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", |
3596 |
-- vscsi->state); |
3597 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3598 |
-- break; |
3599 |
-- } |
3600 |
-- |
3601 |
-- return rc; |
3602 |
--} |
3603 |
-- |
3604 |
--/** |
3605 |
-- * ibmvscsis_handle_init_msg() - Respond to an Init Message |
3606 |
-- * @vscsi: Pointer to our adapter structure |
3607 |
-- * |
3608 |
-- * Must be called with interrupt lock held. |
3609 |
-- */ |
3610 |
--static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) |
3611 |
--{ |
3612 |
-- long rc = ADAPT_SUCCESS; |
3613 |
-- |
3614 |
-- switch (vscsi->state) { |
3615 |
-- case WAIT_ENABLED: |
3616 |
-- vscsi->state = PART_UP_WAIT_ENAB; |
3617 |
-- break; |
3618 |
-+ tsk = &vio_iu(iue)->srp.tsk_mgmt; |
3619 |
-+ pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, |
3620 |
-+ tsk->tag); |
3621 |
-+ cmd->rsp.tag = tsk->tag; |
3622 |
-+ vscsi->debit += 1; |
3623 |
-+ cmd->type = TASK_MANAGEMENT; |
3624 |
-+ list_add_tail(&cmd->list, &vscsi->schedule_q); |
3625 |
-+ queue_work(vscsi->work_q, &cmd->work); |
3626 |
-+ break; |
3627 |
- |
3628 |
-- case WAIT_CONNECTION: |
3629 |
-- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
3630 |
-- switch (rc) { |
3631 |
-- case H_SUCCESS: |
3632 |
-- vscsi->state = CONNECTED; |
3633 |
-+ case SRP_CMD: |
3634 |
-+ pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, |
3635 |
-+ srp->tag); |
3636 |
-+ cmd->rsp.tag = srp->tag; |
3637 |
-+ vscsi->debit += 1; |
3638 |
-+ cmd->type = SCSI_CDB; |
3639 |
-+ /* |
3640 |
-+ * We want to keep track of work waiting for |
3641 |
-+ * the workqueue. |
3642 |
-+ */ |
3643 |
-+ list_add_tail(&cmd->list, &vscsi->schedule_q); |
3644 |
-+ queue_work(vscsi->work_q, &cmd->work); |
3645 |
- break; |
3646 |
- |
3647 |
-- case H_PARAMETER: |
3648 |
-- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
3649 |
-- rc); |
3650 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
3651 |
-+ case SRP_I_LOGOUT: |
3652 |
-+ rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); |
3653 |
- break; |
3654 |
- |
3655 |
-- case H_DROPPED: |
3656 |
-- dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", |
3657 |
-- rc); |
3658 |
-- rc = ERROR; |
3659 |
-+ case SRP_CRED_RSP: |
3660 |
-+ case SRP_AER_RSP: |
3661 |
-+ default: |
3662 |
-+ ibmvscsis_free_cmd_resources(vscsi, cmd); |
3663 |
-+ dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", |
3664 |
-+ (uint)srp->opcode); |
3665 |
- ibmvscsis_post_disconnect(vscsi, |
3666 |
- ERR_DISCONNECT_RECONNECT, 0); |
3667 |
- break; |
3668 |
-- |
3669 |
-- case H_CLOSED: |
3670 |
-- pr_warn("init_msg: failed to send, rc %ld\n", rc); |
3671 |
-- rc = 0; |
3672 |
-- break; |
3673 |
- } |
3674 |
-- break; |
3675 |
-- |
3676 |
-- case UNDEFINED: |
3677 |
-- rc = ERROR; |
3678 |
-- break; |
3679 |
-- |
3680 |
-- case UNCONFIGURING: |
3681 |
-- break; |
3682 |
-- |
3683 |
-- case PART_UP_WAIT_ENAB: |
3684 |
-- case CONNECTED: |
3685 |
-- case SRP_PROCESSING: |
3686 |
-- case WAIT_IDLE: |
3687 |
-- case NO_QUEUE: |
3688 |
-- case ERR_DISCONNECT: |
3689 |
-- case ERR_DISCONNECT_RECONNECT: |
3690 |
-- case ERR_DISCONNECTED: |
3691 |
-- default: |
3692 |
-- rc = ERROR; |
3693 |
-- dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", |
3694 |
-+ } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { |
3695 |
-+ rc = ibmvscsis_srp_login(vscsi, cmd, crq); |
3696 |
-+ } else { |
3697 |
-+ ibmvscsis_free_cmd_resources(vscsi, cmd); |
3698 |
-+ dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", |
3699 |
- vscsi->state); |
3700 |
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3701 |
-- break; |
3702 |
- } |
3703 |
-- |
3704 |
-- return rc; |
3705 |
- } |
3706 |
- |
3707 |
- /** |
3708 |
-- * ibmvscsis_init_msg() - Respond to an init message |
3709 |
-+ * ibmvscsis_ping_response() - Respond to a ping request |
3710 |
- * @vscsi: Pointer to our adapter structure |
3711 |
-- * @crq: Pointer to CRQ element containing the Init Message |
3712 |
-+ * |
3713 |
-+ * Let the client know that the server is alive and waiting on |
3714 |
-+ * its native I/O stack. |
3715 |
-+ * If any type of error occurs from the call to queue a ping |
3716 |
-+ * response then the client is either not accepting or receiving |
3717 |
-+ * interrupts. Disconnect with an error. |
3718 |
- * |
3719 |
- * EXECUTION ENVIRONMENT: |
3720 |
- * Interrupt, interrupt lock held |
3721 |
- */ |
3722 |
--static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) |
3723 |
-+static long ibmvscsis_ping_response(struct scsi_info *vscsi) |
3724 |
- { |
3725 |
-- long rc = ADAPT_SUCCESS; |
3726 |
-+ struct viosrp_crq *crq; |
3727 |
-+ u64 buffer[2] = { 0, 0 }; |
3728 |
-+ long rc; |
3729 |
- |
3730 |
-- pr_debug("init_msg: state 0x%hx\n", vscsi->state); |
3731 |
-+ crq = (struct viosrp_crq *)&buffer; |
3732 |
-+ crq->valid = VALID_CMD_RESP_EL; |
3733 |
-+ crq->format = (u8)MESSAGE_IN_CRQ; |
3734 |
-+ crq->status = PING_RESPONSE; |
3735 |
- |
3736 |
-- rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, |
3737 |
-- (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, |
3738 |
-- 0); |
3739 |
-- if (rc == H_SUCCESS) { |
3740 |
-- vscsi->client_data.partition_number = |
3741 |
-- be64_to_cpu(*(u64 *)vscsi->map_buf); |
3742 |
-- pr_debug("init_msg, part num %d\n", |
3743 |
-- vscsi->client_data.partition_number); |
3744 |
-- } else { |
3745 |
-- pr_debug("init_msg h_vioctl rc %ld\n", rc); |
3746 |
-- rc = ADAPT_SUCCESS; |
3747 |
-- } |
3748 |
-+ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), |
3749 |
-+ cpu_to_be64(buffer[MSG_LOW])); |
3750 |
- |
3751 |
-- if (crq->format == INIT_MSG) { |
3752 |
-- rc = ibmvscsis_handle_init_msg(vscsi); |
3753 |
-- } else if (crq->format == INIT_COMPLETE_MSG) { |
3754 |
-- rc = ibmvscsis_handle_init_compl_msg(vscsi); |
3755 |
-- } else { |
3756 |
-- rc = ERROR; |
3757 |
-- dev_err(&vscsi->dev, "init_msg: invalid format %d\n", |
3758 |
-- (uint)crq->format); |
3759 |
-+ switch (rc) { |
3760 |
-+ case H_SUCCESS: |
3761 |
-+ break; |
3762 |
-+ case H_CLOSED: |
3763 |
-+ vscsi->flags |= CLIENT_FAILED; |
3764 |
-+ case H_DROPPED: |
3765 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
3766 |
-+ case H_REMOTE_PARM: |
3767 |
-+ dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", |
3768 |
-+ rc); |
3769 |
- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3770 |
-+ break; |
3771 |
-+ default: |
3772 |
-+ dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", |
3773 |
-+ rc); |
3774 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
3775 |
-+ break; |
3776 |
- } |
3777 |
- |
3778 |
- return rc; |
3779 |
-@@ -2392,7 +2386,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi, |
3780 |
- break; |
3781 |
- |
3782 |
- case VALID_TRANS_EVENT: |
3783 |
-- rc = ibmvscsis_trans_event(vscsi, crq); |
3784 |
-+ rc = ibmvscsis_trans_event(vscsi, crq); |
3785 |
- break; |
3786 |
- |
3787 |
- case VALID_INIT_MSG: |
3788 |
-@@ -2523,7 +2517,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, |
3789 |
- dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", |
3790 |
- srp->tag); |
3791 |
- goto fail; |
3792 |
-- return; |
3793 |
- } |
3794 |
- |
3795 |
- cmd->rsp.sol_not = srp->sol_not; |
3796 |
-@@ -2560,6 +2553,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, |
3797 |
- data_len, attr, dir, 0); |
3798 |
- if (rc) { |
3799 |
- dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); |
3800 |
-+ spin_lock_bh(&vscsi->intr_lock); |
3801 |
-+ list_del(&cmd->list); |
3802 |
-+ ibmvscsis_free_cmd_resources(vscsi, cmd); |
3803 |
-+ spin_unlock_bh(&vscsi->intr_lock); |
3804 |
- goto fail; |
3805 |
- } |
3806 |
- return; |
3807 |
-@@ -2639,6 +2636,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi, |
3808 |
- if (rc) { |
3809 |
- dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", |
3810 |
- rc); |
3811 |
-+ spin_lock_bh(&vscsi->intr_lock); |
3812 |
-+ list_del(&cmd->list); |
3813 |
-+ spin_unlock_bh(&vscsi->intr_lock); |
3814 |
- cmd->se_cmd.se_tmr_req->response = |
3815 |
- TMR_FUNCTION_REJECTED; |
3816 |
- } |
3817 |
-@@ -2787,36 +2787,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) |
3818 |
- } |
3819 |
- |
3820 |
- /** |
3821 |
-- * ibmvscsis_check_q() - Helper function to Check Init Message Valid |
3822 |
-- * @vscsi: Pointer to our adapter structure |
3823 |
-- * |
3824 |
-- * Checks if a initialize message was queued by the initiatior |
3825 |
-- * while the timing window was open. This function is called from |
3826 |
-- * probe after the CRQ is created and interrupts are enabled. |
3827 |
-- * It would only be used by adapters who wait for some event before |
3828 |
-- * completing the init handshake with the client. For ibmvscsi, this |
3829 |
-- * event is waiting for the port to be enabled. |
3830 |
-- * |
3831 |
-- * EXECUTION ENVIRONMENT: |
3832 |
-- * Process level only, interrupt lock held |
3833 |
-- */ |
3834 |
--static long ibmvscsis_check_q(struct scsi_info *vscsi) |
3835 |
--{ |
3836 |
-- uint format; |
3837 |
-- long rc; |
3838 |
-- |
3839 |
-- rc = ibmvscsis_check_init_msg(vscsi, &format); |
3840 |
-- if (rc) |
3841 |
-- ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); |
3842 |
-- else if (format == UNUSED_FORMAT) |
3843 |
-- vscsi->state = WAIT_ENABLED; |
3844 |
-- else |
3845 |
-- vscsi->state = PART_UP_WAIT_ENAB; |
3846 |
-- |
3847 |
-- return rc; |
3848 |
--} |
3849 |
-- |
3850 |
--/** |
3851 |
- * ibmvscsis_enable_change_state() - Set new state based on enabled status |
3852 |
- * @vscsi: Pointer to our adapter structure |
3853 |
- * |
3854 |
-@@ -2827,77 +2797,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi) |
3855 |
- */ |
3856 |
- static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) |
3857 |
- { |
3858 |
-+ int bytes; |
3859 |
- long rc = ADAPT_SUCCESS; |
3860 |
- |
3861 |
--handle_state_change: |
3862 |
-- switch (vscsi->state) { |
3863 |
-- case WAIT_ENABLED: |
3864 |
-- rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); |
3865 |
-- switch (rc) { |
3866 |
-- case H_SUCCESS: |
3867 |
-- case H_DROPPED: |
3868 |
-- case H_CLOSED: |
3869 |
-- vscsi->state = WAIT_CONNECTION; |
3870 |
-- rc = ADAPT_SUCCESS; |
3871 |
-- break; |
3872 |
-- |
3873 |
-- case H_PARAMETER: |
3874 |
-- break; |
3875 |
-- |
3876 |
-- case H_HARDWARE: |
3877 |
-- break; |
3878 |
-- |
3879 |
-- default: |
3880 |
-- vscsi->state = UNDEFINED; |
3881 |
-- rc = H_HARDWARE; |
3882 |
-- break; |
3883 |
-- } |
3884 |
-- break; |
3885 |
-- case PART_UP_WAIT_ENAB: |
3886 |
-- rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); |
3887 |
-- switch (rc) { |
3888 |
-- case H_SUCCESS: |
3889 |
-- vscsi->state = CONNECTED; |
3890 |
-- rc = ADAPT_SUCCESS; |
3891 |
-- break; |
3892 |
-- |
3893 |
-- case H_DROPPED: |
3894 |
-- case H_CLOSED: |
3895 |
-- vscsi->state = WAIT_ENABLED; |
3896 |
-- goto handle_state_change; |
3897 |
-- |
3898 |
-- case H_PARAMETER: |
3899 |
-- break; |
3900 |
-- |
3901 |
-- case H_HARDWARE: |
3902 |
-- break; |
3903 |
-- |
3904 |
-- default: |
3905 |
-- rc = H_HARDWARE; |
3906 |
-- break; |
3907 |
-- } |
3908 |
-- break; |
3909 |
-- |
3910 |
-- case WAIT_CONNECTION: |
3911 |
-- case WAIT_IDLE: |
3912 |
-- case SRP_PROCESSING: |
3913 |
-- case CONNECTED: |
3914 |
-- rc = ADAPT_SUCCESS; |
3915 |
-- break; |
3916 |
-- /* should not be able to get here */ |
3917 |
-- case UNCONFIGURING: |
3918 |
-- rc = ERROR; |
3919 |
-- vscsi->state = UNDEFINED; |
3920 |
-- break; |
3921 |
-+ bytes = vscsi->cmd_q.size * PAGE_SIZE; |
3922 |
-+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); |
3923 |
-+ if (rc == H_CLOSED || rc == H_SUCCESS) { |
3924 |
-+ vscsi->state = WAIT_CONNECTION; |
3925 |
-+ rc = ibmvscsis_establish_new_q(vscsi); |
3926 |
-+ } |
3927 |
- |
3928 |
-- /* driver should never allow this to happen */ |
3929 |
-- case ERR_DISCONNECT: |
3930 |
-- case ERR_DISCONNECT_RECONNECT: |
3931 |
-- default: |
3932 |
-- dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n", |
3933 |
-- vscsi->state); |
3934 |
-- rc = ADAPT_SUCCESS; |
3935 |
-- break; |
3936 |
-+ if (rc != ADAPT_SUCCESS) { |
3937 |
-+ vscsi->state = ERR_DISCONNECTED; |
3938 |
-+ vscsi->flags |= RESPONSE_Q_DOWN; |
3939 |
- } |
3940 |
- |
3941 |
- return rc; |
3942 |
-@@ -2917,7 +2829,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) |
3943 |
- */ |
3944 |
- static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) |
3945 |
- { |
3946 |
-- long rc = 0; |
3947 |
- int pages; |
3948 |
- struct vio_dev *vdev = vscsi->dma_dev; |
3949 |
- |
3950 |
-@@ -2941,22 +2852,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) |
3951 |
- return -ENOMEM; |
3952 |
- } |
3953 |
- |
3954 |
-- rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); |
3955 |
-- if (rc) { |
3956 |
-- if (rc == H_CLOSED) { |
3957 |
-- vscsi->state = WAIT_ENABLED; |
3958 |
-- rc = 0; |
3959 |
-- } else { |
3960 |
-- dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token, |
3961 |
-- PAGE_SIZE, DMA_BIDIRECTIONAL); |
3962 |
-- free_page((unsigned long)vscsi->cmd_q.base_addr); |
3963 |
-- rc = -ENODEV; |
3964 |
-- } |
3965 |
-- } else { |
3966 |
-- vscsi->state = WAIT_ENABLED; |
3967 |
-- } |
3968 |
-- |
3969 |
-- return rc; |
3970 |
-+ return 0; |
3971 |
- } |
3972 |
- |
3973 |
- /** |
3974 |
-@@ -3271,7 +3167,7 @@ static void ibmvscsis_handle_crq(unsigned long data) |
3975 |
- /* |
3976 |
- * if we are in a path where we are waiting for all pending commands |
3977 |
- * to complete because we received a transport event and anything in |
3978 |
-- * the command queue is for a new connection, do nothing |
3979 |
-+ * the command queue is for a new connection, do nothing |
3980 |
- */ |
3981 |
- if (TARGET_STOP(vscsi)) { |
3982 |
- vio_enable_interrupts(vscsi->dma_dev); |
3983 |
-@@ -3315,7 +3211,7 @@ static void ibmvscsis_handle_crq(unsigned long data) |
3984 |
- * everything but transport events on the queue |
3985 |
- * |
3986 |
- * need to decrement the queue index so we can |
3987 |
-- * look at the elment again |
3988 |
-+ * look at the element again |
3989 |
- */ |
3990 |
- if (vscsi->cmd_q.index) |
3991 |
- vscsi->cmd_q.index -= 1; |
3992 |
-@@ -3379,7 +3275,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
3993 |
- INIT_LIST_HEAD(&vscsi->waiting_rsp); |
3994 |
- INIT_LIST_HEAD(&vscsi->active_q); |
3995 |
- |
3996 |
-- snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); |
3997 |
-+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", |
3998 |
-+ dev_name(&vdev->dev)); |
3999 |
- |
4000 |
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); |
4001 |
- |
4002 |
-@@ -3394,6 +3291,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
4003 |
- strncat(vscsi->eye, vdev->name, MAX_EYE); |
4004 |
- |
4005 |
- vscsi->dds.unit_id = vdev->unit_address; |
4006 |
-+ strncpy(vscsi->dds.partition_name, partition_name, |
4007 |
-+ sizeof(vscsi->dds.partition_name)); |
4008 |
-+ vscsi->dds.partition_num = partition_number; |
4009 |
- |
4010 |
- spin_lock_bh(&ibmvscsis_dev_lock); |
4011 |
- list_add_tail(&vscsi->list, &ibmvscsis_dev_list); |
4012 |
-@@ -3470,6 +3370,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
4013 |
- (unsigned long)vscsi); |
4014 |
- |
4015 |
- init_completion(&vscsi->wait_idle); |
4016 |
-+ init_completion(&vscsi->unconfig); |
4017 |
- |
4018 |
- snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); |
4019 |
- vscsi->work_q = create_workqueue(wq_name); |
4020 |
-@@ -3486,31 +3387,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev, |
4021 |
- goto destroy_WQ; |
4022 |
- } |
4023 |
- |
4024 |
-- spin_lock_bh(&vscsi->intr_lock); |
4025 |
-- vio_enable_interrupts(vdev); |
4026 |
-- if (rc) { |
4027 |
-- dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc); |
4028 |
-- rc = -ENODEV; |
4029 |
-- spin_unlock_bh(&vscsi->intr_lock); |
4030 |
-- goto free_irq; |
4031 |
-- } |
4032 |
-- |
4033 |
-- if (ibmvscsis_check_q(vscsi)) { |
4034 |
-- rc = ERROR; |
4035 |
-- dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc); |
4036 |
-- spin_unlock_bh(&vscsi->intr_lock); |
4037 |
-- goto disable_interrupt; |
4038 |
-- } |
4039 |
-- spin_unlock_bh(&vscsi->intr_lock); |
4040 |
-+ vscsi->state = WAIT_ENABLED; |
4041 |
- |
4042 |
- dev_set_drvdata(&vdev->dev, vscsi); |
4043 |
- |
4044 |
- return 0; |
4045 |
- |
4046 |
--disable_interrupt: |
4047 |
-- vio_disable_interrupts(vdev); |
4048 |
--free_irq: |
4049 |
-- free_irq(vdev->irq, vscsi); |
4050 |
- destroy_WQ: |
4051 |
- destroy_workqueue(vscsi->work_q); |
4052 |
- unmap_buf: |
4053 |
-@@ -3544,10 +3426,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev) |
4054 |
- |
4055 |
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); |
4056 |
- |
4057 |
-- /* |
4058 |
-- * TBD: Need to handle if there are commands on the waiting_rsp q |
4059 |
-- * Actually, can there still be cmds outstanding to tcm? |
4060 |
-- */ |
4061 |
-+ spin_lock_bh(&vscsi->intr_lock); |
4062 |
-+ ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); |
4063 |
-+ vscsi->flags |= CFG_SLEEPING; |
4064 |
-+ spin_unlock_bh(&vscsi->intr_lock); |
4065 |
-+ wait_for_completion(&vscsi->unconfig); |
4066 |
- |
4067 |
- vio_disable_interrupts(vdev); |
4068 |
- free_irq(vdev->irq, vscsi); |
4069 |
-@@ -3556,7 +3439,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev) |
4070 |
- DMA_BIDIRECTIONAL); |
4071 |
- kfree(vscsi->map_buf); |
4072 |
- tasklet_kill(&vscsi->work_task); |
4073 |
-- ibmvscsis_unregister_command_q(vscsi); |
4074 |
- ibmvscsis_destroy_command_q(vscsi); |
4075 |
- ibmvscsis_freetimer(vscsi); |
4076 |
- ibmvscsis_free_cmds(vscsi); |
4077 |
-@@ -3610,7 +3492,7 @@ static int ibmvscsis_get_system_info(void) |
4078 |
- |
4079 |
- num = of_get_property(rootdn, "ibm,partition-no", NULL); |
4080 |
- if (num) |
4081 |
-- partition_number = *num; |
4082 |
-+ partition_number = of_read_number(num, 1); |
4083 |
- |
4084 |
- of_node_put(rootdn); |
4085 |
- |
4086 |
-@@ -3904,18 +3786,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, |
4087 |
- } |
4088 |
- |
4089 |
- if (tmp) { |
4090 |
-- tport->enabled = true; |
4091 |
- spin_lock_bh(&vscsi->intr_lock); |
4092 |
-+ tport->enabled = true; |
4093 |
- lrc = ibmvscsis_enable_change_state(vscsi); |
4094 |
- if (lrc) |
4095 |
- pr_err("enable_change_state failed, rc %ld state %d\n", |
4096 |
- lrc, vscsi->state); |
4097 |
- spin_unlock_bh(&vscsi->intr_lock); |
4098 |
- } else { |
4099 |
-+ spin_lock_bh(&vscsi->intr_lock); |
4100 |
- tport->enabled = false; |
4101 |
-+ /* This simulates the server going down */ |
4102 |
-+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); |
4103 |
-+ spin_unlock_bh(&vscsi->intr_lock); |
4104 |
- } |
4105 |
- |
4106 |
-- pr_debug("tpg_enable_store, state %d\n", vscsi->state); |
4107 |
-+ pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state); |
4108 |
- |
4109 |
- return count; |
4110 |
- } |
4111 |
-@@ -3985,10 +3871,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = { |
4112 |
- ATTRIBUTE_GROUPS(ibmvscsis_dev); |
4113 |
- |
4114 |
- static struct class ibmvscsis_class = { |
4115 |
-- .name = "ibmvscsis", |
4116 |
-- .dev_release = ibmvscsis_dev_release, |
4117 |
-- .class_attrs = ibmvscsis_class_attrs, |
4118 |
-- .dev_groups = ibmvscsis_dev_groups, |
4119 |
-+ .name = "ibmvscsis", |
4120 |
-+ .dev_release = ibmvscsis_dev_release, |
4121 |
-+ .class_attrs = ibmvscsis_class_attrs, |
4122 |
-+ .dev_groups = ibmvscsis_dev_groups, |
4123 |
- }; |
4124 |
- |
4125 |
- static struct vio_device_id ibmvscsis_device_table[] = { |
4126 |
-diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
4127 |
-index 981a0c9..98b0ca7 100644 |
4128 |
---- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
4129 |
-+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
4130 |
-@@ -204,8 +204,6 @@ struct scsi_info { |
4131 |
- struct list_head waiting_rsp; |
4132 |
- #define NO_QUEUE 0x00 |
4133 |
- #define WAIT_ENABLED 0X01 |
4134 |
-- /* driver has received an initialize command */ |
4135 |
--#define PART_UP_WAIT_ENAB 0x02 |
4136 |
- #define WAIT_CONNECTION 0x04 |
4137 |
- /* have established a connection */ |
4138 |
- #define CONNECTED 0x08 |
4139 |
-@@ -259,6 +257,8 @@ struct scsi_info { |
4140 |
- #define SCHEDULE_DISCONNECT 0x00400 |
4141 |
- /* disconnect handler is scheduled */ |
4142 |
- #define DISCONNECT_SCHEDULED 0x00800 |
4143 |
-+ /* remove function is sleeping */ |
4144 |
-+#define CFG_SLEEPING 0x01000 |
4145 |
- u32 flags; |
4146 |
- /* adapter lock */ |
4147 |
- spinlock_t intr_lock; |
4148 |
-@@ -287,6 +287,7 @@ struct scsi_info { |
4149 |
- |
4150 |
- struct workqueue_struct *work_q; |
4151 |
- struct completion wait_idle; |
4152 |
-+ struct completion unconfig; |
4153 |
- struct device dev; |
4154 |
- struct vio_dev *dma_dev; |
4155 |
- struct srp_target target; |
4156 |
-diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c |
4157 |
-index 4d09bd4..6e3e636 100644 |
4158 |
---- a/drivers/tty/serial/8250/8250_pci.c |
4159 |
-+++ b/drivers/tty/serial/8250/8250_pci.c |
4160 |
-@@ -52,6 +52,7 @@ struct serial_private { |
4161 |
- struct pci_dev *dev; |
4162 |
- unsigned int nr; |
4163 |
- struct pci_serial_quirk *quirk; |
4164 |
-+ const struct pciserial_board *board; |
4165 |
- int line[0]; |
4166 |
- }; |
4167 |
- |
4168 |
-@@ -3871,6 +3872,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) |
4169 |
- } |
4170 |
- } |
4171 |
- priv->nr = i; |
4172 |
-+ priv->board = board; |
4173 |
- return priv; |
4174 |
- |
4175 |
- err_deinit: |
4176 |
-@@ -3881,7 +3883,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) |
4177 |
- } |
4178 |
- EXPORT_SYMBOL_GPL(pciserial_init_ports); |
4179 |
- |
4180 |
--void pciserial_remove_ports(struct serial_private *priv) |
4181 |
-+void pciserial_detach_ports(struct serial_private *priv) |
4182 |
- { |
4183 |
- struct pci_serial_quirk *quirk; |
4184 |
- int i; |
4185 |
-@@ -3895,7 +3897,11 @@ void pciserial_remove_ports(struct serial_private *priv) |
4186 |
- quirk = find_quirk(priv->dev); |
4187 |
- if (quirk->exit) |
4188 |
- quirk->exit(priv->dev); |
4189 |
-+} |
4190 |
- |
4191 |
-+void pciserial_remove_ports(struct serial_private *priv) |
4192 |
-+{ |
4193 |
-+ pciserial_detach_ports(priv); |
4194 |
- kfree(priv); |
4195 |
- } |
4196 |
- EXPORT_SYMBOL_GPL(pciserial_remove_ports); |
4197 |
-@@ -5590,7 +5596,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, |
4198 |
- return PCI_ERS_RESULT_DISCONNECT; |
4199 |
- |
4200 |
- if (priv) |
4201 |
-- pciserial_suspend_ports(priv); |
4202 |
-+ pciserial_detach_ports(priv); |
4203 |
- |
4204 |
- pci_disable_device(dev); |
4205 |
- |
4206 |
-@@ -5615,9 +5621,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) |
4207 |
- static void serial8250_io_resume(struct pci_dev *dev) |
4208 |
- { |
4209 |
- struct serial_private *priv = pci_get_drvdata(dev); |
4210 |
-+ const struct pciserial_board *board; |
4211 |
- |
4212 |
-- if (priv) |
4213 |
-- pciserial_resume_ports(priv); |
4214 |
-+ if (!priv) |
4215 |
-+ return; |
4216 |
-+ |
4217 |
-+ board = priv->board; |
4218 |
-+ kfree(priv); |
4219 |
-+ priv = pciserial_init_ports(dev, board); |
4220 |
-+ |
4221 |
-+ if (!IS_ERR(priv)) { |
4222 |
-+ pci_set_drvdata(dev, priv); |
4223 |
-+ } |
4224 |
- } |
4225 |
- |
4226 |
- static const struct pci_error_handlers serial8250_err_handler = { |
4227 |
-diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c |
4228 |
-index 45bc997..a95b3e7 100644 |
4229 |
---- a/drivers/usb/gadget/udc/atmel_usba_udc.c |
4230 |
-+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c |
4231 |
-@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, |
4232 |
- dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); |
4233 |
- goto err; |
4234 |
- } |
4235 |
-- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); |
4236 |
-+ sprintf(ep->name, "ep%d", ep->index); |
4237 |
-+ ep->ep.name = ep->name; |
4238 |
- |
4239 |
- ep->ep_regs = udc->regs + USBA_EPT_BASE(i); |
4240 |
- ep->dma_regs = udc->regs + USBA_DMA_BASE(i); |
4241 |
-diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h |
4242 |
-index 3e1c9d5..b03b2eb 100644 |
4243 |
---- a/drivers/usb/gadget/udc/atmel_usba_udc.h |
4244 |
-+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h |
4245 |
-@@ -280,6 +280,7 @@ struct usba_ep { |
4246 |
- void __iomem *ep_regs; |
4247 |
- void __iomem *dma_regs; |
4248 |
- void __iomem *fifo; |
4249 |
-+ char name[8]; |
4250 |
- struct usb_ep ep; |
4251 |
- struct usba_udc *udc; |
4252 |
- |
4253 |
-diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c |
4254 |
-index 80378dd..c882357 100644 |
4255 |
---- a/drivers/vfio/vfio_iommu_spapr_tce.c |
4256 |
-+++ b/drivers/vfio/vfio_iommu_spapr_tce.c |
4257 |
-@@ -31,49 +31,49 @@ |
4258 |
- static void tce_iommu_detach_group(void *iommu_data, |
4259 |
- struct iommu_group *iommu_group); |
4260 |
- |
4261 |
--static long try_increment_locked_vm(long npages) |
4262 |
-+static long try_increment_locked_vm(struct mm_struct *mm, long npages) |
4263 |
- { |
4264 |
- long ret = 0, locked, lock_limit; |
4265 |
- |
4266 |
-- if (!current || !current->mm) |
4267 |
-- return -ESRCH; /* process exited */ |
4268 |
-+ if (WARN_ON_ONCE(!mm)) |
4269 |
-+ return -EPERM; |
4270 |
- |
4271 |
- if (!npages) |
4272 |
- return 0; |
4273 |
- |
4274 |
-- down_write(¤t->mm->mmap_sem); |
4275 |
-- locked = current->mm->locked_vm + npages; |
4276 |
-+ down_write(&mm->mmap_sem); |
4277 |
-+ locked = mm->locked_vm + npages; |
4278 |
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
4279 |
- if (locked > lock_limit && !capable(CAP_IPC_LOCK)) |
4280 |
- ret = -ENOMEM; |
4281 |
- else |
4282 |
-- current->mm->locked_vm += npages; |
4283 |
-+ mm->locked_vm += npages; |
4284 |
- |
4285 |
- pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, |
4286 |
- npages << PAGE_SHIFT, |
4287 |
-- current->mm->locked_vm << PAGE_SHIFT, |
4288 |
-+ mm->locked_vm << PAGE_SHIFT, |
4289 |
- rlimit(RLIMIT_MEMLOCK), |
4290 |
- ret ? " - exceeded" : ""); |
4291 |
- |
4292 |
-- up_write(¤t->mm->mmap_sem); |
4293 |
-+ up_write(&mm->mmap_sem); |
4294 |
- |
4295 |
- return ret; |
4296 |
- } |
4297 |
- |
4298 |
--static void decrement_locked_vm(long npages) |
4299 |
-+static void decrement_locked_vm(struct mm_struct *mm, long npages) |
4300 |
- { |
4301 |
-- if (!current || !current->mm || !npages) |
4302 |
-- return; /* process exited */ |
4303 |
-+ if (!mm || !npages) |
4304 |
-+ return; |
4305 |
- |
4306 |
-- down_write(¤t->mm->mmap_sem); |
4307 |
-- if (WARN_ON_ONCE(npages > current->mm->locked_vm)) |
4308 |
-- npages = current->mm->locked_vm; |
4309 |
-- current->mm->locked_vm -= npages; |
4310 |
-+ down_write(&mm->mmap_sem); |
4311 |
-+ if (WARN_ON_ONCE(npages > mm->locked_vm)) |
4312 |
-+ npages = mm->locked_vm; |
4313 |
-+ mm->locked_vm -= npages; |
4314 |
- pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, |
4315 |
- npages << PAGE_SHIFT, |
4316 |
-- current->mm->locked_vm << PAGE_SHIFT, |
4317 |
-+ mm->locked_vm << PAGE_SHIFT, |
4318 |
- rlimit(RLIMIT_MEMLOCK)); |
4319 |
-- up_write(¤t->mm->mmap_sem); |
4320 |
-+ up_write(&mm->mmap_sem); |
4321 |
- } |
4322 |
- |
4323 |
- /* |
4324 |
-@@ -89,6 +89,15 @@ struct tce_iommu_group { |
4325 |
- }; |
4326 |
- |
4327 |
- /* |
4328 |
-+ * A container needs to remember which preregistered region it has |
4329 |
-+ * referenced to do proper cleanup at the userspace process exit. |
4330 |
-+ */ |
4331 |
-+struct tce_iommu_prereg { |
4332 |
-+ struct list_head next; |
4333 |
-+ struct mm_iommu_table_group_mem_t *mem; |
4334 |
-+}; |
4335 |
-+ |
4336 |
-+/* |
4337 |
- * The container descriptor supports only a single group per container. |
4338 |
- * Required by the API as the container is not supplied with the IOMMU group |
4339 |
- * at the moment of initialization. |
4340 |
-@@ -97,24 +106,68 @@ struct tce_container { |
4341 |
- struct mutex lock; |
4342 |
- bool enabled; |
4343 |
- bool v2; |
4344 |
-+ bool def_window_pending; |
4345 |
- unsigned long locked_pages; |
4346 |
-+ struct mm_struct *mm; |
4347 |
- struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; |
4348 |
- struct list_head group_list; |
4349 |
-+ struct list_head prereg_list; |
4350 |
- }; |
4351 |
- |
4352 |
-+static long tce_iommu_mm_set(struct tce_container *container) |
4353 |
-+{ |
4354 |
-+ if (container->mm) { |
4355 |
-+ if (container->mm == current->mm) |
4356 |
-+ return 0; |
4357 |
-+ return -EPERM; |
4358 |
-+ } |
4359 |
-+ BUG_ON(!current->mm); |
4360 |
-+ container->mm = current->mm; |
4361 |
-+ atomic_inc(&container->mm->mm_count); |
4362 |
-+ |
4363 |
-+ return 0; |
4364 |
-+} |
4365 |
-+ |
4366 |
-+static long tce_iommu_prereg_free(struct tce_container *container, |
4367 |
-+ struct tce_iommu_prereg *tcemem) |
4368 |
-+{ |
4369 |
-+ long ret; |
4370 |
-+ |
4371 |
-+ ret = mm_iommu_put(container->mm, tcemem->mem); |
4372 |
-+ if (ret) |
4373 |
-+ return ret; |
4374 |
-+ |
4375 |
-+ list_del(&tcemem->next); |
4376 |
-+ kfree(tcemem); |
4377 |
-+ |
4378 |
-+ return 0; |
4379 |
-+} |
4380 |
-+ |
4381 |
- static long tce_iommu_unregister_pages(struct tce_container *container, |
4382 |
- __u64 vaddr, __u64 size) |
4383 |
- { |
4384 |
- struct mm_iommu_table_group_mem_t *mem; |
4385 |
-+ struct tce_iommu_prereg *tcemem; |
4386 |
-+ bool found = false; |
4387 |
- |
4388 |
- if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK)) |
4389 |
- return -EINVAL; |
4390 |
- |
4391 |
-- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT); |
4392 |
-+ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT); |
4393 |
- if (!mem) |
4394 |
- return -ENOENT; |
4395 |
- |
4396 |
-- return mm_iommu_put(mem); |
4397 |
-+ list_for_each_entry(tcemem, &container->prereg_list, next) { |
4398 |
-+ if (tcemem->mem == mem) { |
4399 |
-+ found = true; |
4400 |
-+ break; |
4401 |
-+ } |
4402 |
-+ } |
4403 |
-+ |
4404 |
-+ if (!found) |
4405 |
-+ return -ENOENT; |
4406 |
-+ |
4407 |
-+ return tce_iommu_prereg_free(container, tcemem); |
4408 |
- } |
4409 |
- |
4410 |
- static long tce_iommu_register_pages(struct tce_container *container, |
4411 |
-@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container, |
4412 |
- { |
4413 |
- long ret = 0; |
4414 |
- struct mm_iommu_table_group_mem_t *mem = NULL; |
4415 |
-+ struct tce_iommu_prereg *tcemem; |
4416 |
- unsigned long entries = size >> PAGE_SHIFT; |
4417 |
- |
4418 |
- if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) || |
4419 |
- ((vaddr + size) < vaddr)) |
4420 |
- return -EINVAL; |
4421 |
- |
4422 |
-- ret = mm_iommu_get(vaddr, entries, &mem); |
4423 |
-+ mem = mm_iommu_find(container->mm, vaddr, entries); |
4424 |
-+ if (mem) { |
4425 |
-+ list_for_each_entry(tcemem, &container->prereg_list, next) { |
4426 |
-+ if (tcemem->mem == mem) |
4427 |
-+ return -EBUSY; |
4428 |
-+ } |
4429 |
-+ } |
4430 |
-+ |
4431 |
-+ ret = mm_iommu_get(container->mm, vaddr, entries, &mem); |
4432 |
- if (ret) |
4433 |
- return ret; |
4434 |
- |
4435 |
-+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL); |
4436 |
-+ tcemem->mem = mem; |
4437 |
-+ list_add(&tcemem->next, &container->prereg_list); |
4438 |
-+ |
4439 |
- container->enabled = true; |
4440 |
- |
4441 |
- return 0; |
4442 |
- } |
4443 |
- |
4444 |
--static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
4445 |
-+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl, |
4446 |
-+ struct mm_struct *mm) |
4447 |
- { |
4448 |
- unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * |
4449 |
- tbl->it_size, PAGE_SIZE); |
4450 |
-@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
4451 |
- |
4452 |
- BUG_ON(tbl->it_userspace); |
4453 |
- |
4454 |
-- ret = try_increment_locked_vm(cb >> PAGE_SHIFT); |
4455 |
-+ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT); |
4456 |
- if (ret) |
4457 |
- return ret; |
4458 |
- |
4459 |
- uas = vzalloc(cb); |
4460 |
- if (!uas) { |
4461 |
-- decrement_locked_vm(cb >> PAGE_SHIFT); |
4462 |
-+ decrement_locked_vm(mm, cb >> PAGE_SHIFT); |
4463 |
- return -ENOMEM; |
4464 |
- } |
4465 |
- tbl->it_userspace = uas; |
4466 |
-@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) |
4467 |
- return 0; |
4468 |
- } |
4469 |
- |
4470 |
--static void tce_iommu_userspace_view_free(struct iommu_table *tbl) |
4471 |
-+static void tce_iommu_userspace_view_free(struct iommu_table *tbl, |
4472 |
-+ struct mm_struct *mm) |
4473 |
- { |
4474 |
- unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * |
4475 |
- tbl->it_size, PAGE_SIZE); |
4476 |
-@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl) |
4477 |
- |
4478 |
- vfree(tbl->it_userspace); |
4479 |
- tbl->it_userspace = NULL; |
4480 |
-- decrement_locked_vm(cb >> PAGE_SHIFT); |
4481 |
-+ decrement_locked_vm(mm, cb >> PAGE_SHIFT); |
4482 |
- } |
4483 |
- |
4484 |
- static bool tce_page_is_contained(struct page *page, unsigned page_shift) |
4485 |
-@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container) |
4486 |
- struct iommu_table_group *table_group; |
4487 |
- struct tce_iommu_group *tcegrp; |
4488 |
- |
4489 |
-- if (!current->mm) |
4490 |
-- return -ESRCH; /* process exited */ |
4491 |
-- |
4492 |
- if (container->enabled) |
4493 |
- return -EBUSY; |
4494 |
- |
4495 |
-@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container) |
4496 |
- if (!table_group->tce32_size) |
4497 |
- return -EPERM; |
4498 |
- |
4499 |
-+ ret = tce_iommu_mm_set(container); |
4500 |
-+ if (ret) |
4501 |
-+ return ret; |
4502 |
-+ |
4503 |
- locked = table_group->tce32_size >> PAGE_SHIFT; |
4504 |
-- ret = try_increment_locked_vm(locked); |
4505 |
-+ ret = try_increment_locked_vm(container->mm, locked); |
4506 |
- if (ret) |
4507 |
- return ret; |
4508 |
- |
4509 |
-@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container) |
4510 |
- |
4511 |
- container->enabled = false; |
4512 |
- |
4513 |
-- if (!current->mm) |
4514 |
-- return; |
4515 |
-- |
4516 |
-- decrement_locked_vm(container->locked_pages); |
4517 |
-+ BUG_ON(!container->mm); |
4518 |
-+ decrement_locked_vm(container->mm, container->locked_pages); |
4519 |
- } |
4520 |
- |
4521 |
- static void *tce_iommu_open(unsigned long arg) |
4522 |
-@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg) |
4523 |
- |
4524 |
- mutex_init(&container->lock); |
4525 |
- INIT_LIST_HEAD_RCU(&container->group_list); |
4526 |
-+ INIT_LIST_HEAD_RCU(&container->prereg_list); |
4527 |
- |
4528 |
- container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; |
4529 |
- |
4530 |
-@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg) |
4531 |
- static int tce_iommu_clear(struct tce_container *container, |
4532 |
- struct iommu_table *tbl, |
4533 |
- unsigned long entry, unsigned long pages); |
4534 |
--static void tce_iommu_free_table(struct iommu_table *tbl); |
4535 |
-+static void tce_iommu_free_table(struct tce_container *container, |
4536 |
-+ struct iommu_table *tbl); |
4537 |
- |
4538 |
- static void tce_iommu_release(void *iommu_data) |
4539 |
- { |
4540 |
-@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data) |
4541 |
- continue; |
4542 |
- |
4543 |
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
4544 |
-- tce_iommu_free_table(tbl); |
4545 |
-+ tce_iommu_free_table(container, tbl); |
4546 |
-+ } |
4547 |
-+ |
4548 |
-+ while (!list_empty(&container->prereg_list)) { |
4549 |
-+ struct tce_iommu_prereg *tcemem; |
4550 |
-+ |
4551 |
-+ tcemem = list_first_entry(&container->prereg_list, |
4552 |
-+ struct tce_iommu_prereg, next); |
4553 |
-+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem)); |
4554 |
- } |
4555 |
- |
4556 |
- tce_iommu_disable(container); |
4557 |
-+ if (container->mm) |
4558 |
-+ mmdrop(container->mm); |
4559 |
- mutex_destroy(&container->lock); |
4560 |
- |
4561 |
- kfree(container); |
4562 |
-@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container, |
4563 |
- put_page(page); |
4564 |
- } |
4565 |
- |
4566 |
--static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, |
4567 |
-+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, |
4568 |
-+ unsigned long tce, unsigned long size, |
4569 |
- unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem) |
4570 |
- { |
4571 |
- long ret = 0; |
4572 |
- struct mm_iommu_table_group_mem_t *mem; |
4573 |
- |
4574 |
-- mem = mm_iommu_lookup(tce, size); |
4575 |
-+ mem = mm_iommu_lookup(container->mm, tce, size); |
4576 |
- if (!mem) |
4577 |
- return -EINVAL; |
4578 |
- |
4579 |
-@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size, |
4580 |
- return 0; |
4581 |
- } |
4582 |
- |
4583 |
--static void tce_iommu_unuse_page_v2(struct iommu_table *tbl, |
4584 |
-- unsigned long entry) |
4585 |
-+static void tce_iommu_unuse_page_v2(struct tce_container *container, |
4586 |
-+ struct iommu_table *tbl, unsigned long entry) |
4587 |
- { |
4588 |
- struct mm_iommu_table_group_mem_t *mem = NULL; |
4589 |
- int ret; |
4590 |
- unsigned long hpa = 0; |
4591 |
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); |
4592 |
- |
4593 |
-- if (!pua || !current || !current->mm) |
4594 |
-+ if (!pua) |
4595 |
- return; |
4596 |
- |
4597 |
-- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl), |
4598 |
-+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl), |
4599 |
- &hpa, &mem); |
4600 |
- if (ret) |
4601 |
- pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n", |
4602 |
-@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container, |
4603 |
- continue; |
4604 |
- |
4605 |
- if (container->v2) { |
4606 |
-- tce_iommu_unuse_page_v2(tbl, entry); |
4607 |
-+ tce_iommu_unuse_page_v2(container, tbl, entry); |
4608 |
- continue; |
4609 |
- } |
4610 |
- |
4611 |
-@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container, |
4612 |
- unsigned long hpa; |
4613 |
- enum dma_data_direction dirtmp; |
4614 |
- |
4615 |
-+ if (!tbl->it_userspace) { |
4616 |
-+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm); |
4617 |
-+ if (ret) |
4618 |
-+ return ret; |
4619 |
-+ } |
4620 |
-+ |
4621 |
- for (i = 0; i < pages; ++i) { |
4622 |
- struct mm_iommu_table_group_mem_t *mem = NULL; |
4623 |
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, |
4624 |
- entry + i); |
4625 |
- |
4626 |
-- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl), |
4627 |
-- &hpa, &mem); |
4628 |
-+ ret = tce_iommu_prereg_ua_to_hpa(container, |
4629 |
-+ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem); |
4630 |
- if (ret) |
4631 |
- break; |
4632 |
- |
4633 |
-@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container, |
4634 |
- ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp); |
4635 |
- if (ret) { |
4636 |
- /* dirtmp cannot be DMA_NONE here */ |
4637 |
-- tce_iommu_unuse_page_v2(tbl, entry + i); |
4638 |
-+ tce_iommu_unuse_page_v2(container, tbl, entry + i); |
4639 |
- pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n", |
4640 |
- __func__, entry << tbl->it_page_shift, |
4641 |
- tce, ret); |
4642 |
-@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container, |
4643 |
- } |
4644 |
- |
4645 |
- if (dirtmp != DMA_NONE) |
4646 |
-- tce_iommu_unuse_page_v2(tbl, entry + i); |
4647 |
-+ tce_iommu_unuse_page_v2(container, tbl, entry + i); |
4648 |
- |
4649 |
- *pua = tce; |
4650 |
- |
4651 |
-@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container, |
4652 |
- if (!table_size) |
4653 |
- return -EINVAL; |
4654 |
- |
4655 |
-- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT); |
4656 |
-+ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); |
4657 |
- if (ret) |
4658 |
- return ret; |
4659 |
- |
4660 |
-@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container, |
4661 |
- WARN_ON(!ret && !(*ptbl)->it_ops->free); |
4662 |
- WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size)); |
4663 |
- |
4664 |
-- if (!ret && container->v2) { |
4665 |
-- ret = tce_iommu_userspace_view_alloc(*ptbl); |
4666 |
-- if (ret) |
4667 |
-- (*ptbl)->it_ops->free(*ptbl); |
4668 |
-- } |
4669 |
-- |
4670 |
-- if (ret) |
4671 |
-- decrement_locked_vm(table_size >> PAGE_SHIFT); |
4672 |
-- |
4673 |
- return ret; |
4674 |
- } |
4675 |
- |
4676 |
--static void tce_iommu_free_table(struct iommu_table *tbl) |
4677 |
-+static void tce_iommu_free_table(struct tce_container *container, |
4678 |
-+ struct iommu_table *tbl) |
4679 |
- { |
4680 |
- unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; |
4681 |
- |
4682 |
-- tce_iommu_userspace_view_free(tbl); |
4683 |
-+ tce_iommu_userspace_view_free(tbl, container->mm); |
4684 |
- tbl->it_ops->free(tbl); |
4685 |
-- decrement_locked_vm(pages); |
4686 |
-+ decrement_locked_vm(container->mm, pages); |
4687 |
- } |
4688 |
- |
4689 |
- static long tce_iommu_create_window(struct tce_container *container, |
4690 |
-@@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container, |
4691 |
- table_group = iommu_group_get_iommudata(tcegrp->grp); |
4692 |
- table_group->ops->unset_window(table_group, num); |
4693 |
- } |
4694 |
-- tce_iommu_free_table(tbl); |
4695 |
-+ tce_iommu_free_table(container, tbl); |
4696 |
- |
4697 |
- return ret; |
4698 |
- } |
4699 |
-@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container, |
4700 |
- |
4701 |
- /* Free table */ |
4702 |
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
4703 |
-- tce_iommu_free_table(tbl); |
4704 |
-+ tce_iommu_free_table(container, tbl); |
4705 |
- container->tables[num] = NULL; |
4706 |
- |
4707 |
- return 0; |
4708 |
- } |
4709 |
- |
4710 |
-+static long tce_iommu_create_default_window(struct tce_container *container) |
4711 |
-+{ |
4712 |
-+ long ret; |
4713 |
-+ __u64 start_addr = 0; |
4714 |
-+ struct tce_iommu_group *tcegrp; |
4715 |
-+ struct iommu_table_group *table_group; |
4716 |
-+ |
4717 |
-+ if (!container->def_window_pending) |
4718 |
-+ return 0; |
4719 |
-+ |
4720 |
-+ if (!tce_groups_attached(container)) |
4721 |
-+ return -ENODEV; |
4722 |
-+ |
4723 |
-+ tcegrp = list_first_entry(&container->group_list, |
4724 |
-+ struct tce_iommu_group, next); |
4725 |
-+ table_group = iommu_group_get_iommudata(tcegrp->grp); |
4726 |
-+ if (!table_group) |
4727 |
-+ return -ENODEV; |
4728 |
-+ |
4729 |
-+ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, |
4730 |
-+ table_group->tce32_size, 1, &start_addr); |
4731 |
-+ WARN_ON_ONCE(!ret && start_addr); |
4732 |
-+ |
4733 |
-+ if (!ret) |
4734 |
-+ container->def_window_pending = false; |
4735 |
-+ |
4736 |
-+ return ret; |
4737 |
-+} |
4738 |
-+ |
4739 |
- static long tce_iommu_ioctl(void *iommu_data, |
4740 |
- unsigned int cmd, unsigned long arg) |
4741 |
- { |
4742 |
-@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data, |
4743 |
- } |
4744 |
- |
4745 |
- return (ret < 0) ? 0 : ret; |
4746 |
-+ } |
4747 |
-+ |
4748 |
-+ /* |
4749 |
-+ * Sanity check to prevent one userspace from manipulating |
4750 |
-+ * another userspace mm. |
4751 |
-+ */ |
4752 |
-+ BUG_ON(!container); |
4753 |
-+ if (container->mm && container->mm != current->mm) |
4754 |
-+ return -EPERM; |
4755 |
- |
4756 |
-+ switch (cmd) { |
4757 |
- case VFIO_IOMMU_SPAPR_TCE_GET_INFO: { |
4758 |
- struct vfio_iommu_spapr_tce_info info; |
4759 |
- struct tce_iommu_group *tcegrp; |
4760 |
-@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4761 |
- VFIO_DMA_MAP_FLAG_WRITE)) |
4762 |
- return -EINVAL; |
4763 |
- |
4764 |
-+ ret = tce_iommu_create_default_window(container); |
4765 |
-+ if (ret) |
4766 |
-+ return ret; |
4767 |
-+ |
4768 |
- num = tce_iommu_find_table(container, param.iova, &tbl); |
4769 |
- if (num < 0) |
4770 |
- return -ENXIO; |
4771 |
-@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4772 |
- if (param.flags) |
4773 |
- return -EINVAL; |
4774 |
- |
4775 |
-+ ret = tce_iommu_create_default_window(container); |
4776 |
-+ if (ret) |
4777 |
-+ return ret; |
4778 |
-+ |
4779 |
- num = tce_iommu_find_table(container, param.iova, &tbl); |
4780 |
- if (num < 0) |
4781 |
- return -ENXIO; |
4782 |
-@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4783 |
- minsz = offsetofend(struct vfio_iommu_spapr_register_memory, |
4784 |
- size); |
4785 |
- |
4786 |
-+ ret = tce_iommu_mm_set(container); |
4787 |
-+ if (ret) |
4788 |
-+ return ret; |
4789 |
-+ |
4790 |
- if (copy_from_user(¶m, (void __user *)arg, minsz)) |
4791 |
- return -EFAULT; |
4792 |
- |
4793 |
-@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data, |
4794 |
- if (!container->v2) |
4795 |
- break; |
4796 |
- |
4797 |
-+ if (!container->mm) |
4798 |
-+ return -EPERM; |
4799 |
-+ |
4800 |
- minsz = offsetofend(struct vfio_iommu_spapr_register_memory, |
4801 |
- size); |
4802 |
- |
4803 |
-@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4804 |
- if (!container->v2) |
4805 |
- break; |
4806 |
- |
4807 |
-+ ret = tce_iommu_mm_set(container); |
4808 |
-+ if (ret) |
4809 |
-+ return ret; |
4810 |
-+ |
4811 |
- if (!tce_groups_attached(container)) |
4812 |
- return -ENXIO; |
4813 |
- |
4814 |
-@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4815 |
- |
4816 |
- mutex_lock(&container->lock); |
4817 |
- |
4818 |
-+ ret = tce_iommu_create_default_window(container); |
4819 |
-+ if (ret) |
4820 |
-+ return ret; |
4821 |
-+ |
4822 |
- ret = tce_iommu_create_window(container, create.page_shift, |
4823 |
- create.window_size, create.levels, |
4824 |
- &create.start_addr); |
4825 |
-@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data, |
4826 |
- if (!container->v2) |
4827 |
- break; |
4828 |
- |
4829 |
-+ ret = tce_iommu_mm_set(container); |
4830 |
-+ if (ret) |
4831 |
-+ return ret; |
4832 |
-+ |
4833 |
- if (!tce_groups_attached(container)) |
4834 |
- return -ENXIO; |
4835 |
- |
4836 |
-@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data, |
4837 |
- if (remove.flags) |
4838 |
- return -EINVAL; |
4839 |
- |
4840 |
-+ if (container->def_window_pending && !remove.start_addr) { |
4841 |
-+ container->def_window_pending = false; |
4842 |
-+ return 0; |
4843 |
-+ } |
4844 |
-+ |
4845 |
- mutex_lock(&container->lock); |
4846 |
- |
4847 |
- ret = tce_iommu_remove_window(container, remove.start_addr); |
4848 |
-@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container, |
4849 |
- continue; |
4850 |
- |
4851 |
- tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); |
4852 |
-- tce_iommu_userspace_view_free(tbl); |
4853 |
-+ tce_iommu_userspace_view_free(tbl, container->mm); |
4854 |
- if (tbl->it_map) |
4855 |
- iommu_release_ownership(tbl); |
4856 |
- |
4857 |
-@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container, |
4858 |
- if (!tbl || !tbl->it_map) |
4859 |
- continue; |
4860 |
- |
4861 |
-- rc = tce_iommu_userspace_view_alloc(tbl); |
4862 |
-- if (!rc) |
4863 |
-- rc = iommu_take_ownership(tbl); |
4864 |
-- |
4865 |
-+ rc = iommu_take_ownership(tbl); |
4866 |
- if (rc) { |
4867 |
- for (j = 0; j < i; ++j) |
4868 |
- iommu_release_ownership( |
4869 |
-@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, |
4870 |
- static long tce_iommu_take_ownership_ddw(struct tce_container *container, |
4871 |
- struct iommu_table_group *table_group) |
4872 |
- { |
4873 |
-- long i, ret = 0; |
4874 |
-- struct iommu_table *tbl = NULL; |
4875 |
-- |
4876 |
- if (!table_group->ops->create_table || !table_group->ops->set_window || |
4877 |
- !table_group->ops->release_ownership) { |
4878 |
- WARN_ON_ONCE(1); |
4879 |
-@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, |
4880 |
- |
4881 |
- table_group->ops->take_ownership(table_group); |
4882 |
- |
4883 |
-- /* |
4884 |
-- * If it the first group attached, check if there is |
4885 |
-- * a default DMA window and create one if none as |
4886 |
-- * the userspace expects it to exist. |
4887 |
-- */ |
4888 |
-- if (!tce_groups_attached(container) && !container->tables[0]) { |
4889 |
-- ret = tce_iommu_create_table(container, |
4890 |
-- table_group, |
4891 |
-- 0, /* window number */ |
4892 |
-- IOMMU_PAGE_SHIFT_4K, |
4893 |
-- table_group->tce32_size, |
4894 |
-- 1, /* default levels */ |
4895 |
-- &tbl); |
4896 |
-- if (ret) |
4897 |
-- goto release_exit; |
4898 |
-- else |
4899 |
-- container->tables[0] = tbl; |
4900 |
-- } |
4901 |
-- |
4902 |
-- /* Set all windows to the new group */ |
4903 |
-- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { |
4904 |
-- tbl = container->tables[i]; |
4905 |
-- |
4906 |
-- if (!tbl) |
4907 |
-- continue; |
4908 |
-- |
4909 |
-- /* Set the default window to a new group */ |
4910 |
-- ret = table_group->ops->set_window(table_group, i, tbl); |
4911 |
-- if (ret) |
4912 |
-- goto release_exit; |
4913 |
-- } |
4914 |
-- |
4915 |
- return 0; |
4916 |
-- |
4917 |
--release_exit: |
4918 |
-- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) |
4919 |
-- table_group->ops->unset_window(table_group, i); |
4920 |
-- |
4921 |
-- table_group->ops->release_ownership(table_group); |
4922 |
-- |
4923 |
-- return ret; |
4924 |
- } |
4925 |
- |
4926 |
- static int tce_iommu_attach_group(void *iommu_data, |
4927 |
-@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data, |
4928 |
- } |
4929 |
- |
4930 |
- if (!table_group->ops || !table_group->ops->take_ownership || |
4931 |
-- !table_group->ops->release_ownership) |
4932 |
-+ !table_group->ops->release_ownership) { |
4933 |
- ret = tce_iommu_take_ownership(container, table_group); |
4934 |
-- else |
4935 |
-+ } else { |
4936 |
- ret = tce_iommu_take_ownership_ddw(container, table_group); |
4937 |
-+ if (!tce_groups_attached(container) && !container->tables[0]) |
4938 |
-+ container->def_window_pending = true; |
4939 |
-+ } |
4940 |
- |
4941 |
- if (!ret) { |
4942 |
- tcegrp->grp = iommu_group; |
4943 |
-diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h |
4944 |
-index 6aaf425..a13b031 100644 |
4945 |
---- a/include/linux/bpf_verifier.h |
4946 |
-+++ b/include/linux/bpf_verifier.h |
4947 |
-@@ -18,19 +18,12 @@ |
4948 |
- |
4949 |
- struct bpf_reg_state { |
4950 |
- enum bpf_reg_type type; |
4951 |
-- /* |
4952 |
-- * Used to determine if any memory access using this register will |
4953 |
-- * result in a bad access. |
4954 |
-- */ |
4955 |
-- s64 min_value; |
4956 |
-- u64 max_value; |
4957 |
- union { |
4958 |
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
4959 |
- s64 imm; |
4960 |
- |
4961 |
- /* valid when type == PTR_TO_PACKET* */ |
4962 |
- struct { |
4963 |
-- u32 id; |
4964 |
- u16 off; |
4965 |
- u16 range; |
4966 |
- }; |
4967 |
-@@ -40,6 +33,13 @@ struct bpf_reg_state { |
4968 |
- */ |
4969 |
- struct bpf_map *map_ptr; |
4970 |
- }; |
4971 |
-+ u32 id; |
4972 |
-+ /* Used to determine if any memory access using this register will |
4973 |
-+ * result in a bad access. These two fields must be last. |
4974 |
-+ * See states_equal() |
4975 |
-+ */ |
4976 |
-+ s64 min_value; |
4977 |
-+ u64 max_value; |
4978 |
- }; |
4979 |
- |
4980 |
- enum bpf_stack_slot_type { |
4981 |
-diff --git a/include/linux/dccp.h b/include/linux/dccp.h |
4982 |
-index 61d042b..6844929 100644 |
4983 |
---- a/include/linux/dccp.h |
4984 |
-+++ b/include/linux/dccp.h |
4985 |
-@@ -163,6 +163,7 @@ struct dccp_request_sock { |
4986 |
- __u64 dreq_isr; |
4987 |
- __u64 dreq_gsr; |
4988 |
- __be32 dreq_service; |
4989 |
-+ spinlock_t dreq_lock; |
4990 |
- struct list_head dreq_featneg; |
4991 |
- __u32 dreq_timestamp_echo; |
4992 |
- __u32 dreq_timestamp_time; |
4993 |
-diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h |
4994 |
-index 192eef2f..d596a07 100644 |
4995 |
---- a/include/linux/hyperv.h |
4996 |
-+++ b/include/linux/hyperv.h |
4997 |
-@@ -1548,31 +1548,23 @@ static inline struct vmpacket_descriptor * |
4998 |
- get_next_pkt_raw(struct vmbus_channel *channel) |
4999 |
- { |
5000 |
- struct hv_ring_buffer_info *ring_info = &channel->inbound; |
5001 |
-- u32 read_loc = ring_info->priv_read_index; |
5002 |
-+ u32 priv_read_loc = ring_info->priv_read_index; |
5003 |
- void *ring_buffer = hv_get_ring_buffer(ring_info); |
5004 |
-- struct vmpacket_descriptor *cur_desc; |
5005 |
-- u32 packetlen; |
5006 |
- u32 dsize = ring_info->ring_datasize; |
5007 |
-- u32 delta = read_loc - ring_info->ring_buffer->read_index; |
5008 |
-+ /* |
5009 |
-+ * delta is the difference between what is available to read and |
5010 |
-+ * what was already consumed in place. We commit read index after |
5011 |
-+ * the whole batch is processed. |
5012 |
-+ */ |
5013 |
-+ u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ? |
5014 |
-+ priv_read_loc - ring_info->ring_buffer->read_index : |
5015 |
-+ (dsize - ring_info->ring_buffer->read_index) + priv_read_loc; |
5016 |
- u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); |
5017 |
- |
5018 |
- if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) |
5019 |
- return NULL; |
5020 |
- |
5021 |
-- if ((read_loc + sizeof(*cur_desc)) > dsize) |
5022 |
-- return NULL; |
5023 |
-- |
5024 |
-- cur_desc = ring_buffer + read_loc; |
5025 |
-- packetlen = cur_desc->len8 << 3; |
5026 |
-- |
5027 |
-- /* |
5028 |
-- * If the packet under consideration is wrapping around, |
5029 |
-- * return failure. |
5030 |
-- */ |
5031 |
-- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) |
5032 |
-- return NULL; |
5033 |
-- |
5034 |
-- return cur_desc; |
5035 |
-+ return ring_buffer + priv_read_loc; |
5036 |
- } |
5037 |
- |
5038 |
- /* |
5039 |
-@@ -1584,16 +1576,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel, |
5040 |
- struct vmpacket_descriptor *desc) |
5041 |
- { |
5042 |
- struct hv_ring_buffer_info *ring_info = &channel->inbound; |
5043 |
-- u32 read_loc = ring_info->priv_read_index; |
5044 |
- u32 packetlen = desc->len8 << 3; |
5045 |
- u32 dsize = ring_info->ring_datasize; |
5046 |
- |
5047 |
-- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) |
5048 |
-- BUG(); |
5049 |
- /* |
5050 |
- * Include the packet trailer. |
5051 |
- */ |
5052 |
- ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; |
5053 |
-+ ring_info->priv_read_index %= dsize; |
5054 |
- } |
5055 |
- |
5056 |
- /* |
5057 |
-diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h |
5058 |
-index d08c63f..0c5d5dd 100644 |
5059 |
---- a/include/uapi/linux/packet_diag.h |
5060 |
-+++ b/include/uapi/linux/packet_diag.h |
5061 |
-@@ -64,7 +64,7 @@ struct packet_diag_mclist { |
5062 |
- __u32 pdmc_count; |
5063 |
- __u16 pdmc_type; |
5064 |
- __u16 pdmc_alen; |
5065 |
-- __u8 pdmc_addr[MAX_ADDR_LEN]; |
5066 |
-+ __u8 pdmc_addr[32]; /* MAX_ADDR_LEN */ |
5067 |
- }; |
5068 |
- |
5069 |
- struct packet_diag_ring { |
5070 |
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
5071 |
-index 8199821..85d1c94 100644 |
5072 |
---- a/kernel/bpf/verifier.c |
5073 |
-+++ b/kernel/bpf/verifier.c |
5074 |
-@@ -212,9 +212,10 @@ static void print_verifier_state(struct bpf_verifier_state *state) |
5075 |
- else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || |
5076 |
- t == PTR_TO_MAP_VALUE_OR_NULL || |
5077 |
- t == PTR_TO_MAP_VALUE_ADJ) |
5078 |
-- verbose("(ks=%d,vs=%d)", |
5079 |
-+ verbose("(ks=%d,vs=%d,id=%u)", |
5080 |
- reg->map_ptr->key_size, |
5081 |
-- reg->map_ptr->value_size); |
5082 |
-+ reg->map_ptr->value_size, |
5083 |
-+ reg->id); |
5084 |
- if (reg->min_value != BPF_REGISTER_MIN_RANGE) |
5085 |
- verbose(",min_value=%lld", |
5086 |
- (long long)reg->min_value); |
5087 |
-@@ -443,13 +444,19 @@ static void init_reg_state(struct bpf_reg_state *regs) |
5088 |
- regs[BPF_REG_1].type = PTR_TO_CTX; |
5089 |
- } |
5090 |
- |
5091 |
--static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
5092 |
-+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
5093 |
- { |
5094 |
-- BUG_ON(regno >= MAX_BPF_REG); |
5095 |
- regs[regno].type = UNKNOWN_VALUE; |
5096 |
-+ regs[regno].id = 0; |
5097 |
- regs[regno].imm = 0; |
5098 |
- } |
5099 |
- |
5100 |
-+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) |
5101 |
-+{ |
5102 |
-+ BUG_ON(regno >= MAX_BPF_REG); |
5103 |
-+ __mark_reg_unknown_value(regs, regno); |
5104 |
-+} |
5105 |
-+ |
5106 |
- static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) |
5107 |
- { |
5108 |
- regs[regno].min_value = BPF_REGISTER_MIN_RANGE; |
5109 |
-@@ -1252,6 +1259,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id) |
5110 |
- return -EINVAL; |
5111 |
- } |
5112 |
- regs[BPF_REG_0].map_ptr = meta.map_ptr; |
5113 |
-+ regs[BPF_REG_0].id = ++env->id_gen; |
5114 |
- } else { |
5115 |
- verbose("unknown return type %d of func %d\n", |
5116 |
- fn->ret_type, func_id); |
5117 |
-@@ -1668,8 +1676,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
5118 |
- insn->src_reg); |
5119 |
- return -EACCES; |
5120 |
- } |
5121 |
-- regs[insn->dst_reg].type = UNKNOWN_VALUE; |
5122 |
-- regs[insn->dst_reg].map_ptr = NULL; |
5123 |
-+ mark_reg_unknown_value(regs, insn->dst_reg); |
5124 |
- } |
5125 |
- } else { |
5126 |
- /* case: R = imm |
5127 |
-@@ -1931,6 +1938,43 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, |
5128 |
- check_reg_overflow(true_reg); |
5129 |
- } |
5130 |
- |
5131 |
-+static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, |
5132 |
-+ enum bpf_reg_type type) |
5133 |
-+{ |
5134 |
-+ struct bpf_reg_state *reg = ®s[regno]; |
5135 |
-+ |
5136 |
-+ if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { |
5137 |
-+ reg->type = type; |
5138 |
-+ /* We don't need id from this point onwards anymore, thus we |
5139 |
-+ * should better reset it, so that state pruning has chances |
5140 |
-+ * to take effect. |
5141 |
-+ */ |
5142 |
-+ reg->id = 0; |
5143 |
-+ if (type == UNKNOWN_VALUE) |
5144 |
-+ __mark_reg_unknown_value(regs, regno); |
5145 |
-+ } |
5146 |
-+} |
5147 |
-+ |
5148 |
-+/* The logic is similar to find_good_pkt_pointers(), both could eventually |
5149 |
-+ * be folded together at some point. |
5150 |
-+ */ |
5151 |
-+static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, |
5152 |
-+ enum bpf_reg_type type) |
5153 |
-+{ |
5154 |
-+ struct bpf_reg_state *regs = state->regs; |
5155 |
-+ u32 id = regs[regno].id; |
5156 |
-+ int i; |
5157 |
-+ |
5158 |
-+ for (i = 0; i < MAX_BPF_REG; i++) |
5159 |
-+ mark_map_reg(regs, i, id, type); |
5160 |
-+ |
5161 |
-+ for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
5162 |
-+ if (state->stack_slot_type[i] != STACK_SPILL) |
5163 |
-+ continue; |
5164 |
-+ mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); |
5165 |
-+ } |
5166 |
-+} |
5167 |
-+ |
5168 |
- static int check_cond_jmp_op(struct bpf_verifier_env *env, |
5169 |
- struct bpf_insn *insn, int *insn_idx) |
5170 |
- { |
5171 |
-@@ -2018,18 +2062,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, |
5172 |
- if (BPF_SRC(insn->code) == BPF_K && |
5173 |
- insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
5174 |
- dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { |
5175 |
-- if (opcode == BPF_JEQ) { |
5176 |
-- /* next fallthrough insn can access memory via |
5177 |
-- * this register |
5178 |
-- */ |
5179 |
-- regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; |
5180 |
-- /* branch targer cannot access it, since reg == 0 */ |
5181 |
-- mark_reg_unknown_value(other_branch->regs, |
5182 |
-- insn->dst_reg); |
5183 |
-- } else { |
5184 |
-- other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; |
5185 |
-- mark_reg_unknown_value(regs, insn->dst_reg); |
5186 |
-- } |
5187 |
-+ /* Mark all identical map registers in each branch as either |
5188 |
-+ * safe or unknown depending R == 0 or R != 0 conditional. |
5189 |
-+ */ |
5190 |
-+ mark_map_regs(this_branch, insn->dst_reg, |
5191 |
-+ opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); |
5192 |
-+ mark_map_regs(other_branch, insn->dst_reg, |
5193 |
-+ opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); |
5194 |
- } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && |
5195 |
- dst_reg->type == PTR_TO_PACKET && |
5196 |
- regs[insn->src_reg].type == PTR_TO_PACKET_END) { |
5197 |
-@@ -2469,7 +2508,7 @@ static bool states_equal(struct bpf_verifier_env *env, |
5198 |
- * we didn't do a variable access into a map then we are a-ok. |
5199 |
- */ |
5200 |
- if (!varlen_map_access && |
5201 |
-- rold->type == rcur->type && rold->imm == rcur->imm) |
5202 |
-+ memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) |
5203 |
- continue; |
5204 |
- |
5205 |
- /* If we didn't map access then again we don't care about the |
5206 |
-diff --git a/kernel/futex.c b/kernel/futex.c |
5207 |
-index 38b68c2..4c6b6e6 100644 |
5208 |
---- a/kernel/futex.c |
5209 |
-+++ b/kernel/futex.c |
5210 |
-@@ -2813,7 +2813,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
5211 |
- { |
5212 |
- struct hrtimer_sleeper timeout, *to = NULL; |
5213 |
- struct rt_mutex_waiter rt_waiter; |
5214 |
-- struct rt_mutex *pi_mutex = NULL; |
5215 |
- struct futex_hash_bucket *hb; |
5216 |
- union futex_key key2 = FUTEX_KEY_INIT; |
5217 |
- struct futex_q q = futex_q_init; |
5218 |
-@@ -2897,6 +2896,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
5219 |
- if (q.pi_state && (q.pi_state->owner != current)) { |
5220 |
- spin_lock(q.lock_ptr); |
5221 |
- ret = fixup_pi_state_owner(uaddr2, &q, current); |
5222 |
-+ if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) |
5223 |
-+ rt_mutex_unlock(&q.pi_state->pi_mutex); |
5224 |
- /* |
5225 |
- * Drop the reference to the pi state which |
5226 |
- * the requeue_pi() code acquired for us. |
5227 |
-@@ -2905,6 +2906,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
5228 |
- spin_unlock(q.lock_ptr); |
5229 |
- } |
5230 |
- } else { |
5231 |
-+ struct rt_mutex *pi_mutex; |
5232 |
-+ |
5233 |
- /* |
5234 |
- * We have been woken up by futex_unlock_pi(), a timeout, or a |
5235 |
- * signal. futex_unlock_pi() will not destroy the lock_ptr nor |
5236 |
-@@ -2928,18 +2931,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
5237 |
- if (res) |
5238 |
- ret = (res < 0) ? res : 0; |
5239 |
- |
5240 |
-+ /* |
5241 |
-+ * If fixup_pi_state_owner() faulted and was unable to handle |
5242 |
-+ * the fault, unlock the rt_mutex and return the fault to |
5243 |
-+ * userspace. |
5244 |
-+ */ |
5245 |
-+ if (ret && rt_mutex_owner(pi_mutex) == current) |
5246 |
-+ rt_mutex_unlock(pi_mutex); |
5247 |
-+ |
5248 |
- /* Unqueue and drop the lock. */ |
5249 |
- unqueue_me_pi(&q); |
5250 |
- } |
5251 |
- |
5252 |
-- /* |
5253 |
-- * If fixup_pi_state_owner() faulted and was unable to handle the |
5254 |
-- * fault, unlock the rt_mutex and return the fault to userspace. |
5255 |
-- */ |
5256 |
-- if (ret == -EFAULT) { |
5257 |
-- if (pi_mutex && rt_mutex_owner(pi_mutex) == current) |
5258 |
-- rt_mutex_unlock(pi_mutex); |
5259 |
-- } else if (ret == -EINTR) { |
5260 |
-+ if (ret == -EINTR) { |
5261 |
- /* |
5262 |
- * We've already been requeued, but cannot restart by calling |
5263 |
- * futex_lock_pi() directly. We could restart this syscall, but |
5264 |
-diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c |
5265 |
-index 1591f6b..2bef4ab 100644 |
5266 |
---- a/kernel/locking/rwsem-spinlock.c |
5267 |
-+++ b/kernel/locking/rwsem-spinlock.c |
5268 |
-@@ -216,10 +216,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) |
5269 |
- */ |
5270 |
- if (sem->count == 0) |
5271 |
- break; |
5272 |
-- if (signal_pending_state(state, current)) { |
5273 |
-- ret = -EINTR; |
5274 |
-- goto out; |
5275 |
-- } |
5276 |
-+ if (signal_pending_state(state, current)) |
5277 |
-+ goto out_nolock; |
5278 |
- set_task_state(tsk, state); |
5279 |
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
5280 |
- schedule(); |
5281 |
-@@ -227,12 +225,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) |
5282 |
- } |
5283 |
- /* got the lock */ |
5284 |
- sem->count = -1; |
5285 |
--out: |
5286 |
- list_del(&waiter.list); |
5287 |
- |
5288 |
- raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
5289 |
- |
5290 |
- return ret; |
5291 |
-+ |
5292 |
-+out_nolock: |
5293 |
-+ list_del(&waiter.list); |
5294 |
-+ if (!list_empty(&sem->wait_list)) |
5295 |
-+ __rwsem_do_wake(sem, 1); |
5296 |
-+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
5297 |
-+ |
5298 |
-+ return -EINTR; |
5299 |
- } |
5300 |
- |
5301 |
- void __sched __down_write(struct rw_semaphore *sem) |
5302 |
-diff --git a/mm/slab.c b/mm/slab.c |
5303 |
-index bd878f0..1f82d16 100644 |
5304 |
---- a/mm/slab.c |
5305 |
-+++ b/mm/slab.c |
5306 |
-@@ -2332,7 +2332,7 @@ static int drain_freelist(struct kmem_cache *cache, |
5307 |
- return nr_freed; |
5308 |
- } |
5309 |
- |
5310 |
--int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) |
5311 |
-+int __kmem_cache_shrink(struct kmem_cache *cachep) |
5312 |
- { |
5313 |
- int ret = 0; |
5314 |
- int node; |
5315 |
-@@ -2352,7 +2352,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate) |
5316 |
- |
5317 |
- int __kmem_cache_shutdown(struct kmem_cache *cachep) |
5318 |
- { |
5319 |
-- return __kmem_cache_shrink(cachep, false); |
5320 |
-+ return __kmem_cache_shrink(cachep); |
5321 |
- } |
5322 |
- |
5323 |
- void __kmem_cache_release(struct kmem_cache *cachep) |
5324 |
-diff --git a/mm/slab.h b/mm/slab.h |
5325 |
-index bc05fdc..ceb7d70 100644 |
5326 |
---- a/mm/slab.h |
5327 |
-+++ b/mm/slab.h |
5328 |
-@@ -146,7 +146,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size, |
5329 |
- |
5330 |
- int __kmem_cache_shutdown(struct kmem_cache *); |
5331 |
- void __kmem_cache_release(struct kmem_cache *); |
5332 |
--int __kmem_cache_shrink(struct kmem_cache *, bool); |
5333 |
-+int __kmem_cache_shrink(struct kmem_cache *); |
5334 |
- void slab_kmem_cache_release(struct kmem_cache *); |
5335 |
- |
5336 |
- struct seq_file; |
5337 |
-diff --git a/mm/slab_common.c b/mm/slab_common.c |
5338 |
-index 329b038..5d2f24f 100644 |
5339 |
---- a/mm/slab_common.c |
5340 |
-+++ b/mm/slab_common.c |
5341 |
-@@ -573,6 +573,29 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
5342 |
- get_online_cpus(); |
5343 |
- get_online_mems(); |
5344 |
- |
5345 |
-+#ifdef CONFIG_SLUB |
5346 |
-+ /* |
5347 |
-+ * In case of SLUB, we need to disable empty slab caching to |
5348 |
-+ * avoid pinning the offline memory cgroup by freeable kmem |
5349 |
-+ * pages charged to it. SLAB doesn't need this, as it |
5350 |
-+ * periodically purges unused slabs. |
5351 |
-+ */ |
5352 |
-+ mutex_lock(&slab_mutex); |
5353 |
-+ list_for_each_entry(s, &slab_caches, list) { |
5354 |
-+ c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL; |
5355 |
-+ if (c) { |
5356 |
-+ c->cpu_partial = 0; |
5357 |
-+ c->min_partial = 0; |
5358 |
-+ } |
5359 |
-+ } |
5360 |
-+ mutex_unlock(&slab_mutex); |
5361 |
-+ /* |
5362 |
-+ * kmem_cache->cpu_partial is checked locklessly (see |
5363 |
-+ * put_cpu_partial()). Make sure the change is visible. |
5364 |
-+ */ |
5365 |
-+ synchronize_sched(); |
5366 |
-+#endif |
5367 |
-+ |
5368 |
- mutex_lock(&slab_mutex); |
5369 |
- list_for_each_entry(s, &slab_caches, list) { |
5370 |
- if (!is_root_cache(s)) |
5371 |
-@@ -584,7 +607,7 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) |
5372 |
- if (!c) |
5373 |
- continue; |
5374 |
- |
5375 |
-- __kmem_cache_shrink(c, true); |
5376 |
-+ __kmem_cache_shrink(c); |
5377 |
- arr->entries[idx] = NULL; |
5378 |
- } |
5379 |
- mutex_unlock(&slab_mutex); |
5380 |
-@@ -755,7 +778,7 @@ int kmem_cache_shrink(struct kmem_cache *cachep) |
5381 |
- get_online_cpus(); |
5382 |
- get_online_mems(); |
5383 |
- kasan_cache_shrink(cachep); |
5384 |
-- ret = __kmem_cache_shrink(cachep, false); |
5385 |
-+ ret = __kmem_cache_shrink(cachep); |
5386 |
- put_online_mems(); |
5387 |
- put_online_cpus(); |
5388 |
- return ret; |
5389 |
-diff --git a/mm/slob.c b/mm/slob.c |
5390 |
-index 5ec1580..eac04d43 100644 |
5391 |
---- a/mm/slob.c |
5392 |
-+++ b/mm/slob.c |
5393 |
-@@ -634,7 +634,7 @@ void __kmem_cache_release(struct kmem_cache *c) |
5394 |
- { |
5395 |
- } |
5396 |
- |
5397 |
--int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) |
5398 |
-+int __kmem_cache_shrink(struct kmem_cache *d) |
5399 |
- { |
5400 |
- return 0; |
5401 |
- } |
5402 |
-diff --git a/mm/slub.c b/mm/slub.c |
5403 |
-index 7aa0e97..58c7526 100644 |
5404 |
---- a/mm/slub.c |
5405 |
-+++ b/mm/slub.c |
5406 |
-@@ -3887,7 +3887,7 @@ EXPORT_SYMBOL(kfree); |
5407 |
- * being allocated from last increasing the chance that the last objects |
5408 |
- * are freed in them. |
5409 |
- */ |
5410 |
--int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) |
5411 |
-+int __kmem_cache_shrink(struct kmem_cache *s) |
5412 |
- { |
5413 |
- int node; |
5414 |
- int i; |
5415 |
-@@ -3899,21 +3899,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate) |
5416 |
- unsigned long flags; |
5417 |
- int ret = 0; |
5418 |
- |
5419 |
-- if (deactivate) { |
5420 |
-- /* |
5421 |
-- * Disable empty slabs caching. Used to avoid pinning offline |
5422 |
-- * memory cgroups by kmem pages that can be freed. |
5423 |
-- */ |
5424 |
-- s->cpu_partial = 0; |
5425 |
-- s->min_partial = 0; |
5426 |
-- |
5427 |
-- /* |
5428 |
-- * s->cpu_partial is checked locklessly (see put_cpu_partial), |
5429 |
-- * so we have to make sure the change is visible. |
5430 |
-- */ |
5431 |
-- synchronize_sched(); |
5432 |
-- } |
5433 |
-- |
5434 |
- flush_all(s); |
5435 |
- for_each_kmem_cache_node(s, node, n) { |
5436 |
- INIT_LIST_HEAD(&discard); |
5437 |
-@@ -3970,7 +3955,7 @@ static int slab_mem_going_offline_callback(void *arg) |
5438 |
- |
5439 |
- mutex_lock(&slab_mutex); |
5440 |
- list_for_each_entry(s, &slab_caches, list) |
5441 |
-- __kmem_cache_shrink(s, false); |
5442 |
-+ __kmem_cache_shrink(s); |
5443 |
- mutex_unlock(&slab_mutex); |
5444 |
- |
5445 |
- return 0; |
5446 |
-diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c |
5447 |
-index 7cb41ae..8498e35 100644 |
5448 |
---- a/net/bridge/br_forward.c |
5449 |
-+++ b/net/bridge/br_forward.c |
5450 |
-@@ -186,8 +186,9 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, |
5451 |
- /* Do not flood unicast traffic to ports that turn it off */ |
5452 |
- if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD)) |
5453 |
- continue; |
5454 |
-+ /* Do not flood if mc off, except for traffic we originate */ |
5455 |
- if (pkt_type == BR_PKT_MULTICAST && |
5456 |
-- !(p->flags & BR_MCAST_FLOOD)) |
5457 |
-+ !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) |
5458 |
- continue; |
5459 |
- |
5460 |
- /* Do not flood to ports that enable proxy ARP */ |
5461 |
-diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c |
5462 |
-index 855b72f..267b46a 100644 |
5463 |
---- a/net/bridge/br_input.c |
5464 |
-+++ b/net/bridge/br_input.c |
5465 |
-@@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook); |
5466 |
- static int |
5467 |
- br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) |
5468 |
- { |
5469 |
-+ br_drop_fake_rtable(skb); |
5470 |
- return netif_receive_skb(skb); |
5471 |
- } |
5472 |
- |
5473 |
-diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c |
5474 |
-index 7fbdbae..aa1df1a 100644 |
5475 |
---- a/net/bridge/br_netfilter_hooks.c |
5476 |
-+++ b/net/bridge/br_netfilter_hooks.c |
5477 |
-@@ -521,21 +521,6 @@ static unsigned int br_nf_pre_routing(void *priv, |
5478 |
- } |
5479 |
- |
5480 |
- |
5481 |
--/* PF_BRIDGE/LOCAL_IN ************************************************/ |
5482 |
--/* The packet is locally destined, which requires a real |
5483 |
-- * dst_entry, so detach the fake one. On the way up, the |
5484 |
-- * packet would pass through PRE_ROUTING again (which already |
5485 |
-- * took place when the packet entered the bridge), but we |
5486 |
-- * register an IPv4 PRE_ROUTING 'sabotage' hook that will |
5487 |
-- * prevent this from happening. */ |
5488 |
--static unsigned int br_nf_local_in(void *priv, |
5489 |
-- struct sk_buff *skb, |
5490 |
-- const struct nf_hook_state *state) |
5491 |
--{ |
5492 |
-- br_drop_fake_rtable(skb); |
5493 |
-- return NF_ACCEPT; |
5494 |
--} |
5495 |
-- |
5496 |
- /* PF_BRIDGE/FORWARD *************************************************/ |
5497 |
- static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
5498 |
- { |
5499 |
-@@ -906,12 +891,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = { |
5500 |
- .priority = NF_BR_PRI_BRNF, |
5501 |
- }, |
5502 |
- { |
5503 |
-- .hook = br_nf_local_in, |
5504 |
-- .pf = NFPROTO_BRIDGE, |
5505 |
-- .hooknum = NF_BR_LOCAL_IN, |
5506 |
-- .priority = NF_BR_PRI_BRNF, |
5507 |
-- }, |
5508 |
-- { |
5509 |
- .hook = br_nf_forward_ip, |
5510 |
- .pf = NFPROTO_BRIDGE, |
5511 |
- .hooknum = NF_BR_FORWARD, |
5512 |
-diff --git a/net/core/dev.c b/net/core/dev.c |
5513 |
-index 60b0a604..2e04fd1 100644 |
5514 |
---- a/net/core/dev.c |
5515 |
-+++ b/net/core/dev.c |
5516 |
-@@ -1697,27 +1697,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue); |
5517 |
- static struct static_key netstamp_needed __read_mostly; |
5518 |
- #ifdef HAVE_JUMP_LABEL |
5519 |
- static atomic_t netstamp_needed_deferred; |
5520 |
-+static atomic_t netstamp_wanted; |
5521 |
- static void netstamp_clear(struct work_struct *work) |
5522 |
- { |
5523 |
- int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
5524 |
-+ int wanted; |
5525 |
- |
5526 |
-- while (deferred--) |
5527 |
-- static_key_slow_dec(&netstamp_needed); |
5528 |
-+ wanted = atomic_add_return(deferred, &netstamp_wanted); |
5529 |
-+ if (wanted > 0) |
5530 |
-+ static_key_enable(&netstamp_needed); |
5531 |
-+ else |
5532 |
-+ static_key_disable(&netstamp_needed); |
5533 |
- } |
5534 |
- static DECLARE_WORK(netstamp_work, netstamp_clear); |
5535 |
- #endif |
5536 |
- |
5537 |
- void net_enable_timestamp(void) |
5538 |
- { |
5539 |
-+#ifdef HAVE_JUMP_LABEL |
5540 |
-+ int wanted; |
5541 |
-+ |
5542 |
-+ while (1) { |
5543 |
-+ wanted = atomic_read(&netstamp_wanted); |
5544 |
-+ if (wanted <= 0) |
5545 |
-+ break; |
5546 |
-+ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) |
5547 |
-+ return; |
5548 |
-+ } |
5549 |
-+ atomic_inc(&netstamp_needed_deferred); |
5550 |
-+ schedule_work(&netstamp_work); |
5551 |
-+#else |
5552 |
- static_key_slow_inc(&netstamp_needed); |
5553 |
-+#endif |
5554 |
- } |
5555 |
- EXPORT_SYMBOL(net_enable_timestamp); |
5556 |
- |
5557 |
- void net_disable_timestamp(void) |
5558 |
- { |
5559 |
- #ifdef HAVE_JUMP_LABEL |
5560 |
-- /* net_disable_timestamp() can be called from non process context */ |
5561 |
-- atomic_inc(&netstamp_needed_deferred); |
5562 |
-+ int wanted; |
5563 |
-+ |
5564 |
-+ while (1) { |
5565 |
-+ wanted = atomic_read(&netstamp_wanted); |
5566 |
-+ if (wanted <= 1) |
5567 |
-+ break; |
5568 |
-+ if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) |
5569 |
-+ return; |
5570 |
-+ } |
5571 |
-+ atomic_dec(&netstamp_needed_deferred); |
5572 |
- schedule_work(&netstamp_work); |
5573 |
- #else |
5574 |
- static_key_slow_dec(&netstamp_needed); |
5575 |
-diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
5576 |
-index 1e3e008..f0f462c 100644 |
5577 |
---- a/net/core/skbuff.c |
5578 |
-+++ b/net/core/skbuff.c |
5579 |
-@@ -3814,13 +3814,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, |
5580 |
- if (!skb_may_tx_timestamp(sk, false)) |
5581 |
- return; |
5582 |
- |
5583 |
-- /* take a reference to prevent skb_orphan() from freeing the socket */ |
5584 |
-- sock_hold(sk); |
5585 |
-- |
5586 |
-- *skb_hwtstamps(skb) = *hwtstamps; |
5587 |
-- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
5588 |
-- |
5589 |
-- sock_put(sk); |
5590 |
-+ /* Take a reference to prevent skb_orphan() from freeing the socket, |
5591 |
-+ * but only if the socket refcount is not zero. |
5592 |
-+ */ |
5593 |
-+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { |
5594 |
-+ *skb_hwtstamps(skb) = *hwtstamps; |
5595 |
-+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); |
5596 |
-+ sock_put(sk); |
5597 |
-+ } |
5598 |
- } |
5599 |
- EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
5600 |
- |
5601 |
-@@ -3871,7 +3872,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
5602 |
- { |
5603 |
- struct sock *sk = skb->sk; |
5604 |
- struct sock_exterr_skb *serr; |
5605 |
-- int err; |
5606 |
-+ int err = 1; |
5607 |
- |
5608 |
- skb->wifi_acked_valid = 1; |
5609 |
- skb->wifi_acked = acked; |
5610 |
-@@ -3881,14 +3882,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) |
5611 |
- serr->ee.ee_errno = ENOMSG; |
5612 |
- serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; |
5613 |
- |
5614 |
-- /* take a reference to prevent skb_orphan() from freeing the socket */ |
5615 |
-- sock_hold(sk); |
5616 |
-- |
5617 |
-- err = sock_queue_err_skb(sk, skb); |
5618 |
-+ /* Take a reference to prevent skb_orphan() from freeing the socket, |
5619 |
-+ * but only if the socket refcount is not zero. |
5620 |
-+ */ |
5621 |
-+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { |
5622 |
-+ err = sock_queue_err_skb(sk, skb); |
5623 |
-+ sock_put(sk); |
5624 |
-+ } |
5625 |
- if (err) |
5626 |
- kfree_skb(skb); |
5627 |
-- |
5628 |
-- sock_put(sk); |
5629 |
- } |
5630 |
- EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); |
5631 |
- |
5632 |
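Both skbuff.c hunks above replace an unconditional sock_hold() with atomic_inc_not_zero(&sk->sk_refcnt): a reference is taken only while the socket is still live, never resurrecting one whose refcount already hit zero. A sketch of the same idiom with C11 atomics (hypothetical helper name, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Succeeds only if *refcnt stayed nonzero across the CAS; a zero
 * count means the object is being freed and must not be revived. */
static bool ref_get_not_zero(atomic_int *refcnt)
{
        int v = atomic_load(refcnt);

        while (v != 0) {
                if (atomic_compare_exchange_weak(refcnt, &v, v + 1))
                        return true;   /* reference safely taken */
        }
        return false;                  /* object already dying */
}

int main(void)
{
        atomic_int live = 2, dead = 0;

        printf("live: %d dead: %d\n",
               ref_get_not_zero(&live), ref_get_not_zero(&dead));
        return 0;
}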
-diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c |
5633 |
-index f053198..5e3a730 100644 |
5634 |
---- a/net/dccp/ccids/ccid2.c |
5635 |
-+++ b/net/dccp/ccids/ccid2.c |
5636 |
-@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk) |
5637 |
- for (i = 0; i < hc->tx_seqbufc; i++) |
5638 |
- kfree(hc->tx_seqbuf[i]); |
5639 |
- hc->tx_seqbufc = 0; |
5640 |
-+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); |
5641 |
- } |
5642 |
- |
5643 |
- static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) |
5644 |
-diff --git a/net/dccp/input.c b/net/dccp/input.c |
5645 |
-index 8fedc2d..4a05d78 100644 |
5646 |
---- a/net/dccp/input.c |
5647 |
-+++ b/net/dccp/input.c |
5648 |
-@@ -577,6 +577,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
5649 |
- struct dccp_sock *dp = dccp_sk(sk); |
5650 |
- struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); |
5651 |
- const int old_state = sk->sk_state; |
5652 |
-+ bool acceptable; |
5653 |
- int queued = 0; |
5654 |
- |
5655 |
- /* |
5656 |
-@@ -603,8 +604,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
5657 |
- */ |
5658 |
- if (sk->sk_state == DCCP_LISTEN) { |
5659 |
- if (dh->dccph_type == DCCP_PKT_REQUEST) { |
5660 |
-- if (inet_csk(sk)->icsk_af_ops->conn_request(sk, |
5661 |
-- skb) < 0) |
5662 |
-+ /* It is possible that we process SYN packets from backlog, |
5663 |
-+ * so we need to make sure to disable BH right there. |
5664 |
-+ */ |
5665 |
-+ local_bh_disable(); |
5666 |
-+ acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; |
5667 |
-+ local_bh_enable(); |
5668 |
-+ if (!acceptable) |
5669 |
- return 1; |
5670 |
- consume_skb(skb); |
5671 |
- return 0; |
5672 |
-diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c |
5673 |
-index edbe59d..86b0933 100644 |
5674 |
---- a/net/dccp/ipv4.c |
5675 |
-+++ b/net/dccp/ipv4.c |
5676 |
-@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) |
5677 |
- |
5678 |
- switch (type) { |
5679 |
- case ICMP_REDIRECT: |
5680 |
-- dccp_do_redirect(skb, sk); |
5681 |
-+ if (!sock_owned_by_user(sk)) |
5682 |
-+ dccp_do_redirect(skb, sk); |
5683 |
- goto out; |
5684 |
- case ICMP_SOURCE_QUENCH: |
5685 |
- /* Just silently ignore these. */ |
5686 |
-diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c |
5687 |
-index 7506c03..237d62c 100644 |
5688 |
---- a/net/dccp/ipv6.c |
5689 |
-+++ b/net/dccp/ipv6.c |
5690 |
-@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
5691 |
- np = inet6_sk(sk); |
5692 |
- |
5693 |
- if (type == NDISC_REDIRECT) { |
5694 |
-- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
5695 |
-+ if (!sock_owned_by_user(sk)) { |
5696 |
-+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
5697 |
- |
5698 |
-- if (dst) |
5699 |
-- dst->ops->redirect(dst, sk, skb); |
5700 |
-+ if (dst) |
5701 |
-+ dst->ops->redirect(dst, sk, skb); |
5702 |
-+ } |
5703 |
- goto out; |
5704 |
- } |
5705 |
- |
5706 |
-diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c |
5707 |
-index 53eddf9..39e7e2b 100644 |
5708 |
---- a/net/dccp/minisocks.c |
5709 |
-+++ b/net/dccp/minisocks.c |
5710 |
-@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk, |
5711 |
- /* It is still raw copy of parent, so invalidate |
5712 |
- * destructor and make plain sk_free() */ |
5713 |
- newsk->sk_destruct = NULL; |
5714 |
-+ bh_unlock_sock(newsk); |
5715 |
- sk_free(newsk); |
5716 |
- return NULL; |
5717 |
- } |
5718 |
-@@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
5719 |
- struct dccp_request_sock *dreq = dccp_rsk(req); |
5720 |
- bool own_req; |
5721 |
- |
5722 |
-+ /* TCP/DCCP listeners became lockless. |
5723 |
-+ * DCCP stores complex state in its request_sock, so we need |
5724 |
-+ * to protect it, now that this code runs without being protected
5725 |
-+ * by the parent (listener) lock. |
5726 |
-+ */ |
5727 |
-+ spin_lock_bh(&dreq->dreq_lock); |
5728 |
-+ |
5729 |
- /* Check for retransmitted REQUEST */ |
5730 |
- if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { |
5731 |
- |
5732 |
-@@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
5733 |
- inet_rtx_syn_ack(sk, req); |
5734 |
- } |
5735 |
- /* Network Duplicate, discard packet */ |
5736 |
-- return NULL; |
5737 |
-+ goto out; |
5738 |
- } |
5739 |
- |
5740 |
- DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; |
5741 |
-@@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, |
5742 |
- |
5743 |
- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, |
5744 |
- req, &own_req); |
5745 |
-- if (!child) |
5746 |
-- goto listen_overflow; |
5747 |
-- |
5748 |
-- return inet_csk_complete_hashdance(sk, child, req, own_req); |
5749 |
-+ if (child) { |
5750 |
-+ child = inet_csk_complete_hashdance(sk, child, req, own_req); |
5751 |
-+ goto out; |
5752 |
-+ } |
5753 |
- |
5754 |
--listen_overflow: |
5755 |
-- dccp_pr_debug("listen_overflow!\n"); |
5756 |
- DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; |
5757 |
- drop: |
5758 |
- if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) |
5759 |
- req->rsk_ops->send_reset(sk, skb); |
5760 |
- |
5761 |
- inet_csk_reqsk_queue_drop(sk, req); |
5762 |
-- return NULL; |
5763 |
-+out: |
5764 |
-+ spin_unlock_bh(&dreq->dreq_lock); |
5765 |
-+ return child; |
5766 |
- } |
5767 |
- |
5768 |
- EXPORT_SYMBOL_GPL(dccp_check_req); |
5769 |
-@@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req, |
5770 |
- { |
5771 |
- struct dccp_request_sock *dreq = dccp_rsk(req); |
5772 |
- |
5773 |
-+ spin_lock_init(&dreq->dreq_lock); |
5774 |
- inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; |
5775 |
- inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); |
5776 |
- inet_rsk(req)->acked = 0; |
5777 |
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c |
5778 |
-index 21514324..971b947 100644 |
5779 |
---- a/net/ipv4/af_inet.c |
5780 |
-+++ b/net/ipv4/af_inet.c |
5781 |
-@@ -1460,8 +1460,10 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff) |
5782 |
- int proto = iph->protocol; |
5783 |
- int err = -ENOSYS; |
5784 |
- |
5785 |
-- if (skb->encapsulation) |
5786 |
-+ if (skb->encapsulation) { |
5787 |
-+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP)); |
5788 |
- skb_set_inner_network_header(skb, nhoff); |
5789 |
-+ } |
5790 |
- |
5791 |
- csum_replace2(&iph->check, iph->tot_len, newlen); |
5792 |
- iph->tot_len = newlen; |
5793 |
-diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
5794 |
-index d851cae..17e6fbf 100644 |
5795 |
---- a/net/ipv4/route.c |
5796 |
-+++ b/net/ipv4/route.c |
5797 |
-@@ -1968,6 +1968,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
5798 |
- { |
5799 |
- int res; |
5800 |
- |
5801 |
-+ tos &= IPTOS_RT_MASK; |
5802 |
- rcu_read_lock(); |
5803 |
- |
5804 |
- /* Multicast recognition logic is moved from route cache to here. |
5805 |
-diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
5806 |
-index c71d49c..ce42ded 100644 |
5807 |
---- a/net/ipv4/tcp_input.c |
5808 |
-+++ b/net/ipv4/tcp_input.c |
5809 |
-@@ -5916,9 +5916,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) |
5810 |
- if (th->syn) { |
5811 |
- if (th->fin) |
5812 |
- goto discard; |
5813 |
-- if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) |
5814 |
-- return 1; |
5815 |
-+ /* It is possible that we process SYN packets from backlog, |
5816 |
-+ * so we need to make sure to disable BH right there. |
5817 |
-+ */ |
5818 |
-+ local_bh_disable(); |
5819 |
-+ acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; |
5820 |
-+ local_bh_enable(); |
5821 |
- |
5822 |
-+ if (!acceptable) |
5823 |
-+ return 1; |
5824 |
- consume_skb(skb); |
5825 |
- return 0; |
5826 |
- } |
5827 |
-diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c |
5828 |
-index 2259114..6988566 100644 |
5829 |
---- a/net/ipv4/tcp_ipv4.c |
5830 |
-+++ b/net/ipv4/tcp_ipv4.c |
5831 |
-@@ -269,10 +269,13 @@ EXPORT_SYMBOL(tcp_v4_connect); |
5832 |
- */ |
5833 |
- void tcp_v4_mtu_reduced(struct sock *sk) |
5834 |
- { |
5835 |
-- struct dst_entry *dst; |
5836 |
- struct inet_sock *inet = inet_sk(sk); |
5837 |
-- u32 mtu = tcp_sk(sk)->mtu_info; |
5838 |
-+ struct dst_entry *dst; |
5839 |
-+ u32 mtu; |
5840 |
- |
5841 |
-+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) |
5842 |
-+ return; |
5843 |
-+ mtu = tcp_sk(sk)->mtu_info; |
5844 |
- dst = inet_csk_update_pmtu(sk, mtu); |
5845 |
- if (!dst) |
5846 |
- return; |
5847 |
-@@ -418,7 +421,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) |
5848 |
- |
5849 |
- switch (type) { |
5850 |
- case ICMP_REDIRECT: |
5851 |
-- do_redirect(icmp_skb, sk); |
5852 |
-+ if (!sock_owned_by_user(sk)) |
5853 |
-+ do_redirect(icmp_skb, sk); |
5854 |
- goto out; |
5855 |
- case ICMP_SOURCE_QUENCH: |
5856 |
- /* Just silently ignore these. */ |
5857 |
-diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c |
5858 |
-index 3ea1cf8..b1e65b3 100644 |
5859 |
---- a/net/ipv4/tcp_timer.c |
5860 |
-+++ b/net/ipv4/tcp_timer.c |
5861 |
-@@ -249,7 +249,8 @@ void tcp_delack_timer_handler(struct sock *sk) |
5862 |
- |
5863 |
- sk_mem_reclaim_partial(sk); |
5864 |
- |
5865 |
-- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
5866 |
-+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || |
5867 |
-+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
5868 |
- goto out; |
5869 |
- |
5870 |
- if (time_after(icsk->icsk_ack.timeout, jiffies)) { |
5871 |
-@@ -552,7 +553,8 @@ void tcp_write_timer_handler(struct sock *sk) |
5872 |
- struct inet_connection_sock *icsk = inet_csk(sk); |
5873 |
- int event; |
5874 |
- |
5875 |
-- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) |
5876 |
-+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || |
5877 |
-+ !icsk->icsk_pending) |
5878 |
- goto out; |
5879 |
- |
5880 |
- if (time_after(icsk->icsk_timeout, jiffies)) { |
5881 |
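The tcp_timer.c hunks above swap a single equality test for `(1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)`, which tests membership in a set of states with one AND. A small standalone sketch of that bitmask idiom (the numeric values mirror the kernel's TCP_CLOSE=7 and TCP_LISTEN=10; the rest is made up):

#include <stdio.h>

enum state { ST_ESTABLISHED = 1, ST_CLOSE = 7, ST_LISTEN = 10 };

#define STF(s) (1u << (s))             /* one bit per state */

int main(void)
{
        enum state s = ST_LISTEN;

        /* One AND replaces a chain of == comparisons. */
        if (STF(s) & (STF(ST_CLOSE) | STF(ST_LISTEN)))
                printf("timer handler bails out early\n");
        return 0;
}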
-diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
5882 |
-index ef54852..8c88a37 100644 |
5883 |
---- a/net/ipv6/ip6_fib.c |
5884 |
-+++ b/net/ipv6/ip6_fib.c |
5885 |
-@@ -908,6 +908,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, |
5886 |
- ins = &rt->dst.rt6_next; |
5887 |
- iter = *ins; |
5888 |
- while (iter) { |
5889 |
-+ if (iter->rt6i_metric > rt->rt6i_metric) |
5890 |
-+ break; |
5891 |
- if (rt6_qualify_for_ecmp(iter)) { |
5892 |
- *ins = iter->dst.rt6_next; |
5893 |
- fib6_purge_rt(iter, fn, info->nl_net); |
5894 |
-diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c |
5895 |
-index fc7b401..33b04ec 100644 |
5896 |
---- a/net/ipv6/ip6_offload.c |
5897 |
-+++ b/net/ipv6/ip6_offload.c |
5898 |
-@@ -294,8 +294,10 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff) |
5899 |
- struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff); |
5900 |
- int err = -ENOSYS; |
5901 |
- |
5902 |
-- if (skb->encapsulation) |
5903 |
-+ if (skb->encapsulation) { |
5904 |
-+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6)); |
5905 |
- skb_set_inner_network_header(skb, nhoff); |
5906 |
-+ } |
5907 |
- |
5908 |
- iph->payload_len = htons(skb->len - nhoff - sizeof(*iph)); |
5909 |
- |
5910 |
-diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
5911 |
-index 9a87bfb..e27b8fd 100644 |
5912 |
---- a/net/ipv6/ip6_output.c |
5913 |
-+++ b/net/ipv6/ip6_output.c |
5914 |
-@@ -757,13 +757,14 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
5915 |
- * Fragment the datagram. |
5916 |
- */ |
5917 |
- |
5918 |
-- *prevhdr = NEXTHDR_FRAGMENT; |
5919 |
- troom = rt->dst.dev->needed_tailroom; |
5920 |
- |
5921 |
- /* |
5922 |
- * Keep copying data until we run out. |
5923 |
- */ |
5924 |
- while (left > 0) { |
5925 |
-+ u8 *fragnexthdr_offset; |
5926 |
-+ |
5927 |
- len = left; |
5928 |
- /* IF: it doesn't fit, use 'mtu' - the data space left */ |
5929 |
- if (len > mtu) |
5930 |
-@@ -808,6 +809,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, |
5931 |
- */ |
5932 |
- skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); |
5933 |
- |
5934 |
-+ fragnexthdr_offset = skb_network_header(frag); |
5935 |
-+ fragnexthdr_offset += prevhdr - skb_network_header(skb); |
5936 |
-+ *fragnexthdr_offset = NEXTHDR_FRAGMENT; |
5937 |
-+ |
5938 |
- /* |
5939 |
- * Build fragment header. |
5940 |
- */ |
5941 |
-diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c |
5942 |
-index c299c1e..66c2b4b 100644 |
5943 |
---- a/net/ipv6/ip6_vti.c |
5944 |
-+++ b/net/ipv6/ip6_vti.c |
5945 |
-@@ -691,6 +691,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p) |
5946 |
- u->link = p->link; |
5947 |
- u->i_key = p->i_key; |
5948 |
- u->o_key = p->o_key; |
5949 |
-+ if (u->i_key) |
5950 |
-+ u->i_flags |= GRE_KEY; |
5951 |
-+ if (u->o_key) |
5952 |
-+ u->o_flags |= GRE_KEY; |
5953 |
- u->proto = p->proto; |
5954 |
- |
5955 |
- memcpy(u->name, p->name, sizeof(u->name)); |
5956 |
-diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c |
5957 |
-index 9948b5c..986d4ca 100644 |
5958 |
---- a/net/ipv6/netfilter/nf_conntrack_reasm.c |
5959 |
-+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c |
5960 |
-@@ -589,6 +589,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) |
5961 |
- hdr = ipv6_hdr(skb); |
5962 |
- fhdr = (struct frag_hdr *)skb_transport_header(skb); |
5963 |
- |
5964 |
-+ skb_orphan(skb); |
5965 |
- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, |
5966 |
- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
5967 |
- if (fq == NULL) { |
5968 |
-diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
5969 |
-index 6673965..b2e61a0 100644 |
5970 |
---- a/net/ipv6/tcp_ipv6.c |
5971 |
-+++ b/net/ipv6/tcp_ipv6.c |
5972 |
-@@ -375,10 +375,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
5973 |
- np = inet6_sk(sk); |
5974 |
- |
5975 |
- if (type == NDISC_REDIRECT) { |
5976 |
-- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
5977 |
-+ if (!sock_owned_by_user(sk)) { |
5978 |
-+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); |
5979 |
- |
5980 |
-- if (dst) |
5981 |
-- dst->ops->redirect(dst, sk, skb); |
5982 |
-+ if (dst) |
5983 |
-+ dst->ops->redirect(dst, sk, skb); |
5984 |
-+ } |
5985 |
- goto out; |
5986 |
- } |
5987 |
- |
5988 |
-diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c |
5989 |
-index c0f0750..ff750bb 100644 |
5990 |
---- a/net/l2tp/l2tp_ip.c |
5991 |
-+++ b/net/l2tp/l2tp_ip.c |
5992 |
-@@ -388,7 +388,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) |
5993 |
- drop: |
5994 |
- IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); |
5995 |
- kfree_skb(skb); |
5996 |
-- return -1; |
5997 |
-+ return 0; |
5998 |
- } |
5999 |
- |
6000 |
- /* Userspace will call sendmsg() on the tunnel socket to send L2TP |
6001 |
-diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c |
6002 |
-index 5b77377..1309e2c 100644 |
6003 |
---- a/net/mpls/af_mpls.c |
6004 |
-+++ b/net/mpls/af_mpls.c |
6005 |
-@@ -956,7 +956,8 @@ static void mpls_ifdown(struct net_device *dev, int event) |
6006 |
- /* fall through */ |
6007 |
- case NETDEV_CHANGE: |
6008 |
- nh->nh_flags |= RTNH_F_LINKDOWN; |
6009 |
-- ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; |
6010 |
-+ if (event != NETDEV_UNREGISTER) |
6011 |
-+ ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; |
6012 |
- break; |
6013 |
- } |
6014 |
- if (event == NETDEV_UNREGISTER) |
6015 |
-@@ -1696,6 +1697,7 @@ static void mpls_net_exit(struct net *net) |
6016 |
- for (index = 0; index < platform_labels; index++) { |
6017 |
- struct mpls_route *rt = rtnl_dereference(platform_label[index]); |
6018 |
- RCU_INIT_POINTER(platform_label[index], NULL); |
6019 |
-+ mpls_notify_route(net, index, rt, NULL, NULL); |
6020 |
- mpls_rt_free(rt); |
6021 |
- } |
6022 |
- rtnl_unlock(); |
6023 |
-diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c |
6024 |
-index eab210b..48386bf 100644 |
6025 |
---- a/net/openvswitch/conntrack.c |
6026 |
-+++ b/net/openvswitch/conntrack.c |
6027 |
-@@ -367,7 +367,6 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, |
6028 |
- } else if (key->eth.type == htons(ETH_P_IPV6)) { |
6029 |
- enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; |
6030 |
- |
6031 |
-- skb_orphan(skb); |
6032 |
- memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); |
6033 |
- err = nf_ct_frag6_gather(net, skb, user); |
6034 |
- if (err) { |
6035 |
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
6036 |
-index 34de326..f2b04a7 100644 |
6037 |
---- a/net/packet/af_packet.c |
6038 |
-+++ b/net/packet/af_packet.c |
6039 |
-@@ -3140,7 +3140,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
6040 |
- int addr_len) |
6041 |
- { |
6042 |
- struct sock *sk = sock->sk; |
6043 |
-- char name[15]; |
6044 |
-+ char name[sizeof(uaddr->sa_data) + 1]; |
6045 |
- |
6046 |
- /* |
6047 |
- * Check legality |
6048 |
-@@ -3148,7 +3148,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, |
6049 |
- |
6050 |
- if (addr_len != sizeof(struct sockaddr)) |
6051 |
- return -EINVAL; |
6052 |
-- strlcpy(name, uaddr->sa_data, sizeof(name)); |
6053 |
-+ /* uaddr->sa_data comes from userspace; it's not guaranteed to be
6054 |
-+ * zero-terminated. |
6055 |
-+ */ |
6056 |
-+ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); |
6057 |
-+ name[sizeof(uaddr->sa_data)] = 0; |
6058 |
- |
6059 |
- return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); |
6060 |
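The af_packet.c hunk above stops trusting strlcpy() to find a NUL in uaddr->sa_data, since userspace never has to supply one; instead it copies the whole fixed-size field and terminates it by hand. A small sketch of that defensive copy (standalone demo, not kernel code):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr ua;
        char name[sizeof(ua.sa_data) + 1];

        /* Simulate userspace filling sa_data completely, leaving no
         * room for a terminating NUL. */
        memset(&ua, 'x', sizeof(ua));

        memcpy(name, ua.sa_data, sizeof(ua.sa_data));
        name[sizeof(ua.sa_data)] = '\0';       /* always terminated */

        printf("len=%zu\n", strlen(name));     /* bounded, no overread */
        return 0;
}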
- } |
6061 |
-diff --git a/net/sched/act_api.c b/net/sched/act_api.c |
6062 |
-index c6c2a93..c651cfc 100644 |
6063 |
---- a/net/sched/act_api.c |
6064 |
-+++ b/net/sched/act_api.c |
6065 |
-@@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, |
6066 |
- goto out_module_put; |
6067 |
- |
6068 |
- err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); |
6069 |
-- if (err < 0) |
6070 |
-+ if (err <= 0) |
6071 |
- goto out_module_put; |
6072 |
-- if (err == 0) |
6073 |
-- goto noflush_out; |
6074 |
- |
6075 |
- nla_nest_end(skb, nest); |
6076 |
- |
6077 |
-@@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla, |
6078 |
- out_module_put: |
6079 |
- module_put(ops->owner); |
6080 |
- err_out: |
6081 |
--noflush_out: |
6082 |
- kfree_skb(skb); |
6083 |
- return err; |
6084 |
- } |
6085 |
-diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c |
6086 |
-index eae07a2..1191179 100644 |
6087 |
---- a/net/sched/act_connmark.c |
6088 |
-+++ b/net/sched/act_connmark.c |
6089 |
-@@ -113,6 +113,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla, |
6090 |
- if (ret < 0) |
6091 |
- return ret; |
6092 |
- |
6093 |
-+ if (!tb[TCA_CONNMARK_PARMS]) |
6094 |
-+ return -EINVAL; |
6095 |
-+ |
6096 |
- parm = nla_data(tb[TCA_CONNMARK_PARMS]); |
6097 |
- |
6098 |
- if (!tcf_hash_check(tn, parm->index, a, bind)) { |
6099 |
-diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c |
6100 |
-index e7d9638..f85313d 100644 |
6101 |
---- a/net/sched/act_skbmod.c |
6102 |
-+++ b/net/sched/act_skbmod.c |
6103 |
-@@ -228,7 +228,6 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, |
6104 |
- |
6105 |
- return skb->len; |
6106 |
- nla_put_failure: |
6107 |
-- rcu_read_unlock(); |
6108 |
- nlmsg_trim(skb, b); |
6109 |
- return -1; |
6110 |
- } |
6111 |
-diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c |
6112 |
-index 41adf36..b5c279b 100644 |
6113 |
---- a/net/strparser/strparser.c |
6114 |
-+++ b/net/strparser/strparser.c |
6115 |
-@@ -504,6 +504,7 @@ static int __init strp_mod_init(void) |
6116 |
- |
6117 |
- static void __exit strp_mod_exit(void) |
6118 |
- { |
6119 |
-+ destroy_workqueue(strp_wq); |
6120 |
- } |
6121 |
- module_init(strp_mod_init); |
6122 |
- module_exit(strp_mod_exit); |
6123 |
|
6124 |
diff --git a/4.9.18/1017_linux-4.9.18.patch b/4.9.18/1017_linux-4.9.18.patch |
6125 |
deleted file mode 100644 |
6126 |
index 3f957a2..0000000 |
6127 |
--- a/4.9.18/1017_linux-4.9.18.patch |
6128 |
+++ /dev/null |
6129 |
@@ -1,876 +0,0 @@ |
6130 |
-diff --git a/Makefile b/Makefile |
6131 |
-index 004f90a..c10d0e6 100644 |
6132 |
---- a/Makefile |
6133 |
-+++ b/Makefile |
6134 |
-@@ -1,6 +1,6 @@ |
6135 |
- VERSION = 4 |
6136 |
- PATCHLEVEL = 9 |
6137 |
--SUBLEVEL = 17 |
6138 |
-+SUBLEVEL = 18 |
6139 |
- EXTRAVERSION = |
6140 |
- NAME = Roaring Lionus |
6141 |
- |
6142 |
-diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h |
6143 |
-index 7bd69bd..1d8c24d 100644 |
6144 |
---- a/arch/parisc/include/asm/cacheflush.h |
6145 |
-+++ b/arch/parisc/include/asm/cacheflush.h |
6146 |
-@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page) |
6147 |
- |
6148 |
- #define flush_kernel_dcache_range(start,size) \ |
6149 |
- flush_kernel_dcache_range_asm((start), (start)+(size)); |
6150 |
--/* vmap range flushes and invalidates. Architecturally, we don't need |
6151 |
-- * the invalidate, because the CPU should refuse to speculate once an |
6152 |
-- * area has been flushed, so invalidate is left empty */ |
6153 |
--static inline void flush_kernel_vmap_range(void *vaddr, int size) |
6154 |
--{ |
6155 |
-- unsigned long start = (unsigned long)vaddr; |
6156 |
-- |
6157 |
-- flush_kernel_dcache_range_asm(start, start + size); |
6158 |
--} |
6159 |
--static inline void invalidate_kernel_vmap_range(void *vaddr, int size) |
6160 |
--{ |
6161 |
-- unsigned long start = (unsigned long)vaddr; |
6162 |
-- void *cursor = vaddr; |
6163 |
- |
6164 |
-- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { |
6165 |
-- struct page *page = vmalloc_to_page(cursor); |
6166 |
-- |
6167 |
-- if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) |
6168 |
-- flush_kernel_dcache_page(page); |
6169 |
-- } |
6170 |
-- flush_kernel_dcache_range_asm(start, start + size); |
6171 |
--} |
6172 |
-+void flush_kernel_vmap_range(void *vaddr, int size); |
6173 |
-+void invalidate_kernel_vmap_range(void *vaddr, int size); |
6174 |
- |
6175 |
- #define flush_cache_vmap(start, end) flush_cache_all() |
6176 |
- #define flush_cache_vunmap(start, end) flush_cache_all() |
6177 |
-diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
6178 |
-index 977f0a4f..53ec75f 100644 |
6179 |
---- a/arch/parisc/kernel/cache.c |
6180 |
-+++ b/arch/parisc/kernel/cache.c |
6181 |
-@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long |
6182 |
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
6183 |
- } |
6184 |
- } |
6185 |
-+ |
6186 |
-+void flush_kernel_vmap_range(void *vaddr, int size) |
6187 |
-+{ |
6188 |
-+ unsigned long start = (unsigned long)vaddr; |
6189 |
-+ |
6190 |
-+ if ((unsigned long)size > parisc_cache_flush_threshold) |
6191 |
-+ flush_data_cache(); |
6192 |
-+ else |
6193 |
-+ flush_kernel_dcache_range_asm(start, start + size); |
6194 |
-+} |
6195 |
-+EXPORT_SYMBOL(flush_kernel_vmap_range); |
6196 |
-+ |
6197 |
-+void invalidate_kernel_vmap_range(void *vaddr, int size) |
6198 |
-+{ |
6199 |
-+ unsigned long start = (unsigned long)vaddr; |
6200 |
-+ |
6201 |
-+ if ((unsigned long)size > parisc_cache_flush_threshold) |
6202 |
-+ flush_data_cache(); |
6203 |
-+ else |
6204 |
-+ flush_kernel_dcache_range_asm(start, start + size); |
6205 |
-+} |
6206 |
-+EXPORT_SYMBOL(invalidate_kernel_vmap_range); |
6207 |
-diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c |
6208 |
-index 4063943..e81afc37 100644 |
6209 |
---- a/arch/parisc/kernel/process.c |
6210 |
-+++ b/arch/parisc/kernel/process.c |
6211 |
-@@ -139,6 +139,8 @@ void machine_power_off(void) |
6212 |
- |
6213 |
- printk(KERN_EMERG "System shut down completed.\n" |
6214 |
- "Please power this system off now."); |
6215 |
-+ |
6216 |
-+ for (;;); |
6217 |
- } |
6218 |
- |
6219 |
- void (*pm_power_off)(void) = machine_power_off; |
6220 |
-diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S |
6221 |
-index 861e721..f080abf 100644 |
6222 |
---- a/arch/powerpc/boot/zImage.lds.S |
6223 |
-+++ b/arch/powerpc/boot/zImage.lds.S |
6224 |
-@@ -68,6 +68,7 @@ SECTIONS |
6225 |
- } |
6226 |
- |
6227 |
- #ifdef CONFIG_PPC64_BOOT_WRAPPER |
6228 |
-+ . = ALIGN(256); |
6229 |
- .got : |
6230 |
- { |
6231 |
- __toc_start = .; |
6232 |
-diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
6233 |
-index 6e6c1fb..272608f 100644 |
6234 |
---- a/drivers/cpufreq/cpufreq.c |
6235 |
-+++ b/drivers/cpufreq/cpufreq.c |
6236 |
-@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, |
6237 |
- char *buf) |
6238 |
- { |
6239 |
- unsigned int cur_freq = __cpufreq_get(policy); |
6240 |
-- if (!cur_freq) |
6241 |
-- return sprintf(buf, "<unknown>"); |
6242 |
-- return sprintf(buf, "%u\n", cur_freq); |
6243 |
-+ |
6244 |
-+ if (cur_freq) |
6245 |
-+ return sprintf(buf, "%u\n", cur_freq); |
6246 |
-+ |
6247 |
-+ return sprintf(buf, "<unknown>\n"); |
6248 |
- } |
6249 |
- |
6250 |
- /** |
6251 |
-diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
6252 |
-index b447a01..09e6a73 100644 |
6253 |
---- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
6254 |
-+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c |
6255 |
-@@ -3506,6 +3506,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, |
6256 |
- max_sclk = 75000; |
6257 |
- max_mclk = 80000; |
6258 |
- } |
6259 |
-+ } else if (adev->asic_type == CHIP_OLAND) { |
6260 |
-+ if ((adev->pdev->device == 0x6604) && |
6261 |
-+ (adev->pdev->subsystem_vendor == 0x1028) && |
6262 |
-+ (adev->pdev->subsystem_device == 0x066F)) { |
6263 |
-+ max_sclk = 75000; |
6264 |
-+ } |
6265 |
- } |
6266 |
- /* Apply dpm quirks */ |
6267 |
- while (p && p->chip_device != 0) { |
6268 |
-diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c |
6269 |
-index 8703f56..246d1ae 100644 |
6270 |
---- a/drivers/gpu/drm/vc4/vc4_drv.c |
6271 |
-+++ b/drivers/gpu/drm/vc4/vc4_drv.c |
6272 |
-@@ -61,21 +61,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, |
6273 |
- if (ret < 0) |
6274 |
- return ret; |
6275 |
- args->value = V3D_READ(V3D_IDENT0); |
6276 |
-- pm_runtime_put(&vc4->v3d->pdev->dev); |
6277 |
-+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
6278 |
-+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
6279 |
- break; |
6280 |
- case DRM_VC4_PARAM_V3D_IDENT1: |
6281 |
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
6282 |
- if (ret < 0) |
6283 |
- return ret; |
6284 |
- args->value = V3D_READ(V3D_IDENT1); |
6285 |
-- pm_runtime_put(&vc4->v3d->pdev->dev); |
6286 |
-+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
6287 |
-+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
6288 |
- break; |
6289 |
- case DRM_VC4_PARAM_V3D_IDENT2: |
6290 |
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); |
6291 |
- if (ret < 0) |
6292 |
- return ret; |
6293 |
- args->value = V3D_READ(V3D_IDENT2); |
6294 |
-- pm_runtime_put(&vc4->v3d->pdev->dev); |
6295 |
-+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
6296 |
-+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
6297 |
- break; |
6298 |
- case DRM_VC4_PARAM_SUPPORTS_BRANCHES: |
6299 |
- args->value = true; |
6300 |
-diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c |
6301 |
-index 18e3717..ab30169 100644 |
6302 |
---- a/drivers/gpu/drm/vc4/vc4_gem.c |
6303 |
-+++ b/drivers/gpu/drm/vc4/vc4_gem.c |
6304 |
-@@ -711,8 +711,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) |
6305 |
- } |
6306 |
- |
6307 |
- mutex_lock(&vc4->power_lock); |
6308 |
-- if (--vc4->power_refcount == 0) |
6309 |
-- pm_runtime_put(&vc4->v3d->pdev->dev); |
6310 |
-+ if (--vc4->power_refcount == 0) { |
6311 |
-+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); |
6312 |
-+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev); |
6313 |
-+ } |
6314 |
- mutex_unlock(&vc4->power_lock); |
6315 |
- |
6316 |
- kfree(exec); |
6317 |
-diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c |
6318 |
-index e6d3c60..7cc346a 100644 |
6319 |
---- a/drivers/gpu/drm/vc4/vc4_v3d.c |
6320 |
-+++ b/drivers/gpu/drm/vc4/vc4_v3d.c |
6321 |
-@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) |
6322 |
- return ret; |
6323 |
- } |
6324 |
- |
6325 |
-+ pm_runtime_use_autosuspend(dev); |
6326 |
-+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */ |
6327 |
- pm_runtime_enable(dev); |
6328 |
- |
6329 |
- return 0; |
6330 |
-diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
6331 |
-index 2543cf5..917321c 100644 |
6332 |
---- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
6333 |
-+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c |
6334 |
-@@ -608,9 +608,7 @@ static bool |
6335 |
- vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
6336 |
- { |
6337 |
- uint32_t max_branch_target = 0; |
6338 |
-- bool found_shader_end = false; |
6339 |
- int ip; |
6340 |
-- int shader_end_ip = 0; |
6341 |
- int last_branch = -2; |
6342 |
- |
6343 |
- for (ip = 0; ip < validation_state->max_ip; ip++) { |
6344 |
-@@ -621,8 +619,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
6345 |
- uint32_t branch_target_ip; |
6346 |
- |
6347 |
- if (sig == QPU_SIG_PROG_END) { |
6348 |
-- shader_end_ip = ip; |
6349 |
-- found_shader_end = true; |
6350 |
-+ /* There are two delay slots after program end is |
6351 |
-+ * signaled that are still executed, then we're |
6352 |
-+ * finished. validation_state->max_ip is the |
6353 |
-+ * instruction after the last valid instruction in the |
6354 |
-+ * program. |
6355 |
-+ */ |
6356 |
-+ validation_state->max_ip = ip + 3; |
6357 |
- continue; |
6358 |
- } |
6359 |
- |
6360 |
-@@ -676,15 +679,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state) |
6361 |
- } |
6362 |
- set_bit(after_delay_ip, validation_state->branch_targets); |
6363 |
- max_branch_target = max(max_branch_target, after_delay_ip); |
6364 |
-- |
6365 |
-- /* There are two delay slots after program end is signaled |
6366 |
-- * that are still executed, then we're finished. |
6367 |
-- */ |
6368 |
-- if (found_shader_end && ip == shader_end_ip + 2) |
6369 |
-- break; |
6370 |
- } |
6371 |
- |
6372 |
-- if (max_branch_target > shader_end_ip) { |
6373 |
-+ if (max_branch_target > validation_state->max_ip - 3) { |
6374 |
- DRM_ERROR("Branch landed after QPU_SIG_PROG_END"); |
6375 |
- return false; |
6376 |
- } |
6377 |
-diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c |
6378 |
-index aecec6d..7f1c625 100644 |
6379 |
---- a/drivers/isdn/gigaset/bas-gigaset.c |
6380 |
-+++ b/drivers/isdn/gigaset/bas-gigaset.c |
6381 |
-@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface, |
6382 |
- return -ENODEV; |
6383 |
- } |
6384 |
- |
6385 |
-+ if (hostif->desc.bNumEndpoints < 1) |
6386 |
-+ return -ENODEV; |
6387 |
-+ |
6388 |
- dev_info(&udev->dev, |
6389 |
- "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n", |
6390 |
- __func__, le16_to_cpu(udev->descriptor.idVendor), |
6391 |
-diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
6392 |
-index 39fddda..55b5e0e 100644 |
6393 |
---- a/drivers/md/raid10.c |
6394 |
-+++ b/drivers/md/raid10.c |
6395 |
-@@ -1470,7 +1470,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) |
6396 |
- split = bio; |
6397 |
- } |
6398 |
- |
6399 |
-+ /* |
6400 |
-+ * If a bio is split, the first part of the bio will pass
6401
-+ * the barrier, but the bio is queued in current->bio_list (see
6402
-+ * generic_make_request). If there is a raise_barrier() called
6403
-+ * here, the second part of the bio can't pass the barrier. But since
6404
-+ * the first part of the bio isn't dispatched to the underlying disks
6405
-+ * yet, the barrier is never released, hence raise_barrier will
6406
-+ * always wait. We have a deadlock.
6407
-+ * Note, this only happens in the read path. For the write path, the
6408
-+ * first part of the bio is dispatched in a schedule() call
6409
-+ * (because of blk plug) or offloaded to raid10d.
6410
-+ * Returning from the function immediately can change the bio
6411 |
-+ * order queued in bio_list and avoid the deadlock. |
6412 |
-+ */ |
6413 |
- __make_request(mddev, split); |
6414 |
-+ if (split != bio && bio_data_dir(bio) == READ) { |
6415 |
-+ generic_make_request(bio); |
6416 |
-+ break; |
6417 |
-+ } |
6418 |
- } while (split != bio); |
6419 |
- |
6420 |
- /* In case raid10d snuck in to freeze_array */ |
6421 |
-diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
6422 |
-index f9b6fba..a530f08 100644 |
6423 |
---- a/drivers/scsi/libiscsi.c |
6424 |
-+++ b/drivers/scsi/libiscsi.c |
6425 |
-@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) |
6426 |
- WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); |
6427 |
- task->state = state; |
6428 |
- |
6429 |
-- if (!list_empty(&task->running)) |
6430 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6431 |
-+ if (!list_empty(&task->running)) { |
6432 |
-+ pr_debug_once("%s while task on list", __func__); |
6433 |
- list_del_init(&task->running); |
6434 |
-+ } |
6435 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6436 |
- |
6437 |
- if (conn->task == task) |
6438 |
- conn->task = NULL; |
6439 |
-@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, |
6440 |
- if (session->tt->xmit_task(task)) |
6441 |
- goto free_task; |
6442 |
- } else { |
6443 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6444 |
- list_add_tail(&task->running, &conn->mgmtqueue); |
6445 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6446 |
- iscsi_conn_queue_work(conn); |
6447 |
- } |
6448 |
- |
6449 |
-@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task) |
6450 |
- * this may be on the requeue list already if the xmit_task callout |
6451 |
- * is handling the r2ts while we are adding new ones |
6452 |
- */ |
6453 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6454 |
- if (list_empty(&task->running)) |
6455 |
- list_add_tail(&task->running, &conn->requeue); |
6456 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6457 |
- iscsi_conn_queue_work(conn); |
6458 |
- } |
6459 |
- EXPORT_SYMBOL_GPL(iscsi_requeue_task); |
6460 |
-@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
6461 |
- * only have one nop-out as a ping from us and targets should not |
6462 |
- * overflow us with nop-ins |
6463 |
- */ |
6464 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6465 |
- check_mgmt: |
6466 |
- while (!list_empty(&conn->mgmtqueue)) { |
6467 |
- conn->task = list_entry(conn->mgmtqueue.next, |
6468 |
- struct iscsi_task, running); |
6469 |
- list_del_init(&conn->task->running); |
6470 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6471 |
- if (iscsi_prep_mgmt_task(conn, conn->task)) { |
6472 |
- /* regular RX path uses back_lock */ |
6473 |
- spin_lock_bh(&conn->session->back_lock); |
6474 |
- __iscsi_put_task(conn->task); |
6475 |
- spin_unlock_bh(&conn->session->back_lock); |
6476 |
- conn->task = NULL; |
6477 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6478 |
- continue; |
6479 |
- } |
6480 |
- rc = iscsi_xmit_task(conn); |
6481 |
- if (rc) |
6482 |
- goto done; |
6483 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6484 |
- } |
6485 |
- |
6486 |
- /* process pending command queue */ |
6487 |
-@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
6488 |
- conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, |
6489 |
- running); |
6490 |
- list_del_init(&conn->task->running); |
6491 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6492 |
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { |
6493 |
- fail_scsi_task(conn->task, DID_IMM_RETRY); |
6494 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6495 |
- continue; |
6496 |
- } |
6497 |
- rc = iscsi_prep_scsi_cmd_pdu(conn->task); |
6498 |
- if (rc) { |
6499 |
- if (rc == -ENOMEM || rc == -EACCES) { |
6500 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6501 |
- list_add_tail(&conn->task->running, |
6502 |
- &conn->cmdqueue); |
6503 |
- conn->task = NULL; |
6504 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6505 |
- goto done; |
6506 |
- } else |
6507 |
- fail_scsi_task(conn->task, DID_ABORT); |
6508 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6509 |
- continue; |
6510 |
- } |
6511 |
- rc = iscsi_xmit_task(conn); |
6512 |
-@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
6513 |
- * we need to check the mgmt queue for nops that need to |
6514 |
- * be sent to avoid starvation
6515 |
- */ |
6516 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6517 |
- if (!list_empty(&conn->mgmtqueue)) |
6518 |
- goto check_mgmt; |
6519 |
- } |
6520 |
-@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) |
6521 |
- conn->task = task; |
6522 |
- list_del_init(&conn->task->running); |
6523 |
- conn->task->state = ISCSI_TASK_RUNNING; |
6524 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6525 |
- rc = iscsi_xmit_task(conn); |
6526 |
- if (rc) |
6527 |
- goto done; |
6528 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6529 |
- if (!list_empty(&conn->mgmtqueue)) |
6530 |
- goto check_mgmt; |
6531 |
- } |
6532 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6533 |
- spin_unlock_bh(&conn->session->frwd_lock); |
6534 |
- return -ENODATA; |
6535 |
- |
6536 |
-@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) |
6537 |
- goto prepd_reject; |
6538 |
- } |
6539 |
- } else { |
6540 |
-+ spin_lock_bh(&conn->taskqueuelock); |
6541 |
- list_add_tail(&task->running, &conn->cmdqueue); |
6542 |
-+ spin_unlock_bh(&conn->taskqueuelock); |
6543 |
- iscsi_conn_queue_work(conn); |
6544 |
- } |
6545 |
- |
6546 |
-@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, |
6547 |
- INIT_LIST_HEAD(&conn->mgmtqueue); |
6548 |
- INIT_LIST_HEAD(&conn->cmdqueue); |
6549 |
- INIT_LIST_HEAD(&conn->requeue); |
6550 |
-+ spin_lock_init(&conn->taskqueuelock); |
6551 |
- INIT_WORK(&conn->xmitwork, iscsi_xmitworker); |
6552 |
- |
6553 |
- /* allocate login_task used for the login/text sequences */ |
6554 |
-diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
6555 |
-index 734a042..f7e3f27 100644 |
6556 |
---- a/drivers/scsi/lpfc/lpfc_init.c |
6557 |
-+++ b/drivers/scsi/lpfc/lpfc_init.c |
6558 |
-@@ -11393,6 +11393,7 @@ static struct pci_driver lpfc_driver = { |
6559 |
- .id_table = lpfc_id_table, |
6560 |
- .probe = lpfc_pci_probe_one, |
6561 |
- .remove = lpfc_pci_remove_one, |
6562 |
-+ .shutdown = lpfc_pci_remove_one, |
6563 |
- .suspend = lpfc_pci_suspend_one, |
6564 |
- .resume = lpfc_pci_resume_one, |
6565 |
- .err_handler = &lpfc_err_handler, |
6566 |
-diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
6567 |
-index bff9689..feab7ea 100644 |
6568 |
---- a/drivers/scsi/qla2xxx/qla_target.c |
6569 |
-+++ b/drivers/scsi/qla2xxx/qla_target.c |
6570 |
-@@ -5375,16 +5375,22 @@ qlt_send_busy(struct scsi_qla_host *vha, |
6571 |
- |
6572 |
- static int |
6573 |
- qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, |
6574 |
-- struct atio_from_isp *atio) |
6575 |
-+ struct atio_from_isp *atio, bool ha_locked) |
6576 |
- { |
6577 |
- struct qla_hw_data *ha = vha->hw; |
6578 |
- uint16_t status; |
6579 |
-+ unsigned long flags; |
6580 |
- |
6581 |
- if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) |
6582 |
- return 0; |
6583 |
- |
6584 |
-+ if (!ha_locked) |
6585 |
-+ spin_lock_irqsave(&ha->hardware_lock, flags); |
6586 |
- status = temp_sam_status; |
6587 |
- qlt_send_busy(vha, atio, status); |
6588 |
-+ if (!ha_locked) |
6589 |
-+ spin_unlock_irqrestore(&ha->hardware_lock, flags); |
6590 |
-+ |
6591 |
- return 1; |
6592 |
- } |
6593 |
- |
6594 |
-@@ -5429,7 +5435,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, |
6595 |
- |
6596 |
- |
6597 |
- if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { |
6598 |
-- rc = qlt_chk_qfull_thresh_hold(vha, atio); |
6599 |
-+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); |
6600 |
- if (rc != 0) { |
6601 |
- tgt->atio_irq_cmd_count--; |
6602 |
- return; |
6603 |
-@@ -5552,7 +5558,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) |
6604 |
- break; |
6605 |
- } |
6606 |
- |
6607 |
-- rc = qlt_chk_qfull_thresh_hold(vha, atio); |
6608 |
-+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true); |
6609 |
- if (rc != 0) { |
6610 |
- tgt->irq_cmd_count--; |
6611 |
- return; |
6612 |
-@@ -6794,6 +6800,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) |
6613 |
- spin_lock_irqsave(&ha->hardware_lock, flags); |
6614 |
- qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); |
6615 |
- spin_unlock_irqrestore(&ha->hardware_lock, flags); |
6616 |
-+ |
6617 |
-+ kfree(op); |
6618 |
- } |
6619 |
- |
6620 |
- void |
6621 |
-diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c |
6622 |
-index 9125d93..ef1c8c1 100644 |
6623 |
---- a/drivers/target/target_core_pscsi.c |
6624 |
-+++ b/drivers/target/target_core_pscsi.c |
6625 |
-@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
6626 |
- |
6627 |
- buf = kzalloc(12, GFP_KERNEL); |
6628 |
- if (!buf) |
6629 |
-- return; |
6630 |
-+ goto out_free; |
6631 |
- |
6632 |
- memset(cdb, 0, MAX_COMMAND_SIZE); |
6633 |
- cdb[0] = MODE_SENSE; |
6634 |
-@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, |
6635 |
- * If MODE_SENSE still returns zero, set the default value to 1024. |
6636 |
- */ |
6637 |
- sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
6638 |
-+out_free: |
6639 |
- if (!sdev->sector_size) |
6640 |
- sdev->sector_size = 1024; |
6641 |
--out_free: |
6642 |
-+ |
6643 |
- kfree(buf); |
6644 |
- } |
6645 |
- |
6646 |
-@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
6647 |
- sd->lun, sd->queue_depth); |
6648 |
- } |
6649 |
- |
6650 |
-- dev->dev_attrib.hw_block_size = sd->sector_size; |
6651 |
-+ dev->dev_attrib.hw_block_size = |
6652 |
-+ min_not_zero((int)sd->sector_size, 512); |
6653 |
- dev->dev_attrib.hw_max_sectors = |
6654 |
-- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); |
6655 |
-+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); |
6656 |
- dev->dev_attrib.hw_queue_depth = sd->queue_depth; |
6657 |
- |
6658 |
- /* |
6659 |
-@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, |
6660 |
- /* |
6661 |
- * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
6662 |
- */ |
6663 |
-- if (sd->type == TYPE_TAPE) |
6664 |
-+ if (sd->type == TYPE_TAPE) { |
6665 |
- pscsi_tape_read_blocksize(dev, sd); |
6666 |
-+ dev->dev_attrib.hw_block_size = sd->sector_size; |
6667 |
-+ } |
6668 |
- return 0; |
6669 |
- } |
6670 |
- |
6671 |
-@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) |
6672 |
- /* |
6673 |
- * Called with struct Scsi_Host->host_lock called. |
6674 |
- */ |
6675 |
--static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
6676 |
-+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) |
6677 |
- __releases(sh->host_lock) |
6678 |
- { |
6679 |
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
6680 |
-@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) |
6681 |
- return 0; |
6682 |
- } |
6683 |
- |
6684 |
--/* |
6685 |
-- * Called with struct Scsi_Host->host_lock called. |
6686 |
-- */ |
6687 |
--static int pscsi_create_type_other(struct se_device *dev, |
6688 |
-- struct scsi_device *sd) |
6689 |
-- __releases(sh->host_lock) |
6690 |
--{ |
6691 |
-- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
6692 |
-- struct Scsi_Host *sh = sd->host; |
6693 |
-- int ret; |
6694 |
-- |
6695 |
-- spin_unlock_irq(sh->host_lock); |
6696 |
-- ret = pscsi_add_device_to_list(dev, sd); |
6697 |
-- if (ret) |
6698 |
-- return ret; |
6699 |
-- |
6700 |
-- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", |
6701 |
-- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, |
6702 |
-- sd->channel, sd->id, sd->lun); |
6703 |
-- return 0; |
6704 |
--} |
6705 |
-- |
6706 |
- static int pscsi_configure_device(struct se_device *dev) |
6707 |
- { |
6708 |
- struct se_hba *hba = dev->se_hba; |
6709 |
-@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) |
6710 |
- case TYPE_DISK: |
6711 |
- ret = pscsi_create_type_disk(dev, sd); |
6712 |
- break; |
6713 |
-- case TYPE_ROM: |
6714 |
-- ret = pscsi_create_type_rom(dev, sd); |
6715 |
-- break; |
6716 |
- default: |
6717 |
-- ret = pscsi_create_type_other(dev, sd); |
6718 |
-+ ret = pscsi_create_type_nondisk(dev, sd); |
6719 |
- break; |
6720 |
- } |
6721 |
- |
6722 |
-@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) |
6723 |
- else if (pdv->pdv_lld_host) |
6724 |
- scsi_host_put(pdv->pdv_lld_host); |
6725 |
- |
6726 |
-- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
6727 |
-- scsi_device_put(sd); |
6728 |
-+ scsi_device_put(sd); |
6729 |
- |
6730 |
- pdv->pdv_sd = NULL; |
6731 |
- } |
6732 |
-@@ -1069,7 +1047,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) |
6733 |
- if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
6734 |
- return pdv->pdv_bd->bd_part->nr_sects; |
6735 |
- |
6736 |
-- dump_stack(); |
6737 |
- return 0; |
6738 |
- } |
6739 |
- |
6740 |
-diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
6741 |
-index aabd660..a53fb23 100644 |
6742 |
---- a/drivers/target/target_core_sbc.c |
6743 |
-+++ b/drivers/target/target_core_sbc.c |
6744 |
-@@ -1104,9 +1104,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
6745 |
- return ret; |
6746 |
- break; |
6747 |
- case VERIFY: |
6748 |
-+ case VERIFY_16: |
6749 |
- size = 0; |
6750 |
-- sectors = transport_get_sectors_10(cdb); |
6751 |
-- cmd->t_task_lba = transport_lba_32(cdb); |
6752 |
-+ if (cdb[0] == VERIFY) { |
6753 |
-+ sectors = transport_get_sectors_10(cdb); |
6754 |
-+ cmd->t_task_lba = transport_lba_32(cdb); |
6755 |
-+ } else { |
6756 |
-+ sectors = transport_get_sectors_16(cdb); |
6757 |
-+ cmd->t_task_lba = transport_lba_64(cdb); |
6758 |
-+ } |
6759 |
- cmd->execute_cmd = sbc_emulate_noop; |
6760 |
- goto check_lba; |
6761 |
- case REZERO_UNIT: |
6762 |
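The target_core_sbc.c hunk above makes sbc_parse_cdb() pick the LBA and sector count from the right bytes for VERIFY(16) instead of always assuming the 10-byte layout. A standalone sketch of the two layouts (byte offsets as defined by SBC: VERIFY(10) carries a 32-bit LBA in bytes 2-5 and a 16-bit length in bytes 7-8, VERIFY(16) a 64-bit LBA in bytes 2-9 and a 32-bit length in bytes 10-13; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

/* Accumulate n big-endian bytes into an integer. */
static uint64_t get_be(const uint8_t *p, int n)
{
        uint64_t v = 0;

        while (n--)
                v = (v << 8) | *p++;
        return v;
}

int main(void)
{
        uint8_t v10[10] = { 0x2f, 0, 0, 0, 0x10, 0x00, 0, 0x00, 0x08, 0 };
        uint8_t v16[16] = { 0x8f, 0, 0, 0, 0, 0, 0, 0, 0x10, 0x00,
                            0, 0, 0x00, 0x08, 0, 0 };

        printf("VERIFY(10): lba=%llu sectors=%llu\n",
               (unsigned long long)get_be(&v10[2], 4),
               (unsigned long long)get_be(&v10[7], 2));
        printf("VERIFY(16): lba=%llu sectors=%llu\n",
               (unsigned long long)get_be(&v16[2], 8),
               (unsigned long long)get_be(&v16[10], 4));
        return 0;
}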
-diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
6763 |
-index afe29ba..5fa9ba1 100644 |
6764 |
---- a/fs/ext4/super.c |
6765 |
-+++ b/fs/ext4/super.c |
6766 |
-@@ -3830,7 +3830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
6767 |
- db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / |
6768 |
- EXT4_DESC_PER_BLOCK(sb); |
6769 |
- if (ext4_has_feature_meta_bg(sb)) { |
6770 |
-- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) { |
6771 |
-+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) { |
6772 |
- ext4_msg(sb, KERN_WARNING, |
6773 |
- "first meta block group too large: %u " |
6774 |
- "(group descriptor block count %u)", |
6775 |
-diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h |
6776 |
-index a6a3389..51519c2 100644 |
6777 |
---- a/fs/gfs2/incore.h |
6778 |
-+++ b/fs/gfs2/incore.h |
6779 |
-@@ -207,7 +207,7 @@ struct lm_lockname { |
6780 |
- struct gfs2_sbd *ln_sbd; |
6781 |
- u64 ln_number; |
6782 |
- unsigned int ln_type; |
6783 |
--}; |
6784 |
-+} __packed __aligned(sizeof(int)); |
6785 |
- |
6786 |
- #define lm_name_equal(name1, name2) \ |
6787 |
- (((name1)->ln_number == (name2)->ln_number) && \ |
6788 |
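The gfs2 hunk above packs struct lm_lockname, presumably because the struct is hashed or compared as raw bytes and compiler padding would otherwise hold garbage. A standalone sketch of the size effect (illustrative struct, not the gfs2 one):

#include <stdio.h>

struct key_unpacked {
        void *sbd;
        unsigned long long number;
        unsigned int type;             /* trailing padding may follow */
};

struct key_packed {
        void *sbd;
        unsigned long long number;
        unsigned int type;
} __attribute__((__packed__, __aligned__(sizeof(int))));

int main(void)
{
        /* On LP64 the unpacked form is typically padded to 24 bytes,
         * while the packed one stays at 20. */
        printf("unpacked=%zu packed=%zu\n",
               sizeof(struct key_unpacked), sizeof(struct key_packed));
        return 0;
}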
-diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
6789 |
-index 609840d..1536aeb 100644 |
6790 |
---- a/fs/nfs/nfs4proc.c |
6791 |
-+++ b/fs/nfs/nfs4proc.c |
6792 |
-@@ -7426,11 +7426,11 @@ static void nfs4_exchange_id_release(void *data) |
6793 |
- struct nfs41_exchange_id_data *cdata = |
6794 |
- (struct nfs41_exchange_id_data *)data; |
6795 |
- |
6796 |
-- nfs_put_client(cdata->args.client); |
6797 |
- if (cdata->xprt) { |
6798 |
- xprt_put(cdata->xprt); |
6799 |
- rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); |
6800 |
- } |
6801 |
-+ nfs_put_client(cdata->args.client); |
6802 |
- kfree(cdata->res.impl_id); |
6803 |
- kfree(cdata->res.server_scope); |
6804 |
- kfree(cdata->res.server_owner); |
6805 |
-@@ -7537,10 +7537,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, |
6806 |
- task_setup_data.callback_data = calldata; |
6807 |
- |
6808 |
- task = rpc_run_task(&task_setup_data); |
6809 |
-- if (IS_ERR(task)) { |
6810 |
-- status = PTR_ERR(task); |
6811 |
-- goto out_impl_id; |
6812 |
-- } |
6813 |
-+ if (IS_ERR(task)) |
6814 |
-+ return PTR_ERR(task); |
6815 |
- |
6816 |
- if (!xprt) { |
6817 |
- status = rpc_wait_for_completion_task(task); |
6818 |
-@@ -7568,6 +7566,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, |
6819 |
- kfree(calldata->res.server_owner); |
6820 |
- out_calldata: |
6821 |
- kfree(calldata); |
6822 |
-+ nfs_put_client(clp); |
6823 |
- goto out; |
6824 |
- } |
6825 |
- |
6826 |
-diff --git a/include/linux/log2.h b/include/linux/log2.h |
6827 |
-index fd7ff3d..f38fae2 100644 |
6828 |
---- a/include/linux/log2.h |
6829 |
-+++ b/include/linux/log2.h |
6830 |
-@@ -16,12 +16,6 @@ |
6831 |
- #include <linux/bitops.h> |
6832 |
- |
6833 |
- /* |
6834 |
-- * deal with unrepresentable constant logarithms |
6835 |
-- */ |
6836 |
--extern __attribute__((const, noreturn)) |
6837 |
--int ____ilog2_NaN(void); |
6838 |
-- |
6839 |
--/* |
6840 |
- * non-constant log of base 2 calculators |
6841 |
- * - the arch may override these in asm/bitops.h if they can be implemented |
6842 |
- * more efficiently than using fls() and fls64() |
6843 |
-@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
6844 |
- #define ilog2(n) \ |
6845 |
- ( \ |
6846 |
- __builtin_constant_p(n) ? ( \ |
6847 |
-- (n) < 1 ? ____ilog2_NaN() : \ |
6848 |
-+ (n) < 2 ? 0 : \ |
6849 |
- (n) & (1ULL << 63) ? 63 : \ |
6850 |
- (n) & (1ULL << 62) ? 62 : \ |
6851 |
- (n) & (1ULL << 61) ? 61 : \ |
6852 |
-@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) |
6853 |
- (n) & (1ULL << 4) ? 4 : \ |
6854 |
- (n) & (1ULL << 3) ? 3 : \ |
6855 |
- (n) & (1ULL << 2) ? 2 : \ |
6856 |
-- (n) & (1ULL << 1) ? 1 : \ |
6857 |
-- (n) & (1ULL << 0) ? 0 : \ |
6858 |
-- ____ilog2_NaN() \ |
6859 |
-- ) : \ |
6860 |
-+ 1 ) : \ |
6861 |
- (sizeof(n) <= 4) ? \ |
6862 |
- __ilog2_u32(n) : \ |
6863 |
- __ilog2_u64(n) \ |
6864 |
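The log2.h change above replaces the deliberate link error ____ilog2_NaN() with `(n) < 2 ? 0`, so a constant ilog2(1) now folds to 0 at compile time instead of failing to link. A minimal sketch of that constant branch; the runtime arm uses __builtin_clzll as a simplified stand-in for the kernel's fls()-based helpers, and n must be nonzero:

#include <stdio.h>

#define ILOG2(n) \
        (__builtin_constant_p(n) && (n) < 2 ? 0 : \
         63 - __builtin_clzll((unsigned long long)(n)))

int main(void)
{
        /* ilog2(1) folds to 0 rather than referencing an undefined
         * symbol; larger values go through the clz fallback. */
        printf("ilog2(1)=%d ilog2(2)=%d ilog2(4096)=%d\n",
               ILOG2(1), ILOG2(2), ILOG2(4096));
        return 0;
}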
-diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h |
6865 |
-index 4d1c46a..c7b1dc7 100644 |
6866 |
---- a/include/scsi/libiscsi.h |
6867 |
-+++ b/include/scsi/libiscsi.h |
6868 |
-@@ -196,6 +196,7 @@ struct iscsi_conn { |
6869 |
- struct iscsi_task *task; /* xmit task in progress */ |
6870 |
- |
6871 |
- /* xmit */ |
6872 |
-+ spinlock_t taskqueuelock; /* protects the next three lists */ |
6873 |
- struct list_head mgmtqueue; /* mgmt (control) xmit queue */ |
6874 |
- struct list_head cmdqueue; /* data-path cmd queue */ |
6875 |
- struct list_head requeue; /* tasks needing another run */ |
6876 |
-diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c |
6877 |
-index 2bd6737..a57242e 100644 |
6878 |
---- a/kernel/cgroup_pids.c |
6879 |
-+++ b/kernel/cgroup_pids.c |
6880 |
-@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task) |
6881 |
- /* Only log the first time events_limit is incremented. */ |
6882 |
- if (atomic64_inc_return(&pids->events_limit) == 1) { |
6883 |
- pr_info("cgroup: fork rejected by pids controller in "); |
6884 |
-- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id)); |
6885 |
-+ pr_cont_cgroup_path(css->cgroup); |
6886 |
- pr_cont("\n"); |
6887 |
- } |
6888 |
- cgroup_file_notify(&pids->events_file); |
6889 |
-diff --git a/kernel/events/core.c b/kernel/events/core.c |
6890 |
-index 4b33231..07c0dc8 100644 |
6891 |
---- a/kernel/events/core.c |
6892 |
-+++ b/kernel/events/core.c |
6893 |
-@@ -10333,6 +10333,17 @@ void perf_event_free_task(struct task_struct *task) |
6894 |
- continue; |
6895 |
- |
6896 |
- mutex_lock(&ctx->mutex); |
6897 |
-+ raw_spin_lock_irq(&ctx->lock); |
6898 |
-+ /* |
6899 |
-+ * Destroy the task <-> ctx relation and mark the context dead. |
6900 |
-+ * |
6901 |
-+ * This is important because even though the task hasn't been |
6902 |
-+ * exposed yet the context has been (through child_list). |
6903 |
-+ */ |
6904 |
-+ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); |
6905 |
-+ WRITE_ONCE(ctx->task, TASK_TOMBSTONE); |
6906 |
-+ put_task_struct(task); /* cannot be last */ |
6907 |
-+ raw_spin_unlock_irq(&ctx->lock); |
6908 |
- again: |
6909 |
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, |
6910 |
- group_entry) |
6911 |
-@@ -10586,7 +10597,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
6912 |
- ret = inherit_task_group(event, parent, parent_ctx, |
6913 |
- child, ctxn, &inherited_all); |
6914 |
- if (ret) |
6915 |
-- break; |
6916 |
-+ goto out_unlock; |
6917 |
- } |
6918 |
- |
6919 |
- /* |
6920 |
-@@ -10602,7 +10613,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
6921 |
- ret = inherit_task_group(event, parent, parent_ctx, |
6922 |
- child, ctxn, &inherited_all); |
6923 |
- if (ret) |
6924 |
-- break; |
6925 |
-+ goto out_unlock; |
6926 |
- } |
6927 |
- |
6928 |
- raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
6929 |
-@@ -10630,6 +10641,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn) |
6930 |
- } |
6931 |
- |
6932 |
- raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); |
6933 |
-+out_unlock: |
6934 |
- mutex_unlock(&parent_ctx->mutex); |
6935 |
- |
6936 |
- perf_unpin_context(parent_ctx); |
6937 |
-diff --git a/mm/percpu.c b/mm/percpu.c |
6938 |
-index 2557143..f014ceb 100644 |
6939 |
---- a/mm/percpu.c |
6940 |
-+++ b/mm/percpu.c |
6941 |
-@@ -1010,8 +1010,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, |
6942 |
- mutex_unlock(&pcpu_alloc_mutex); |
6943 |
- } |
6944 |
- |
6945 |
-- if (chunk != pcpu_reserved_chunk) |
6946 |
-+ if (chunk != pcpu_reserved_chunk) { |
6947 |
-+ spin_lock_irqsave(&pcpu_lock, flags); |
6948 |
- pcpu_nr_empty_pop_pages -= occ_pages; |
6949 |
-+ spin_unlock_irqrestore(&pcpu_lock, flags); |
6950 |
-+ } |
6951 |
- |
6952 |
- if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) |
6953 |
- pcpu_schedule_balance_work(); |
6954 |
-diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
-index e2c37061..69502fa 100644
---- a/net/sunrpc/xprtrdma/verbs.c
-+++ b/net/sunrpc/xprtrdma/verbs.c
-@@ -486,7 +486,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
- struct ib_cq *sendcq, *recvcq;
- int rc;
-
-- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
-+ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
-+ RPCRDMA_MAX_SEND_SGES);
- if (max_sge < RPCRDMA_MIN_SEND_SGES) {
- pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
- return -ENOMEM;
-diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
-index 4144666..d5677d3 100644
---- a/tools/include/linux/log2.h
-+++ b/tools/include/linux/log2.h
-@@ -13,12 +13,6 @@
- #define _TOOLS_LINUX_LOG2_H
-
- /*
-- * deal with unrepresentable constant logarithms
-- */
--extern __attribute__((const, noreturn))
--int ____ilog2_NaN(void);
--
--/*
- * non-constant log of base 2 calculators
- * - the arch may override these in asm/bitops.h if they can be implemented
- * more efficiently than using fls() and fls64()
-@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
- #define ilog2(n) \
- ( \
- __builtin_constant_p(n) ? ( \
-- (n) < 1 ? ____ilog2_NaN() : \
-+ (n) < 2 ? 0 : \
- (n) & (1ULL << 63) ? 63 : \
- (n) & (1ULL << 62) ? 62 : \
- (n) & (1ULL << 61) ? 61 : \
-@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
- (n) & (1ULL << 4) ? 4 : \
- (n) & (1ULL << 3) ? 3 : \
- (n) & (1ULL << 2) ? 2 : \
-- (n) & (1ULL << 1) ? 1 : \
-- (n) & (1ULL << 0) ? 0 : \
-- ____ilog2_NaN() \
-- ) : \
-+ 1 ) : \
- (sizeof(n) <= 4) ? \
- __ilog2_u32(n) : \
- __ilog2_u64(n) \

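The log2.h hunk above drops the ____ilog2_NaN() trick. The deliberately undefined symbol was meant to turn ilog2() of a constant below 1 into a link error, but references to it could survive into the final link and break otherwise valid builds, so constant arguments below 2 now simply fold to 0. A runtime sketch of the constant-folding semantics after the change:

#include <stdio.h>

/* Integer log2: index of the highest set bit; by the header's new
 * convention, inputs below 2 yield 0. */
static unsigned int ilog2_u64(unsigned long long n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	printf("%u %u %u %u\n", ilog2_u64(0), ilog2_u64(1),
	       ilog2_u64(8), ilog2_u64(1000));  /* prints "0 0 3 9" */
	return 0;
}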
diff --git a/4.9.18/0000_README b/4.9.20/0000_README
similarity index 89%
rename from 4.9.18/0000_README
rename to 4.9.20/0000_README
index 8c12f63..a960856 100644
--- a/4.9.18/0000_README
+++ b/4.9.20/0000_README
@@ -2,15 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1016_linux-4.9.17.patch
-From: http://www.kernel.org
-Desc: Linux 4.9.17
-
-Patch: 1017_linux-4.9.18.patch
-From: http://www.kernel.org
-Desc: Linux 4.9.18
-
-Patch: 4420_grsecurity-3.1-4.9.18-201703261106.patch
+Patch: 4420_grsecurity-3.1-4.9.20-201703310823.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch b/4.9.20/4420_grsecurity-3.1-4.9.20-201703310823.patch
similarity index 99%
rename from 4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch
rename to 4.9.20/4420_grsecurity-3.1-4.9.20-201703310823.patch
index 3659b97..f803149 100644
--- a/4.9.18/4420_grsecurity-3.1-4.9.18-201703261106.patch
+++ b/4.9.20/4420_grsecurity-3.1-4.9.20-201703310823.patch
@@ -419,7 +419,7 @@ index 3d0ae15..84e5412 100644
cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)

diff --git a/Makefile b/Makefile
-index c10d0e6..54799eb2 100644
+index 4496018..3f9a080 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -7351,10 +7351,10 @@ index 1652f36..0e22377 100644
{
struct pt_regs *regs;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index a92994d..e389b11 100644
+index bf83dc1..775bed8 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
-@@ -882,6 +882,10 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -883,6 +883,10 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}

@@ -7365,7 +7365,7 @@ index a92994d..e389b11 100644
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
-@@ -899,6 +903,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+@@ -900,6 +904,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
if (secure_computing(NULL) == -1)
return -1;

@@ -11324,7 +11324,7 @@ index 79cc0d1..46d6233 100644
.getproplen = prom_getproplen,
.getproperty = prom_getproperty,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
-index ac082dd..7170942 100644
+index 7037ca3..070b51b 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1068,6 +1068,10 @@ long arch_ptrace(struct task_struct *child, long request,
@@ -36277,7 +36277,7 @@ index 69b8f8a..0cf39f5 100644

vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 731044e..399463d 100644
+index e5bc139..5a1766b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2005,8 +2005,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
@@ -49817,7 +49817,7 @@ index 4d3ec92..cf501fc 100644
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 272608f..5c4a47a 100644
+index cac4a92..93c0aed 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -528,12 +528,12 @@ EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -49835,7 +49835,7 @@ index 272608f..5c4a47a 100644
const char *buf, size_t count)
{
int ret, enable;
-@@ -2116,7 +2116,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -2119,7 +2119,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);

mutex_lock(&cpufreq_governor_mutex);
@@ -49844,7 +49844,7 @@ index 272608f..5c4a47a 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2336,13 +2336,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2339,13 +2339,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;

write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -49864,7 +49864,7 @@ index 272608f..5c4a47a 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);

pr_err("%s: Cannot %s BOOST\n",
-@@ -2383,7 +2387,9 @@ int cpufreq_enable_boost_support(void)
+@@ -2386,7 +2390,9 @@ int cpufreq_enable_boost_support(void)
if (cpufreq_boost_supported())
return 0;

@@ -49875,7 +49875,7 @@ index 272608f..5c4a47a 100644

/* This will get removed on driver unregister */
return create_boost_sysfs_file();
-@@ -2441,8 +2447,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2444,8 +2450,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);

@@ -52248,7 +52248,7 @@ index 1fd6eac..e4206c9 100644
return 0;
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
-index e84faec..03aaa9f 100644
+index f5815e1..106f6e1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -132,7 +132,7 @@ int drm_open(struct inode *inode, struct file *filp)
@@ -52935,7 +52935,7 @@ index 97f3a56..32c712e 100644
ret = drm_ioctl(filp, cmd, arg);

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
-index 3fc286c..4c19f25 100644
+index 3fc286cd..4c19f25 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -4511,15 +4511,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
@@ -55328,7 +55328,7 @@ index c13fb5b..55a3802 100644

*off += size;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index be34547..df73ac5 100644
+index 1606e7f..b207d4b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -404,7 +404,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
@@ -55340,7 +55340,7 @@ index be34547..df73ac5 100644

ret = create_gpadl_header(kbuffer, size, &msginfo);
if (ret)
-@@ -734,9 +734,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+@@ -737,9 +737,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
*/
@@ -57462,10 +57462,10 @@ index 4a95b22..874c182 100644
#include <linux/gameport.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
-index 9c0ea36..1e1a411 100644
+index f4e8fbe..0efd9d6 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
-@@ -1855,7 +1855,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
+@@ -1859,7 +1859,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)

static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
{
@@ -57474,7 +57474,7 @@ index 9c0ea36..1e1a411 100644

const struct ims_pcu_device_info *info;
int error;
-@@ -1886,7 +1886,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
+@@ -1890,7 +1890,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
}

/* Device appears to be operable, complete initialization */
@@ -65843,10 +65843,10 @@ index 9b56b40..f183a4d 100644
struct lance_private *lp = netdev_priv(dev);
int entry, skblen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
-index bbef959..999ab1d 100644
+index 1592e1c..26df6c5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
-@@ -1283,14 +1283,14 @@ do { \
+@@ -1285,14 +1285,14 @@ do { \
* operations, everything works on mask values.
*/
#define XMDIO_READ(_pdata, _mmd, _reg) \
@@ -65935,10 +65935,10 @@ index b3bc87f..5bdfdd3 100644
+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
+};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-index 1babcc1..aa7f8f4e 100644
+index ca106d4..36c4702 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
-@@ -2816,7 +2816,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+@@ -2818,7 +2818,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)

static int xgbe_init(struct xgbe_prv_data *pdata)
{
@@ -65947,7 +65947,7 @@ index 1babcc1..aa7f8f4e 100644
int ret;

DBGPR("-->xgbe_init\n");
-@@ -2882,107 +2882,102 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+@@ -2884,107 +2884,102 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
return 0;
}

@@ -66133,7 +66133,7 @@ index 1babcc1..aa7f8f4e 100644
+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
+};
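A large share of the renamed grsecurity patch is mechanical constification like the xgbe hunks above: function-pointer tables that used to be filled in at init time become a single const struct initializer. Once the table is const, the compiler places it in read-only memory and the pointers can no longer be overwritten at runtime, which removes a classic kernel exploitation target. A minimal sketch of the pattern (struct and function names are illustrative, not the driver's):

#include <stdio.h>

struct hw_ops {
	int  (*init)(void);
	void (*exit)(void);
};

static int  my_init(void) { puts("init"); return 0; }
static void my_exit(void) { puts("exit"); }

/* const: lives in .rodata, so the function pointers are immutable. */
static const struct hw_ops example_ops = {
	.init = my_init,
	.exit = my_exit,
};

int main(void)
{
	example_ops.init();
	/* example_ops.init = NULL;  -- would now fail to compile */
	example_ops.exit();
	return 0;
}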
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
-index 7f9216d..26872f6 100644
+index 0f0f3014..882be95 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -245,7 +245,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
@@ -66380,7 +66380,7 @@ index 7f9216d..26872f6 100644
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;

-@@ -1794,8 +1794,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+@@ -1812,8 +1812,8 @@ static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -66391,7 +66391,7 @@ index 7f9216d..26872f6 100644
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
-@@ -1865,7 +1865,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
+@@ -1883,7 +1883,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -68516,7 +68516,7 @@ index 75d07fa..d766d8e 100644
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
-index 0c9ef87..c10ec50 100644
+index 7a196a0..da60bf8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1312,7 +1312,7 @@ static void remove_one(struct pci_dev *pdev)
@@ -70074,10 +70074,10 @@ index 51fc0c3..6cc1baa 100644
#define VIRTNET_DRIVER_VERSION "1.0.0"

diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
-index bc744ac..2abf77e 100644
+index a2afb8e..6d66a2e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
-@@ -1297,7 +1297,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+@@ -1299,7 +1299,7 @@ static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

@@ -70086,7 +70086,7 @@ index bc744ac..2abf77e 100644
.kind = DRV_NAME,
.priv_size = sizeof(struct net_vrf),

-@@ -1334,7 +1334,7 @@ static int vrf_device_event(struct notifier_block *unused,
+@@ -1336,7 +1336,7 @@ static int vrf_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}

@@ -86198,7 +86198,7 @@ index 479e223..ba82b75 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index aef81a1..cf6b268 100644
+index c28ccf1..0f884ac 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -26,6 +26,7 @@
@@ -86220,6 +86220,19 @@ index aef81a1..cf6b268 100644
if (hub_is_superspeed(hub->hdev))
unit_load = 150;
else
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 3a47077..5cf8b9c 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -982,7 +982,7 @@ EXPORT_SYMBOL_GPL(usb_get_status);
+ * Return: Zero on success, or else the status code returned by the
+ * underlying usb_control_msg() call.
+ */
+-int usb_clear_halt(struct usb_device *dev, int pipe)
++int usb_clear_halt(struct usb_device *dev, unsigned int pipe)
+ {
+ int result;
+ int endp = usb_pipeendpoint(pipe);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f..54c64f4 100644
--- a/drivers/usb/core/sysfs.c
@@ -86894,6 +86907,37 @@ index 8fae28b..8b4bfec 100644


/* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 191b176..960b4ae 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -80,7 +80,7 @@ static int tweak_clear_halt_cmd(struct urb *urb)
+ struct usb_ctrlrequest *req;
+ int target_endp;
+ int target_dir;
+- int target_pipe;
++ unsigned int target_pipe;
+ int ret;
+
+ req = (struct usb_ctrlrequest *) urb->setup_packet;
+@@ -336,7 +336,7 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
+ return priv;
+ }
+
+-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
++static unsigned int get_pipe(struct stub_device *sdev, int epnum, int dir)
+ {
+ struct usb_device *udev = sdev->udev;
+ struct usb_host_endpoint *ep;
+@@ -447,7 +447,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ struct stub_priv *priv;
+ struct usbip_device *ud = &sdev->ud;
+ struct usb_device *udev = sdev->udev;
+- int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
++ unsigned int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+
+ priv = stub_priv_alloc(sdev, pdu);
+ if (!priv)
diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
index 88b71c4..31cc1ca6 100644
--- a/drivers/usb/usbip/vhci.h
@@ -87227,7 +87271,7 @@ index 9269d56..78d2a06 100644
};
EXPORT_SYMBOL_GPL(dummy_con);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
-index b87f5cf..6aad4f8 100644
+index 4db10d7..582743d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -106,7 +106,7 @@ static int fbcon_softback_size = 32768;
@@ -107732,10 +107776,10 @@ index 42145be..1f1db90 100644
static ssize_t session_write_kbytes_show(struct ext4_attr *a,
struct ext4_sb_info *sbi, char *buf)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
-index 4448ed3..523c675 100644
+index 3eeed8f..d68ad95 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
-@@ -414,7 +414,7 @@ static int
+@@ -409,7 +409,7 @@ static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
char *buffer, size_t buffer_size)
{
@@ -107744,7 +107788,7 @@ index 4448ed3..523c675 100644

for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
const struct xattr_handler *handler =
-@@ -435,9 +435,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
+@@ -430,9 +430,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
*buffer++ = 0;
}
rest -= size;
@@ -136720,7 +136764,7 @@ index ede6b97..1f5b11f 100644
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 6726440..96d599d 100644
+index e9fb2e8..872cabe 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -61,6 +61,7 @@
@@ -136767,7 +136811,7 @@ index 6726440..96d599d 100644

/**
* Creates a driver or general drm_ioctl_desc array entry for the given
-@@ -713,7 +716,8 @@ struct drm_driver {
+@@ -714,7 +717,8 @@ struct drm_driver {

/* List of devices hanging off this driver with stealth attach. */
struct list_head legacy_dev_list;
@@ -136777,7 +136821,7 @@ index 6726440..96d599d 100644

enum drm_minor_type {
DRM_MINOR_PRIMARY,
-@@ -731,7 +735,8 @@ struct drm_info_list {
+@@ -732,7 +736,8 @@ struct drm_info_list {
int (*show)(struct seq_file*, void*); /** show callback */
u32 driver_features; /**< Required driver features for this entry */
void *data;
@@ -136787,7 +136831,7 @@ index 6726440..96d599d 100644

/**
* debugfs node structure. This structure represents a debugfs file.
-@@ -792,7 +797,7 @@ struct drm_device {
+@@ -793,7 +798,7 @@ struct drm_device {

/** \name Usage Counters */
/*@{ */
@@ -144884,7 +144928,7 @@ index 33383ca..44211d6 100644

static __always_inline void put_unaligned_le16(u16 val, void *p)
diff --git a/include/linux/usb.h b/include/linux/usb.h
-index eba1f10..94c966f 100644
+index eba1f10..eac1b52 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -370,7 +370,7 @@ struct usb_bus {
@@ -144905,6 +144949,15 @@ index eba1f10..94c966f 100644

unsigned long active_duration;

+@@ -1700,7 +1700,7 @@ extern int usb_string(struct usb_device *dev, int index,
+ char *buf, size_t size);
+
+ /* wrappers that also update important state inside usbcore */
+-extern int usb_clear_halt(struct usb_device *dev, int pipe);
++extern int usb_clear_halt(struct usb_device *dev, unsigned int pipe);
+ extern int usb_reset_configuration(struct usb_device *dev);
+ extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
+ extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
@@ -1793,10 +1793,10 @@ void usb_sg_wait(struct usb_sg_request *io);

/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */
@@ -144920,6 +144973,15 @@ index eba1f10..94c966f 100644

#define usb_pipein(pipe) ((pipe) & USB_DIR_IN)
#define usb_pipeout(pipe) (!usb_pipein(pipe))
+@@ -1845,7 +1845,7 @@ usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
+ /*-------------------------------------------------------------------------*/
+
+ static inline __u16
+-usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
++usb_maxpacket(struct usb_device *udev, unsigned int pipe, int is_out)
+ {
+ struct usb_host_endpoint *ep;
+ unsigned epnum = usb_pipeendpoint(pipe);
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc137..9602956 100644
--- a/include/linux/usb/hcd.h
@@ -152684,7 +152746,7 @@ index 154fd68..f95f804 100644
(void *)current->task_state_change,
(void *)current->task_state_change);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index 37e2449..61f57aa 100644
+index c95c512..16f39ee 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -219,8 +219,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
@@ -152846,7 +152908,7 @@ index c242944..c6a1086 100644
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index 2516b8d..251b6ab 100644
+index f139f22..c040b45 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -362,8 +362,8 @@ static inline int has_pushable_tasks(struct rq *rq)
@@ -158185,7 +158247,7 @@ index 66ce6b4..c5f0a41 100644
err = -EPERM;
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
-index 665ab75..41833e6 100644
+index 665ab75..70e0033 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -14,6 +14,7 @@
@@ -158231,6 +158293,20 @@ index 665ab75..41833e6 100644
newflags |= flags;

/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+@@ -629,11 +639,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
+ * is also counted.
+ * Return value: previously mlocked page counts
+ */
+-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
++static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
+ unsigned long start, size_t len)
+ {
+ struct vm_area_struct *vma;
+- int count = 0;
++ unsigned long count = 0;
+
+ if (mm == NULL)
+ mm = current->mm;
@@ -695,6 +705,10 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
}

@@ -164507,7 +164583,7 @@ index f0f462c..e5d59e8 100644
}

diff --git a/net/core/sock.c b/net/core/sock.c
-index bc6543f..d9e3e41 100644
+index 470a204..f5adedf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -411,13 +411,13 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -164633,7 +164709,7 @@ index bc6543f..d9e3e41 100644
return -EFAULT;
lenout:
if (put_user(len, optlen))
-@@ -1517,7 +1520,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+@@ -1522,7 +1525,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_dst_cache = NULL;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
@@ -164642,7 +164718,7 @@ index bc6543f..d9e3e41 100644
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

-@@ -1547,7 +1550,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+@@ -1558,7 +1561,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_err_soft = 0;
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
@@ -164651,7 +164727,7 @@ index bc6543f..d9e3e41 100644

mem_cgroup_sk_alloc(newsk);
cgroup_sk_alloc(&newsk->sk_cgrp_data);
-@@ -2477,7 +2480,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+@@ -2488,7 +2491,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
@@ -164660,7 +164736,7 @@ index bc6543f..d9e3e41 100644
}
EXPORT_SYMBOL(sock_init_data);

-@@ -2601,6 +2604,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
+@@ -2612,6 +2615,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
int level, int type)
{
@@ -164668,7 +164744,7 @@ index bc6543f..d9e3e41 100644
struct sock_exterr_skb *serr;
struct sk_buff *skb;
int copied, err;
-@@ -2622,7 +2626,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+@@ -2633,7 +2637,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
sock_recv_timestamp(msg, sk, skb);

serr = SKB_EXT_ERR(skb);
@@ -164678,7 +164754,7 @@ index bc6543f..d9e3e41 100644

msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
-@@ -2885,8 +2890,9 @@ static int req_prot_init(const struct proto *prot)
+@@ -2891,8 +2896,9 @@ static int req_prot_init(const struct proto *prot)
int proto_register(struct proto *prot, int alloc_slab)
{
if (alloc_slab) {
@@ -164689,7 +164765,7 @@ index bc6543f..d9e3e41 100644
NULL);

if (prot->slab == NULL) {
-@@ -3074,7 +3080,7 @@ static __net_exit void proto_exit_net(struct net *net)
+@@ -3080,7 +3086,7 @@ static __net_exit void proto_exit_net(struct net *net)
}


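Several of the sock.c hunks above belong to grsecurity's PAX_REFCOUNT rework, which keeps plain atomic_t overflow-checked to catch reference-count wraps and annotates counters that may legitimately wrap (drop counts, allocation statistics) as atomic_unchecked_t so they bypass the detector. A conceptual sketch of the distinction, with saturation standing in for PaX's actual trap-and-kill response (types and behavior simplified):

#include <limits.h>
#include <stdio.h>

typedef struct { int v; } atomic_t;            /* checked in this model */
typedef struct { int v; } atomic_unchecked_t;  /* exempt from checking */

static void atomic_inc(atomic_t *a)
{
	if (a->v == INT_MAX) {          /* would wrap: refuse (PaX traps) */
		fprintf(stderr, "refcount overflow caught\n");
		return;
	}
	a->v++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *a)
{
	a->v++;                         /* plain arithmetic, no check */
}

int main(void)
{
	atomic_t ref = { INT_MAX };     /* refcount about to overflow */
	atomic_unchecked_t drops = { 0 };

	atomic_inc(&ref);               /* caught, ref.v stays INT_MAX */
	atomic_inc_unchecked(&drops);
	printf("ref=%d drops=%d\n", ref.v, drops.v);
	return 0;
}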
@@ -165264,10 +165340,10 @@ index 062a67c..cb05c97 100644
.exit = devinet_exit_net,
};
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
-index 5b03d7f..6c62eaf 100644
+index 6789e48..d779c45 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
-@@ -1140,12 +1140,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
+@@ -1141,12 +1141,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
@@ -165282,7 +165358,7 @@ index 5b03d7f..6c62eaf 100644
if (!ifa->ifa_dev->ifa_list) {
/* Last address was deleted from this interface.
* Disable IP.
-@@ -1185,7 +1185,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
+@@ -1186,7 +1186,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
@@ -166119,7 +166195,7 @@ index 80bc36b..d70d622 100644
.exit = ipv4_sysctl_exit_net,
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
-index ce42ded..9c93e33 100644
+index 7727ffe..9488999 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -288,11 +288,13 @@ static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -166270,7 +166346,7 @@ index bf1f3b2..83f355d 100644
.exit = tcp_net_metrics_exit,
};
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
-index 6234eba..8007145 100644
+index 8615a6b..772fcdb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -27,6 +27,10 @@
@@ -166284,7 +166360,7 @@ index 6234eba..8007145 100644
int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
-@@ -786,7 +790,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+@@ -787,7 +791,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* avoid becoming vulnerable to outside attack aiming at
* resetting legit local connections.
*/
@@ -167308,7 +167384,7 @@ index b2e61a0..bf47484 100644
}

diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
-index e4a8000..ae30c92 100644
+index 40a289f..c8715aa 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -78,6 +78,10 @@ static u32 udp6_ehashfn(const struct net *net,
@@ -172274,7 +172350,7 @@ index 0917f04..f4e3d8c 100644

if (!proc_create("x25/route", S_IRUGO, init_net.proc_net,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
-index 5bf7e1bf..5ef3f83 100644
+index e0437a7..05fba66 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -338,7 +338,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
@@ -172501,10 +172577,10 @@ index 35a7e79..35847ab 100644
__xfrm_sysctl_init(net);

diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
-index 671a1d0..1b8c39e 100644
+index a7e27e1..0040091 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
-@@ -2471,7 +2471,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+@@ -2478,7 +2478,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;

{
@@ -224530,7 +224606,7 @@ index cd0e0eb..89543da 100644
}
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
-index 4c93520..e4032f9 100644
+index f3b1d7f..6645b81 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -403,7 +403,7 @@ static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
@@ -224568,7 +224644,7 @@ index 4c93520..e4032f9 100644
}
#endif
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
-index 86240d0..08b468d 100644
+index 3f4efcb..da7bb45 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -50,7 +50,7 @@ struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
@@ -224580,7 +224656,7 @@ index 86240d0..08b468d 100644

f->head = NULL;
f->tail = NULL;
-@@ -96,7 +96,7 @@ void snd_seq_fifo_clear(struct snd_seq_fifo *f)
+@@ -99,7 +99,7 @@ void snd_seq_fifo_clear(struct snd_seq_fifo *f)
unsigned long flags;

/* clear overflow flag */
@@ -224589,7 +224665,7 @@ index 86240d0..08b468d 100644

snd_use_lock_sync(&f->use_lock);
spin_lock_irqsave(&f->lock, flags);
-@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+@@ -126,7 +126,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
if (err < 0) {
if ((err == -ENOMEM) || (err == -EAGAIN))
@@ -224612,7 +224688,7 @@ index 062c446..a4b6f4c 100644
};

diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
-index dfa5156..05c2b75 100644
+index 5847c44..cfec4ed 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -87,7 +87,7 @@ int snd_seq_dump_var_event(const struct snd_seq_event *event,

diff --git a/4.9.18/4425_grsec_remove_EI_PAX.patch b/4.9.20/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 4.9.18/4425_grsec_remove_EI_PAX.patch
rename to 4.9.20/4425_grsec_remove_EI_PAX.patch

diff --git a/4.9.18/4426_default_XATTR_PAX_FLAGS.patch b/4.9.20/4426_default_XATTR_PAX_FLAGS.patch
similarity index 100%
rename from 4.9.18/4426_default_XATTR_PAX_FLAGS.patch
rename to 4.9.20/4426_default_XATTR_PAX_FLAGS.patch

diff --git a/4.9.18/4427_force_XATTR_PAX_tmpfs.patch b/4.9.20/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 4.9.18/4427_force_XATTR_PAX_tmpfs.patch
rename to 4.9.20/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/4.9.18/4430_grsec-remove-localversion-grsec.patch b/4.9.20/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 4.9.18/4430_grsec-remove-localversion-grsec.patch
rename to 4.9.20/4430_grsec-remove-localversion-grsec.patch

diff --git a/4.9.18/4435_grsec-mute-warnings.patch b/4.9.20/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 4.9.18/4435_grsec-mute-warnings.patch
rename to 4.9.20/4435_grsec-mute-warnings.patch

diff --git a/4.9.18/4440_grsec-remove-protected-paths.patch b/4.9.20/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 4.9.18/4440_grsec-remove-protected-paths.patch
rename to 4.9.20/4440_grsec-remove-protected-paths.patch

diff --git a/4.9.18/4450_grsec-kconfig-default-gids.patch b/4.9.20/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 4.9.18/4450_grsec-kconfig-default-gids.patch
rename to 4.9.20/4450_grsec-kconfig-default-gids.patch

diff --git a/4.9.18/4465_selinux-avc_audit-log-curr_ip.patch b/4.9.20/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 4.9.18/4465_selinux-avc_audit-log-curr_ip.patch
rename to 4.9.20/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/4.9.18/4470_disable-compat_vdso.patch b/4.9.20/4470_disable-compat_vdso.patch
similarity index 100%
rename from 4.9.18/4470_disable-compat_vdso.patch
rename to 4.9.20/4470_disable-compat_vdso.patch

diff --git a/4.9.18/4475_emutramp_default_on.patch b/4.9.20/4475_emutramp_default_on.patch
similarity index 100%
rename from 4.9.18/4475_emutramp_default_on.patch
rename to 4.9.20/4475_emutramp_default_on.patch