commit: 921209b50bed8224e6851746ec54a689edec3123
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 28 10:39:35 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul 28 10:39:35 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=921209b5

Linux patch 4.14.59

0000_README | 4 +
1058_linux-4.14.59.patch | 2213 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2217 insertions(+)

diff --git a/0000_README b/0000_README
index 7e760df..7ca7e18 100644
--- a/0000_README
+++ b/0000_README
@@ -275,6 +275,10 @@ Patch: 1057_linux-4.14.58.patch
From: http://www.kernel.org
Desc: Linux 4.14.58

+Patch: 1058_linux-4.14.59.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.59
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1058_linux-4.14.59.patch b/1058_linux-4.14.59.patch
new file mode 100644
index 0000000..d6b2600
--- /dev/null
+++ b/1058_linux-4.14.59.patch
@@ -0,0 +1,2213 @@
+diff --git a/Makefile b/Makefile
+index ffc9b4e3867e..81b0e99dce80 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 58
++SUBLEVEL = 59
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -642,6 +642,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
++KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
+
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
+diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
+index 10a405d593df..c782b10ddf50 100644
+--- a/arch/mips/ath79/common.c
++++ b/arch/mips/ath79/common.c
+@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
+
+ void ath79_ddr_wb_flush(u32 reg)
+ {
+- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
++ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
+
+ /* Flush the DDR write buffer. */
+ __raw_writel(0x1, flush_reg);
+diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
+index 9632436d74d7..c2e94cf5ecda 100644
+--- a/arch/mips/pci/pci.c
++++ b/arch/mips/pci/pci.c
+@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ phys_addr_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+- *end = rsrc->start + size;
++ *end = rsrc->start + size - 1;
+ }
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index 44fdf4786638..6f67ff5a5267 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
+ extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries);
+ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa);
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+ extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa);
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+ extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
+ extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+ #endif
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 4dffa611376d..e14cec6bc339 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
+ return H_TOO_HARD;
+
+- if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
++ if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
+ return H_HARDWARE;
+
+ if (mm_iommu_mapped_inc(mem))
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index c32e9bfe75b1..648cf6c01348 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ if (!mem)
+ return H_TOO_HARD;
+
+- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
++ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
++ &hpa)))
+ return H_HARDWARE;
+
+ pua = (void *) vmalloc_to_phys(pua);
+@@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+
+ mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
+ if (mem)
+- prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
++ prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
++ IOMMU_PAGE_SHIFT_4K, &tces) == 0;
+ }
+
+ if (!prereg) {
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index e0a2d8e806ed..816055927ee4 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -19,6 +19,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/swap.h>
+ #include <asm/mmu_context.h>
++#include <asm/pte-walk.h>
+
+ static DEFINE_MUTEX(mem_list_mutex);
+
+@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
+ struct rcu_head rcu;
+ unsigned long used;
+ atomic64_t mapped;
++ unsigned int pageshift;
+ u64 ua; /* userspace address */
+ u64 entries; /* number of entries in hpas[] */
+ u64 *hpas; /* vmalloc'ed */
+@@ -126,6 +128,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ {
+ struct mm_iommu_table_group_mem_t *mem;
+ long i, j, ret = 0, locked_entries = 0;
++ unsigned int pageshift;
++ unsigned long flags;
+ struct page *page = NULL;
+
+ mutex_lock(&mem_list_mutex);
+@@ -160,6 +164,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ goto unlock_exit;
+ }
+
++ /*
++ * For a starting point for a maximum page size calculation
++ * we use @ua and @entries natural alignment to allow IOMMU pages
++ * smaller than huge pages but still bigger than PAGE_SIZE.
++ */
++ mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
+ mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
+ if (!mem->hpas) {
+ kfree(mem);
+@@ -200,6 +210,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ }
+ }
+ populate:
++ pageshift = PAGE_SHIFT;
++ if (PageCompound(page)) {
++ pte_t *pte;
++ struct page *head = compound_head(page);
++ unsigned int compshift = compound_order(head);
++
++ local_irq_save(flags); /* disables as well */
++ pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
++ local_irq_restore(flags);
++
++ /* Double check it is still the same pinned page */
++ if (pte && pte_page(*pte) == head &&
++ pageshift == compshift)
++ pageshift = max_t(unsigned int, pageshift,
++ PAGE_SHIFT);
++ }
++ mem->pageshift = min(mem->pageshift, pageshift);
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+ }
+
+@@ -350,7 +377,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+ EXPORT_SYMBOL_GPL(mm_iommu_find);
+
+ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa)
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
+ {
+ const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+ u64 *va = &mem->hpas[entry];
+@@ -358,6 +385,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ if (entry >= mem->entries)
+ return -EFAULT;
+
++ if (pageshift > mem->pageshift)
++ return -EFAULT;
++
+ *hpa = *va | (ua & ~PAGE_MASK);
+
+ return 0;
+@@ -365,7 +395,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
+ EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
+
+ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+- unsigned long ua, unsigned long *hpa)
++ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
+ {
+ const long entry = (ua - mem->ua) >> PAGE_SHIFT;
+ void *va = &mem->hpas[entry];
+@@ -374,6 +404,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
+ if (entry >= mem->entries)
+ return -EFAULT;
+
++ if (pageshift > mem->pageshift)
++ return -EFAULT;
++
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return -EFAULT;
+diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
+index e1a5fbeae08d..5d7554c025fd 100644
+--- a/arch/x86/xen/xen-pvh.S
++++ b/arch/x86/xen/xen-pvh.S
+@@ -54,6 +54,9 @@
+ * charge of setting up it's own stack, GDT and IDT.
+ */
+
++#define PVH_GDT_ENTRY_CANARY 4
++#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
++
+ ENTRY(pvh_start_xen)
+ cld
+
+@@ -98,6 +101,12 @@ ENTRY(pvh_start_xen)
+ /* 64-bit entry point. */
+ .code64
+ 1:
++ /* Set base address in stack canary descriptor. */
++ mov $MSR_GS_BASE,%ecx
++ mov $_pa(canary), %eax
++ xor %edx, %edx
++ wrmsr
++
+ call xen_prepare_pvh
+
+ /* startup_64 expects boot_params in %rsi. */
+@@ -107,6 +116,17 @@ ENTRY(pvh_start_xen)
+
+ #else /* CONFIG_X86_64 */
+
++ /* Set base address in stack canary descriptor. */
++ movl $_pa(gdt_start),%eax
++ movl $_pa(canary),%ecx
++ movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
++ shrl $16, %ecx
++ movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
++ movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
++
++ mov $PVH_CANARY_SEL,%eax
++ mov %eax,%gs
++
+ call mk_early_pgtbl_32
+
+ mov $_pa(initial_page_table), %eax
+@@ -150,9 +170,13 @@ gdt_start:
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
+ #endif
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
++ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
+ gdt_end:
+
+- .balign 4
++ .balign 16
++canary:
++ .fill 48, 1, 0
++
+ early_stack:
+ .fill 256, 1, 0
+ early_stack_end:
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index ad44b40fe284..55fc31f6fe7f 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -401,14 +401,6 @@ re_probe:
+ goto probe_failed;
+ }
+
+- /*
+- * Ensure devices are listed in devices_kset in correct order
+- * It's important to move Dev to the end of devices_kset before
+- * calling .probe, because it could be recursive and parent Dev
+- * should always go first
+- */
+- devices_kset_move_last(dev);
+-
+ if (dev->bus->probe) {
+ ret = dev->bus->probe(dev);
+ if (ret)
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
+index 5b9d549aa791..e7926da59214 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
+@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
+ nouveau_display(dev)->init = nv04_display_init;
+ nouveau_display(dev)->fini = nv04_display_fini;
+
++ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
++ dev->driver->driver_features &= ~DRIVER_ATOMIC;
++
+ nouveau_hw_save_vga_fonts(dev, 1);
+
+ nv04_crtc_create(dev, 0);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 595630d1fb9e..362a34cb435d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
+ int nouveau_modeset = -1;
+ module_param_named(modeset, nouveau_modeset, int, 0400);
+
++MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
++static int nouveau_atomic = 0;
++module_param_named(atomic, nouveau_atomic, int, 0400);
++
+ MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
+ static int nouveau_runtime_pm = -1;
+ module_param_named(runpm, nouveau_runtime_pm, int, 0400);
+@@ -383,6 +387,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
+
+ pci_set_master(pdev);
+
++ if (nouveau_atomic)
++ driver_pci.driver_features |= DRIVER_ATOMIC;
++
+ ret = drm_get_pci_dev(pdev, pent, &driver_pci);
+ if (ret) {
+ nvkm_device_del(&device);
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index a29474528e85..926ec51ba5be 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -4150,7 +4150,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
+ nv50_disp_atomic_commit_tail(state);
+
+ drm_for_each_crtc(crtc, dev) {
+- if (crtc->state->enable) {
++ if (crtc->state->active) {
+ if (!drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return 0;
+@@ -4398,10 +4398,6 @@ nv50_display_destroy(struct drm_device *dev)
+ kfree(disp);
+ }
+
+-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+-static int nouveau_atomic = 0;
+-module_param_named(atomic, nouveau_atomic, int, 0400);
+-
+ int
+ nv50_display_create(struct drm_device *dev)
+ {
+@@ -4426,8 +4422,6 @@ nv50_display_create(struct drm_device *dev)
+ disp->disp = &nouveau_display(dev)->disp;
+ dev->mode_config.funcs = &nv50_disp_func;
+ dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
+- if (nouveau_atomic)
+- dev->driver->driver_features |= DRIVER_ATOMIC;
+
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 61084ba69a99..3d154eb63dcf 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
+ static int bond_option_mode_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+ {
+- if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
+- netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+- newval->string);
+- /* disable arp monitoring */
+- bond->params.arp_interval = 0;
+- /* set miimon to default value */
+- bond->params.miimon = BOND_DEFAULT_MIIMON;
+- netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+- bond->params.miimon);
++ if (!bond_mode_uses_arp(newval->value)) {
++ if (bond->params.arp_interval) {
++ netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
++ newval->string);
++ /* disable arp monitoring */
++ bond->params.arp_interval = 0;
++ }
++
++ if (!bond->params.miimon) {
++ /* set miimon to default value */
++ bond->params.miimon = BOND_DEFAULT_MIIMON;
++ netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
++ bond->params.miimon);
++ }
+ }
+
+ if (newval->value == BOND_MODE_ALB)
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 5d4e61741476..ca3fa82316c2 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -1073,7 +1073,8 @@ static void m_can_chip_config(struct net_device *dev)
+
+ } else {
+ /* Version 3.1.x or 3.2.x */
+- cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
++ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
++ CCCR_NISO);
+
+ /* Only 3.2.x has NISO Bit implemented */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
+index 3c51a884db87..fa689854f16b 100644
+--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
++++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
+@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
+ #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
+ #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
+
++#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
++ ((u32)(y) << 16) | \
++ ((u32)(z) << 8))
++
+ /* System Control Registers Bits */
+ #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
+ #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
+@@ -783,6 +787,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
+ "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
+ hw_ver_major, hw_ver_minor, hw_ver_sub);
+
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
++ * 64-bit logical addresses: this workaround forces usage of 32-bit
++ * DMA addresses only when such a fw is detected.
++ */
++ if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
++ PCIEFD_FW_VERSION(3, 3, 0)) {
++ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (err)
++ dev_warn(&pdev->dev,
++ "warning: can't set DMA mask %llxh (err %d)\n",
++ DMA_BIT_MASK(32), err);
++ }
++#endif
++
+ /* stop system clock */
+ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+ PCIEFD_REG_SYS_CTL_CLR);
+diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
+index 89aec07c225f..5a24039733ef 100644
+--- a/drivers/net/can/xilinx_can.c
++++ b/drivers/net/can/xilinx_can.c
+@@ -2,6 +2,7 @@
+ *
+ * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2009 PetaLogix. All rights reserved.
++ * Copyright (C) 2017 Sandvik Mining and Construction Oy
+ *
+ * Description:
+ * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+@@ -25,8 +26,10 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/skbuff.h>
++#include <linux/spinlock.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/can/dev.h>
+@@ -101,7 +104,7 @@ enum xcan_reg {
+ #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
+ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
+ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
+- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
++ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
+
+ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
+@@ -118,6 +121,7 @@ enum xcan_reg {
+ /**
+ * struct xcan_priv - This definition define CAN driver instance
+ * @can: CAN private data structure.
++ * @tx_lock: Lock for synchronizing TX interrupt handling
+ * @tx_head: Tx CAN packets ready to send on the queue
+ * @tx_tail: Tx CAN packets successfully sended on the queue
+ * @tx_max: Maximum number packets the driver can send
+@@ -132,6 +136,7 @@ enum xcan_reg {
+ */
+ struct xcan_priv {
+ struct can_priv can;
++ spinlock_t tx_lock;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ unsigned int tx_max;
+@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
+ .brp_inc = 1,
+ };
+
++#define XCAN_CAP_WATERMARK 0x0001
++struct xcan_devtype_data {
++ unsigned int caps;
++};
++
+ /**
+ * xcan_write_reg_le - Write a value to the device register little endian
+ * @priv: Driver private data structure
+@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
+ usleep_range(500, 10000);
+ }
+
++ /* reset clears FIFOs */
++ priv->tx_head = 0;
++ priv->tx_tail = 0;
++
+ return 0;
+ }
+
+@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 id, dlc, data[2] = {0, 0};
++ unsigned long flags;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
++
++ spin_lock_irqsave(&priv->tx_lock, flags);
++
+ priv->tx_head++;
+
+ /* Write the Frame to Xilinx CAN TX FIFO */
+@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ stats->tx_bytes += cf->can_dlc;
+ }
+
++ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
++ if (priv->tx_max > 1)
++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
++
+ /* Check if the TX buffer is full */
+ if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
+ netif_stop_queue(ndev);
+
++ spin_unlock_irqrestore(&priv->tx_lock, flags);
++
+ return NETDEV_TX_OK;
+ }
+
+@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
+ return 1;
+ }
+
++/**
++ * xcan_current_error_state - Get current error state from HW
++ * @ndev: Pointer to net_device structure
++ *
++ * Checks the current CAN error state from the HW. Note that this
++ * only checks for ERROR_PASSIVE and ERROR_WARNING.
++ *
++ * Return:
++ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
++ * otherwise.
++ */
++static enum can_state xcan_current_error_state(struct net_device *ndev)
++{
++ struct xcan_priv *priv = netdev_priv(ndev);
++ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
++
++ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
++ return CAN_STATE_ERROR_PASSIVE;
++ else if (status & XCAN_SR_ERRWRN_MASK)
++ return CAN_STATE_ERROR_WARNING;
++ else
++ return CAN_STATE_ERROR_ACTIVE;
++}
++
++/**
++ * xcan_set_error_state - Set new CAN error state
++ * @ndev: Pointer to net_device structure
++ * @new_state: The new CAN state to be set
++ * @cf: Error frame to be populated or NULL
++ *
++ * Set new CAN error state for the device, updating statistics and
++ * populating the error frame if given.
++ */
++static void xcan_set_error_state(struct net_device *ndev,
++ enum can_state new_state,
++ struct can_frame *cf)
++{
++ struct xcan_priv *priv = netdev_priv(ndev);
++ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
++ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
++ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
++
++ priv->can.state = new_state;
++
++ if (cf) {
++ cf->can_id |= CAN_ERR_CRTL;
++ cf->data[6] = txerr;
++ cf->data[7] = rxerr;
++ }
++
++ switch (new_state) {
++ case CAN_STATE_ERROR_PASSIVE:
++ priv->can.can_stats.error_passive++;
++ if (cf)
++ cf->data[1] = (rxerr > 127) ?
++ CAN_ERR_CRTL_RX_PASSIVE :
++ CAN_ERR_CRTL_TX_PASSIVE;
++ break;
++ case CAN_STATE_ERROR_WARNING:
++ priv->can.can_stats.error_warning++;
++ if (cf)
++ cf->data[1] |= (txerr > rxerr) ?
++ CAN_ERR_CRTL_TX_WARNING :
++ CAN_ERR_CRTL_RX_WARNING;
++ break;
++ case CAN_STATE_ERROR_ACTIVE:
++ if (cf)
++ cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
++ break;
++ default:
++ /* non-ERROR states are handled elsewhere */
++ WARN_ON(1);
++ break;
++ }
++}
++
++/**
++ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
++ * @ndev: Pointer to net_device structure
++ *
++ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
++ * the performed RX/TX has caused it to drop to a lesser state and set
++ * the interface state accordingly.
++ */
++static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
++{
++ struct xcan_priv *priv = netdev_priv(ndev);
++ enum can_state old_state = priv->can.state;
++ enum can_state new_state;
++
++ /* changing error state due to successful frame RX/TX can only
++ * occur from these states
++ */
++ if (old_state != CAN_STATE_ERROR_WARNING &&
++ old_state != CAN_STATE_ERROR_PASSIVE)
++ return;
++
++ new_state = xcan_current_error_state(ndev);
++
++ if (new_state != old_state) {
++ struct sk_buff *skb;
++ struct can_frame *cf;
++
++ skb = alloc_can_err_skb(ndev, &cf);
++
++ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
++
++ if (skb) {
++ struct net_device_stats *stats = &ndev->stats;
++
++ stats->rx_packets++;
++ stats->rx_bytes += cf->can_dlc;
++ netif_rx(skb);
++ }
++ }
++}
++
+ /**
+ * xcan_err_interrupt - error frame Isr
+ * @ndev: net_device pointer
+@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+- u32 err_status, status, txerr = 0, rxerr = 0;
++ u32 err_status;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
+ priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
+- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
+- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
+- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
+- status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if (isr & XCAN_IXR_BSOFF_MASK) {
+ priv->can.state = CAN_STATE_BUS_OFF;
+@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
+- priv->can.state = CAN_STATE_ERROR_PASSIVE;
+- priv->can.can_stats.error_passive++;
+- if (skb) {
+- cf->can_id |= CAN_ERR_CRTL;
+- cf->data[1] = (rxerr > 127) ?
+- CAN_ERR_CRTL_RX_PASSIVE :
+- CAN_ERR_CRTL_TX_PASSIVE;
+- cf->data[6] = txerr;
+- cf->data[7] = rxerr;
+- }
+- } else if (status & XCAN_SR_ERRWRN_MASK) {
+- priv->can.state = CAN_STATE_ERROR_WARNING;
+- priv->can.can_stats.error_warning++;
+- if (skb) {
+- cf->can_id |= CAN_ERR_CRTL;
+- cf->data[1] |= (txerr > rxerr) ?
+- CAN_ERR_CRTL_TX_WARNING :
+- CAN_ERR_CRTL_RX_WARNING;
+- cf->data[6] = txerr;
+- cf->data[7] = rxerr;
+- }
++ } else {
++ enum can_state new_state = xcan_current_error_state(ndev);
++
++ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+ }
+
+ /* Check for Arbitration lost interrupt */
+@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
+ if (isr & XCAN_IXR_RXOFLW_MASK) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
+
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
+- if (isr & XCAN_IXR_RXOK_MASK) {
+- priv->write_reg(priv, XCAN_ICR_OFFSET,
+- XCAN_IXR_RXOK_MASK);
+- work_done += xcan_rx(ndev);
+- } else {
+- priv->write_reg(priv, XCAN_ICR_OFFSET,
+- XCAN_IXR_RXNEMP_MASK);
+- break;
+- }
++ work_done += xcan_rx(ndev);
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+- if (work_done)
++ if (work_done) {
+ can_led_event(ndev, CAN_LED_EVENT_RX);
++ xcan_update_error_state_after_rxtx(ndev);
++ }
+
+ if (work_done < quota) {
+ napi_complete_done(napi, work_done);
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
++ ier |= XCAN_IXR_RXNEMP_MASK;
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
+ return work_done;
+@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
+ {
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
++ unsigned int frames_in_fifo;
++ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
++ unsigned long flags;
++ int retries = 0;
++
++ /* Synchronize with xmit as we need to know the exact number
++ * of frames in the FIFO to stay in sync due to the TXFEMP
++ * handling.
++ * This also prevents a race between netif_wake_queue() and
++ * netif_stop_queue().
++ */
++ spin_lock_irqsave(&priv->tx_lock, flags);
++
++ frames_in_fifo = priv->tx_head - priv->tx_tail;
++
++ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
++ /* clear TXOK anyway to avoid getting back here */
++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
++ spin_unlock_irqrestore(&priv->tx_lock, flags);
++ return;
++ }
++
++ /* Check if 2 frames were sent (TXOK only means that at least 1
++ * frame was sent).
++ */
++ if (frames_in_fifo > 1) {
++ WARN_ON(frames_in_fifo > priv->tx_max);
++
++ /* Synchronize TXOK and isr so that after the loop:
++ * (1) isr variable is up-to-date at least up to TXOK clear
++ * time. This avoids us clearing a TXOK of a second frame
++ * but not noticing that the FIFO is now empty and thus
++ * marking only a single frame as sent.
++ * (2) No TXOK is left. Having one could mean leaving a
++ * stray TXOK as we might process the associated frame
++ * via TXFEMP handling as we read TXFEMP *after* TXOK
++ * clear to satisfy (1).
++ */
++ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
++ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
++ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
++ }
+
+- while ((priv->tx_head - priv->tx_tail > 0) &&
+- (isr & XCAN_IXR_TXOK_MASK)) {
++ if (isr & XCAN_IXR_TXFEMP_MASK) {
++ /* nothing in FIFO anymore */
++ frames_sent = frames_in_fifo;
++ }
++ } else {
++ /* single frame in fifo, just clear TXOK */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
++ }
++
++ while (frames_sent--) {
+ can_get_echo_skb(ndev, priv->tx_tail %
+ priv->tx_max);
+ priv->tx_tail++;
+ stats->tx_packets++;
+- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+- can_led_event(ndev, CAN_LED_EVENT_TX);
++
+ netif_wake_queue(ndev);
++
++ spin_unlock_irqrestore(&priv->tx_lock, flags);
++
++ can_led_event(ndev, CAN_LED_EVENT_TX);
++ xcan_update_error_state_after_rxtx(ndev);
+ }
+
+ /**
+@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr, ier;
++ u32 isr_errors;
+
+ /* Get the interrupt status from Xilinx CAN */
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+ xcan_tx_interrupt(ndev, isr);
+
+ /* Check for the type of error interrupt and Processing it */
+- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
+- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
+- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
+- XCAN_IXR_ARBLST_MASK));
++ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
++ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
++ if (isr_errors) {
++ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
+ xcan_err_interrupt(ndev, isr);
+ }
+
+ /* Check for the type of receive interrupt and Processing it */
+- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
++ if (isr & XCAN_IXR_RXNEMP_MASK) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
++ ier &= ~XCAN_IXR_RXNEMP_MASK;
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ napi_schedule(&priv->napi);
+ }
+@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
+ static void xcan_chip_stop(struct net_device *ndev)
+ {
+ struct xcan_priv *priv = netdev_priv(ndev);
+- u32 ier;
+
+ /* Disable interrupts and leave the can in configuration mode */
+- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+- ier &= ~XCAN_INTR_ALL;
+- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
++ set_reset_mode(ndev);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
+ */
+ static int __maybe_unused xcan_suspend(struct device *dev)
+ {
+- if (!device_may_wakeup(dev))
+- return pm_runtime_force_suspend(dev);
++ struct net_device *ndev = dev_get_drvdata(dev);
+
+- return 0;
++ if (netif_running(ndev)) {
++ netif_stop_queue(ndev);
++ netif_device_detach(ndev);
++ xcan_chip_stop(ndev);
++ }
++
++ return pm_runtime_force_suspend(dev);
+ }
+
+ /**
+@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
+ */
+ static int __maybe_unused xcan_resume(struct device *dev)
+ {
+- if (!device_may_wakeup(dev))
+- return pm_runtime_force_resume(dev);
++ struct net_device *ndev = dev_get_drvdata(dev);
++ int ret;
+
+- return 0;
++ ret = pm_runtime_force_resume(dev);
++ if (ret) {
++ dev_err(dev, "pm_runtime_force_resume failed on resume\n");
++ return ret;
++ }
++
++ if (netif_running(ndev)) {
++ ret = xcan_chip_start(ndev);
++ if (ret) {
++ dev_err(dev, "xcan_chip_start failed on resume\n");
++ return ret;
++ }
++
++ netif_device_attach(ndev);
++ netif_start_queue(ndev);
++ }
+
++ return 0;
+ }
+
+ /**
+@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+- if (netif_running(ndev)) {
+- netif_stop_queue(ndev);
+- netif_device_detach(ndev);
+- }
+-
+- priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
+- priv->can.state = CAN_STATE_SLEEPING;
+-
+ clk_disable_unprepare(priv->bus_clk);
+ clk_disable_unprepare(priv->can_clk);
+
+ return 0;
+@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
+- u32 isr, status;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret) {
+@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
+ return ret;
+ }
+
+- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+- status = priv->read_reg(priv, XCAN_SR_OFFSET);
+-
+- if (netif_running(ndev)) {
+- if (isr & XCAN_IXR_BSOFF_MASK) {
+- priv->can.state = CAN_STATE_BUS_OFF;
+- priv->write_reg(priv, XCAN_SRR_OFFSET,
+- XCAN_SRR_RESET_MASK);
+- } else if ((status & XCAN_SR_ESTAT_MASK) ==
+- XCAN_SR_ESTAT_MASK) {
+- priv->can.state = CAN_STATE_ERROR_PASSIVE;
+- } else if (status & XCAN_SR_ERRWRN_MASK) {
+- priv->can.state = CAN_STATE_ERROR_WARNING;
+- } else {
+- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+- }
+- netif_device_attach(ndev);
+- netif_start_queue(ndev);
+- }
+-
+ return 0;
+ }
+
+@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
+ };
+
++static const struct xcan_devtype_data xcan_zynq_data = {
++ .caps = XCAN_CAP_WATERMARK,
++};
++
++/* Match table for OF platform binding */
++static const struct of_device_id xcan_of_match[] = {
++ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
++ { .compatible = "xlnx,axi-can-1.00.a", },
++ { /* end of list */ },
++};
++MODULE_DEVICE_TABLE(of, xcan_of_match);
++
+ /**
+ * xcan_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
+ struct resource *res; /* IO mem resources */
+ struct net_device *ndev;
+ struct xcan_priv *priv;
++ const struct of_device_id *of_id;
++ int caps = 0;
+ void __iomem *addr;
+- int ret, rx_max, tx_max;
++ int ret, rx_max, tx_max, tx_fifo_depth;
+
+ /* Get the virtual base address for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
+ goto err;
+ }
+
+- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
++ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
++ &tx_fifo_depth);
+ if (ret < 0)
+ goto err;
+
+@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
+ if (ret < 0)
+ goto err;
+
++ of_id = of_match_device(xcan_of_match, &pdev->dev);
++ if (of_id) {
++ const struct xcan_devtype_data *devtype_data = of_id->data;
++
++ if (devtype_data)
++ caps = devtype_data->caps;
++ }
++
++ /* There is no way to directly figure out how many frames have been
++ * sent when the TXOK interrupt is processed. If watermark programming
++ * is supported, we can have 2 frames in the FIFO and use TXFEMP
++ * to determine if 1 or 2 frames have been sent.
++ * Theoretically we should be able to use TXFWMEMP to determine up
++ * to 3 frames, but it seems that after putting a second frame in the
++ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
++ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
++ * sent), which is not a sensible state - possibly TXFWMEMP is not
++ * completely synchronized with the rest of the bits?
++ */
++ if (caps & XCAN_CAP_WATERMARK)
++ tx_max = min(tx_fifo_depth, 2);
++ else
++ tx_max = 1;
++
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
+ if (!ndev)
+@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
+ CAN_CTRLMODE_BERR_REPORTING;
+ priv->reg_base = addr;
+ priv->tx_max = tx_max;
++ spin_lock_init(&priv->tx_lock);
+
+ /* Get IRQ for the device */
+ ndev->irq = platform_get_irq(pdev, 0);
+@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
+
+ pm_runtime_put(&pdev->dev);
+
+- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
++ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+- priv->tx_max);
++ tx_fifo_depth, priv->tx_max);
+
+ return 0;
+
+@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-/* Match table for OF platform binding */
+-static const struct of_device_id xcan_of_match[] = {
+- { .compatible = "xlnx,zynq-can-1.0", },
+- { .compatible = "xlnx,axi-can-1.00.a", },
+- { /* end of list */ },
+-};
+-MODULE_DEVICE_TABLE(of, xcan_of_match);
+-
+ static struct platform_driver xcan_driver = {
+ .probe = xcan_probe,
+ .remove = xcan_remove,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index a069fcc823c3..b26da0952a4d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -2957,7 +2957,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
++ int local_qpn = vhcr->in_modifier & 0xffffff;
+
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index 12d3ced61114..e87923e046c9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+- if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+- break;
+ if (!work_pending(&arfs_rule->arfs_work) &&
+ rps_may_expire_flow(priv->netdev,
+ arfs_rule->rxq, arfs_rule->flow_id,
+ arfs_rule->filter_id)) {
+ hlist_del_init(&arfs_rule->hlist);
+ hlist_add_head(&arfs_rule->hlist, &del_list);
++ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
++ break;
+ }
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
++ if (skb->encapsulation)
++ return -EPROTONOSUPPORT;
++
+ arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+index 84dd63e74041..27040009d87a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+@@ -545,6 +545,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+ {
+ struct mlx5e_tstamp *tstamp = &priv->tstamp;
++ u64 overflow_cycles;
+ u64 ns;
+ u64 frac = 0;
+ u32 dev_freq;
+@@ -569,10 +570,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+
+ /* Calculate period in seconds to call the overflow watchdog - to make
+ * sure counter is checked at least once every wrap around.
++ * The period is calculated as the minimum between max HW cycles count
++ * (The clock source mask) and max amount of cycles that can be
++ * multiplied by clock multiplier where the result doesn't exceed
++ * 64bits.
+ */
+- ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
++ overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
++ overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
++
++ ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
+ frac, &frac);
+- do_div(ns, NSEC_PER_SEC / 2 / HZ);
++ do_div(ns, NSEC_PER_SEC / HZ);
+ tstamp->overflow_period = ns;
+
+ INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index dba6d17ad885..47d2ef2fb9b3 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -511,7 +511,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
+ * negotiation may already be done and aneg interrupt may not be
+ * generated.
+ */
+- if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
++ if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
+ err = phy_aneg_done(phydev);
+ if (err > 0) {
+ trigger = true;
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index bbdb46916dc3..13d39a72fe0d 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -636,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
+ return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
+ }
+
+-/* Add new entry to forwarding table -- assumes lock held */
++static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
++ const u8 *mac, __u16 state,
++ __be32 src_vni, __u8 ndm_flags)
++{
++ struct vxlan_fdb *f;
++
++ f = kmalloc(sizeof(*f), GFP_ATOMIC);
++ if (!f)
++ return NULL;
++ f->state = state;
++ f->flags = ndm_flags;
++ f->updated = f->used = jiffies;
++ f->vni = src_vni;
++ INIT_LIST_HEAD(&f->remotes);
++ memcpy(f->eth_addr, mac, ETH_ALEN);
++
++ return f;
++}
++
+ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
++ const u8 *mac, union vxlan_addr *ip,
++ __u16 state, __be16 port, __be32 src_vni,
++ __be32 vni, __u32 ifindex, __u8 ndm_flags,
++ struct vxlan_fdb **fdb)
++{
++ struct vxlan_rdst *rd = NULL;
++ struct vxlan_fdb *f;
++ int rc;
++
++ if (vxlan->cfg.addrmax &&
++ vxlan->addrcnt >= vxlan->cfg.addrmax)
++ return -ENOSPC;
++
++ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
++ f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
++ if (!f)
++ return -ENOMEM;
++
++ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
++ if (rc < 0) {
++ kfree(f);
++ return rc;
++ }
++
++ ++vxlan->addrcnt;
++ hlist_add_head_rcu(&f->hlist,
++ vxlan_fdb_head(vxlan, mac, src_vni));
++
++ *fdb = f;
++
++ return 0;
++}
++
++/* Add new entry to forwarding table -- assumes lock held */
++static int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 src_vni, __be32 vni,
+@@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
+
+- if (vxlan->cfg.addrmax &&
+- vxlan->addrcnt >= vxlan->cfg.addrmax)
+- return -ENOSPC;
+-
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+- f = kmalloc(sizeof(*f), GFP_ATOMIC);
+- if (!f)
+- return -ENOMEM;
+-
+- notify = 1;
+- f->state = state;
+- f->flags = ndm_flags;
+- f->updated = f->used = jiffies;
+- f->vni = src_vni;
+- INIT_LIST_HEAD(&f->remotes);
+- memcpy(f->eth_addr, mac, ETH_ALEN);
+-
+- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+- if (rc < 0) {
+- kfree(f);
++ rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
++ vni, ifindex, ndm_flags, &f);
++ if (rc < 0)
+ return rc;
+- }
+-
+- ++vxlan->addrcnt;
+- hlist_add_head_rcu(&f->hlist,
+- vxlan_fdb_head(vxlan, mac, src_vni));
++ notify = 1;
+ }
+
+ if (notify) {
+@@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
+ kfree(f);
+ }
+
+-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
++static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
++ bool do_notify)
+ {
+ netdev_dbg(vxlan->dev,
+ "delete %pM\n", f->eth_addr);
+
+ --vxlan->addrcnt;
+- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
++ if (do_notify)
++ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+
+ hlist_del_rcu(&f->hlist);
+ call_rcu(&f->rcu, vxlan_fdb_free);
+@@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ return -EAFNOSUPPORT;
+
+ spin_lock_bh(&vxlan->hash_lock);
+- err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
++ err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
+ port, src_vni, vni, ifindex, ndm->ndm_flags);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+@@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+ goto out;
+ }
+
+- vxlan_fdb_destroy(vxlan, f);
++ vxlan_fdb_destroy(vxlan, f, true);
+
+ out:
+ return 0;
+@@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
+
+ /* close off race between vxlan_flush and incoming packets */
+ if (netif_running(dev))
+- vxlan_fdb_create(vxlan, src_mac, src_ip,
++ vxlan_fdb_update(vxlan, src_mac, src_ip,
+ NUD_REACHABLE,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+@@ -2360,7 +2395,7 @@ static void vxlan_cleanup(unsigned long arg)
+ "garbage collect %pM\n",
+ f->eth_addr);
+ f->state = NUD_STALE;
+- vxlan_fdb_destroy(vxlan, f);
++ vxlan_fdb_destroy(vxlan, f, true);
+ } else if (time_before(timeout, next_timer))
+ next_timer = timeout;
+ }
+@@ -2411,7 +2446,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
+ spin_lock_bh(&vxlan->hash_lock);
+ f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
+ if (f)
+- vxlan_fdb_destroy(vxlan, f);
++ vxlan_fdb_destroy(vxlan, f, true);
+ spin_unlock_bh(&vxlan->hash_lock);
+ }
+
+@@ -2465,7 +2500,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
+ continue;
+ /* the all_zeros_mac entry is deleted at vxlan_uninit */
+ if (!is_zero_ether_addr(f->eth_addr))
+- vxlan_fdb_destroy(vxlan, f);
++ vxlan_fdb_destroy(vxlan, f, true);
+ }
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+@@ -3157,6 +3192,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
++ struct vxlan_fdb *f = NULL;
+ int err;
+
+ err = vxlan_dev_configure(net, dev, conf, false, extack);
+@@ -3170,24 +3206,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+- NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+- NTF_SELF);
++ NTF_SELF, &f);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
++ if (err)
++ goto errout;
++
++ err = rtnl_configure_link(dev, NULL);
+ if (err) {
+- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+- return err;
++ unregister_netdevice(dev);
++ goto errout;
+ }
+
++ /* notify default fdb entry */
++ if (f)
++ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
++
+ list_add(&vxlan->next, &vn->vxlan_list);
+ return 0;
++errout:
++ if (f)
++ vxlan_fdb_destroy(vxlan, f, false);
++ return err;
+ }
+
+ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
+@@ -3416,6 +3463,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct vxlan_rdst old_dst;
+ struct vxlan_config conf;
++ struct vxlan_fdb *f = NULL;
+ int err;
+
+ err = vxlan_nl2conf(tb, data,
+@@ -3444,16 +3492,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+- NLM_F_CREATE | NLM_F_APPEND,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+- NTF_SELF);
++ NTF_SELF, &f);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock);
+ return err;
+ }
++ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index d99daf69e501..fe229d63deec 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -207,11 +207,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
+ int chars_sent = 0;
+ char __user *cp;
+ char *init;
++ size_t bytes_per_ch = unicode ? 3 : 1;
+ u16 ch;
+ int empty;
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+
++ if (count < bytes_per_ch)
++ return -EINVAL;
++
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ while (1) {
+ prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
+@@ -237,7 +241,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
+ init = get_initstring();
+
+ /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
+- while (chars_sent <= count - 3) {
++ while (chars_sent <= count - bytes_per_ch) {
+ if (speakup_info.flushing) {
+ speakup_info.flushing = 0;
+ ch = '\x18';
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 3b9aadd007f5..f2f31fc16f29 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1844,6 +1844,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
++ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
++ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
++ },
+
+ { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
+ .driver_info = CLEAR_HALT_CONDITIONS,
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e5f77e611451..a8bc48b26c23 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1141,10 +1141,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+
+ if (!udev || udev->state == USB_STATE_NOTATTACHED) {
+ /* Tell hub_wq to disconnect the device or
+- * check for a new connection
++ * check for a new connection or over current condition.
++ * Based on USB2.0 Spec Section 11.12.5,
++ * C_PORT_OVER_CURRENT could be set while
++ * PORT_OVER_CURRENT is not. So check for any of them.
+ */
+ if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
+- (portstatus & USB_PORT_STAT_OVERCURRENT))
++ (portstatus & USB_PORT_STAT_OVERCURRENT) ||
++ (portchange & USB_PORT_STAT_C_OVERCURRENT))
+ set_bit(port1, hub->change_bits);
+
+ } else if (portstatus & USB_PORT_STAT_ENABLE) {
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 87484f71b2ab..46d3b0fc00c5 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -2606,34 +2606,29 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
+
+ #define DWC2_USB_DMA_ALIGN 4
+
+-struct dma_aligned_buffer {
+- void *kmalloc_ptr;
+- void *old_xfer_buffer;
+- u8 data[0];
+-};
+-
+ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
+ {
+- struct dma_aligned_buffer *temp;
++ void *stored_xfer_buffer;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+- temp = container_of(urb->transfer_buffer,
+- struct dma_aligned_buffer, data);
++ /* Restore urb->transfer_buffer from the end of the allocated area */
++ memcpy(&stored_xfer_buffer, urb->transfer_buffer +
++ urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
+
+ if (usb_urb_dir_in(urb))
+- memcpy(temp->old_xfer_buffer, temp->data,
++ memcpy(stored_xfer_buffer, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+- urb->transfer_buffer = temp->old_xfer_buffer;
+- kfree(temp->kmalloc_ptr);
++ kfree(urb->transfer_buffer);
++ urb->transfer_buffer = stored_xfer_buffer;
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+ }
+
+ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ {
+- struct dma_aligned_buffer *temp, *kmalloc_ptr;
++ void *kmalloc_ptr;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg ||
+@@ -2641,22 +2636,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+ !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
+ return 0;
+
+- /* Allocate a buffer with enough padding for alignment */
++ /*
++ * Allocate a buffer with enough padding for original transfer_buffer
++ * pointer. This allocation is guaranteed to be aligned properly for
++ * DMA
++ */
+ kmalloc_size = urb->transfer_buffer_length +
+- sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
++ sizeof(urb->transfer_buffer);
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+- /* Position our struct dma_aligned_buffer such that data is aligned */
+- temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
+- temp->kmalloc_ptr = kmalloc_ptr;
+- temp->old_xfer_buffer = urb->transfer_buffer;
++ /*
++ * Position value of original urb->transfer_buffer pointer to the end
++ * of allocation for later referencing
++ */
++ memcpy(kmalloc_ptr + urb->transfer_buffer_length,
++ &urb->transfer_buffer, sizeof(urb->transfer_buffer));
++
+ if (usb_urb_dir_out(urb))
+- memcpy(temp->data, urb->transfer_buffer,
++ memcpy(kmalloc_ptr, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+- urb->transfer_buffer = temp->data;
++ urb->transfer_buffer = kmalloc_ptr;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 7b53ac548b1a..52e6897fa35a 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3243,7 +3243,7 @@ static int ffs_func_setup(struct usb_function *f,
+ __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+- return USB_GADGET_DELAYED_STATUS;
++ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
+ }
+
+ static bool ffs_func_req_match(struct usb_function *f,
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index b751dd60e41a..b4c68f3b82be 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -467,7 +467,7 @@ static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+ if (!mem)
+ return -EINVAL;
+
+- ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
++ ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
+ if (ret)
+ return -EINVAL;
+
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0480cd9a9e81..71b81980787f 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -338,10 +338,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ return rc;
+
+ /* BB eventually switch this to SMB2 specific small buf size */
+- if (smb2_command == SMB2_SET_INFO)
+- *request_buf = cifs_buf_get();
+- else
+- *request_buf = cifs_small_buf_get();
++ *request_buf = cifs_small_buf_get();
+ if (*request_buf == NULL) {
+ /* BB should we add a retry in here if not a writepage? */
+ return -ENOMEM;
+@@ -3171,7 +3168,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+
+ rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov);
+- cifs_buf_release(req);
++ cifs_small_buf_release(req);
+ rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
+
+ if (rc != 0)
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 9cf971c68401..6dd77767fd5b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3167,6 +3167,8 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
+ return __skb_grow(skb, len);
+ }
+
++#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
++
+ #define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ skb != (struct sk_buff *)(queue); \
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index fb653736f335..3173dd12b8cc 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -372,6 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
++void tcp_enter_quickack_mode(struct sock *sk);
+ static inline void tcp_dec_quickack_mode(struct sock *sk,
+ const unsigned int pkts)
+ {
+@@ -560,6 +561,7 @@ void tcp_send_fin(struct sock *sk);
+ void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+ int tcp_send_synack(struct sock *);
+ void tcp_push_one(struct sock *, unsigned int mss_now);
++void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
+ void tcp_send_ack(struct sock *sk);
+ void tcp_send_delayed_ack(struct sock *sk);
+ void tcp_send_loss_probe(struct sock *sk);
+@@ -857,6 +859,11 @@ struct tcp_skb_cb {
+ * as TCP moves IP6CB into a different location in skb->cb[]
+ */
+ static inline int tcp_v6_iif(const struct sk_buff *skb)
++{
++ return TCP_SKB_CB(skb)->header.h6.iif;
++}
++
++static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+ {
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
+
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 4cfdad08aca0..efe396cc77b5 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2402,9 +2402,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
+ return err;
+ }
+
+- dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+-
+- __dev_notify_flags(dev, old_flags, ~0U);
++ if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
++ __dev_notify_flags(dev, old_flags, 0U);
++ } else {
++ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
++ __dev_notify_flags(dev, old_flags, ~0U);
++ }
+ return 0;
+ }
+ EXPORT_SYMBOL(rtnl_configure_link);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 23041b5c0b27..2e5eeba97de9 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3675,6 +3675,7 @@ normal:
+ net_warn_ratelimited(
+ "skb_segment: too many frags: %u %u\n",
+ pos, mss);
++ err = -EINVAL;
+ goto err;
+ }
+
+@@ -3713,11 +3714,10 @@ skip_fraglist:
+
+ perform_csum_check:
+ if (!csum) {
+- if (skb_has_shared_frag(nskb)) {
+- err = __skb_linearize(nskb);
+- if (err)
+- goto err;
+- }
++ if (skb_has_shared_frag(nskb) &&
++ __skb_linearize(nskb))
++ goto err;
++
+ if (!nskb->remcsum_offload)
+ nskb->ip_summed = CHECKSUM_NONE;
+ SKB_GSO_CB(nskb)->csum =
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index fbeb35ad804b..502aae3e3ab8 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1201,8 +1201,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ if (pmc) {
+ im->interface = pmc->interface;
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+- im->sfmode = pmc->sfmode;
+- if (pmc->sfmode == MCAST_INCLUDE) {
++ if (im->sfmode == MCAST_INCLUDE) {
+ im->tomb = pmc->tomb;
+ im->sources = pmc->sources;
+ for (psf = im->sources; psf; psf = psf->sf_next)
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 63d5d66e040a..e2dd325bed9b 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
+ to->dev = from->dev;
+ to->mark = from->mark;
+
++ skb_copy_hash(to, from);
++
+ /* Copy the flags to each fragment. */
+ IPCB(to)->flags = IPCB(from)->flags;
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index d07ba4d5917b..048d5f6dd320 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -148,15 +148,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
+ {
+ struct sockaddr_in sin;
+ const struct iphdr *iph = ip_hdr(skb);
+- __be16 *ports = (__be16 *)skb_transport_header(skb);
++ __be16 *ports;
++ int end;
+
+- if (skb_transport_offset(skb) + 4 > (int)skb->len)
++ end = skb_transport_offset(skb) + 4;
++ if (end > 0 && !pskb_may_pull(skb, end))
+ return;
+
+ /* All current transport protocols have the port numbers in the
+ * first four bytes of the transport header and this function is
+ * written with this assumption in mind.
+ */
++ ports = (__be16 *)skb_transport_header(skb);
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = iph->daddr;
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index 5f5e5936760e..c78fb53988a1 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- /* State has changed from CE=0 to CE=1 and delayed
+- * ACK has not sent yet.
+- */
+- if (!ca->ce_state && ca->delayed_ack_reserved) {
+- u32 tmp_rcv_nxt;
+-
+- /* Save current rcv_nxt. */
+- tmp_rcv_nxt = tp->rcv_nxt;
+-
+- /* Generate previous ack with CE=0. */
+- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+- tp->rcv_nxt = ca->prior_rcv_nxt;
+-
+- tcp_send_ack(sk);
+-
+- /* Recover current rcv_nxt. */
+- tp->rcv_nxt = tmp_rcv_nxt;
++ if (!ca->ce_state) {
++ /* State has changed from CE=0 to CE=1, force an immediate
++ * ACK to reflect the new CE state. If an ACK was delayed,
++ * send that first to reflect the prior CE state.
++ */
++ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
++ __tcp_send_ack(sk, ca->prior_rcv_nxt);
++ tcp_enter_quickack_mode(sk);
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+@@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- /* State has changed from CE=1 to CE=0 and delayed
+- * ACK has not sent yet.
+- */
+- if (ca->ce_state && ca->delayed_ack_reserved) {
+- u32 tmp_rcv_nxt;
+-
+- /* Save current rcv_nxt. */
+- tmp_rcv_nxt = tp->rcv_nxt;
+-
+- /* Generate previous ack with CE=1. */
+- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+- tp->rcv_nxt = ca->prior_rcv_nxt;
+-
+- tcp_send_ack(sk);
+-
+- /* Recover current rcv_nxt. */
+- tp->rcv_nxt = tmp_rcv_nxt;
++ if (ca->ce_state) {
++ /* State has changed from CE=1 to CE=0, force an immediate
++ * ACK to reflect the new CE state. If an ACK was delayed,
++ * send that first to reflect the prior CE state.
++ */
++ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
++ __tcp_send_ack(sk, ca->prior_rcv_nxt);
++ tcp_enter_quickack_mode(sk);
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 5711b1b12d28..b86e7b8beb1d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -209,13 +209,14 @@ static void tcp_incr_quickack(struct sock *sk)
+ icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+ }
+
+-static void tcp_enter_quickack_mode(struct sock *sk)
++void tcp_enter_quickack_mode(struct sock *sk)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ tcp_incr_quickack(sk);
+ icsk->icsk_ack.pingpong = 0;
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ }
++EXPORT_SYMBOL(tcp_enter_quickack_mode);
+
+ /* Send ACKs quickly, if "quick" count is not exhausted
+ * and the session is not interactive.
+@@ -4331,6 +4332,23 @@ static bool tcp_try_coalesce(struct sock *sk,
+ return true;
+ }
+
++static bool tcp_ooo_try_coalesce(struct sock *sk,
++ struct sk_buff *to,
++ struct sk_buff *from,
++ bool *fragstolen)
++{
++ bool res = tcp_try_coalesce(sk, OOO_QUEUE, to, from, fragstolen);
++
++ /* In case tcp_drop() is called later, update to->gso_segs */
++ if (res) {
++ u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
++ max_t(u16, 1, skb_shinfo(from)->gso_segs);
++
++ skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
++ }
++ return res;
++}
++
+ static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+ {
+ sk_drops_add(sk, skb);
+@@ -4462,8 +4480,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ /* In the typical case, we are adding an skb to the end of the list.
+ * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+ */
+- if (tcp_try_coalesce(sk, OOO_QUEUE, tp->ooo_last_skb,
+- skb, &fragstolen)) {
++ if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
++ skb, &fragstolen)) {
+ coalesce_done:
+ tcp_grow_window(sk, skb);
+ kfree_skb_partial(skb, fragstolen);
+@@ -4491,7 +4509,7 @@ coalesce_done:
+ /* All the bits are present. Drop. */
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+- __kfree_skb(skb);
++ tcp_drop(sk, skb);
+ skb = NULL;
+ tcp_dsack_set(sk, seq, end_seq);
+ goto add_sack;
+@@ -4510,11 +4528,11 @@ coalesce_done:
+ TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+- __kfree_skb(skb1);
++ tcp_drop(sk, skb1);
+ goto merge_right;
+ }
+- } else if (tcp_try_coalesce(sk, OOO_QUEUE, skb1,
+- skb, &fragstolen)) {
++ } else if (tcp_ooo_try_coalesce(sk, skb1,
++ skb, &fragstolen)) {
+ goto coalesce_done;
+ }
+ p = &parent->rb_right;
+@@ -4876,6 +4894,7 @@ end:
+ static void tcp_collapse_ofo_queue(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
++ u32 range_truesize, sum_tiny = 0;
+ struct sk_buff *skb, *head;
+ struct rb_node *p;
+ u32 start, end;
+@@ -4894,6 +4913,7 @@ new_range:
+ }
+ start = TCP_SKB_CB(skb)->seq;
+ end = TCP_SKB_CB(skb)->end_seq;
++ range_truesize = skb->truesize;
+
+ for (head = skb;;) {
+ skb = tcp_skb_next(skb, NULL);
+@@ -4904,11 +4924,20 @@ new_range:
+ if (!skb ||
+ after(TCP_SKB_CB(skb)->seq, end) ||
+ before(TCP_SKB_CB(skb)->end_seq, start)) {
+- tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+- head, skb, start, end);
++ /* Do not attempt collapsing tiny skbs */
++ if (range_truesize != head->truesize ||
++ end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
++ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
++ head, skb, start, end);
++ } else {
++ sum_tiny += range_truesize;
++ if (sum_tiny > sk->sk_rcvbuf >> 3)
++ return;
++ }
+ goto new_range;
+ }
+
++ range_truesize += skb->truesize;
+ if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
+ start = TCP_SKB_CB(skb)->seq;
+ if (after(TCP_SKB_CB(skb)->end_seq, end))
+@@ -4923,6 +4952,7 @@ new_range:
+ * 2) not add too big latencies if thousands of packets sit there.
+ * (But if application shrinks SO_RCVBUF, we could still end up
+ * freeing whole queue here)
++ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
+ *
+ * Return true if queue has shrunk.
+ */
+@@ -4930,20 +4960,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct rb_node *node, *prev;
++ int goal;
+
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
+ return false;
+
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
++ goal = sk->sk_rcvbuf >> 3;
+ node = &tp->ooo_last_skb->rbnode;
+ do {
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
++ goal -= rb_to_skb(node)->truesize;
+ tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+- sk_mem_reclaim(sk);
+- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+- !tcp_under_memory_pressure(sk))
+- break;
++ if (!prev || goal <= 0) {
++ sk_mem_reclaim(sk);
++ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
++ !tcp_under_memory_pressure(sk))
++ break;
++ goal = sk->sk_rcvbuf >> 3;
++ }
+ node = prev;
+ } while (node);
+ tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+@@ -4978,6 +5014,9 @@ static int tcp_prune_queue(struct sock *sk)
+ else if (tcp_under_memory_pressure(sk))
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+
++ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
++ return 0;
++
+ tcp_collapse_ofo_queue(sk);
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ tcp_collapse(sk, &sk->sk_receive_queue, NULL,
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index abae5196cd3a..3d8f6f342cb1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -175,8 +175,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
+ }
+
+ /* Account for an ACK we sent. */
+-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
++static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
++ u32 rcv_nxt)
+ {
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (unlikely(rcv_nxt != tp->rcv_nxt))
++ return; /* Special ACK sent by DCTCP to reflect ECN */
+ tcp_dec_quickack_mode(sk, pkts);
+ inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
+ }
+@@ -984,8 +989,8 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
+ * We are working here with either a clone of the original
+ * SKB, or a fresh unique copy made by the retransmit engine.
+ */
+-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+- gfp_t gfp_mask)
++static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
++ int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
+ {
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_sock *inet;
+@@ -1057,7 +1062,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ th->source = inet->inet_sport;
+ th->dest = inet->inet_dport;
+ th->seq = htonl(tcb->seq);
+- th->ack_seq = htonl(tp->rcv_nxt);
++ th->ack_seq = htonl(rcv_nxt);
+ *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
+ tcb->tcp_flags);
+
+@@ -1098,7 +1103,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ icsk->icsk_af_ops->send_check(sk, skb);
+
+ if (likely(tcb->tcp_flags & TCPHDR_ACK))
+- tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
++ tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
+
+ if (skb->len != tcp_header_size) {
+ tcp_event_data_sent(tp, sk);
+@@ -1135,6 +1140,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ return err;
+ }
+
++static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
++ gfp_t gfp_mask)
++{
++ return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
++ tcp_sk(sk)->rcv_nxt);
++}
++
+ /* This routine just queues the buffer for sending.
+ *
+ * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
+@@ -3551,7 +3563,7 @@ void tcp_send_delayed_ack(struct sock *sk)
+ }
+
+ /* This routine sends an ack and also updates the window. */
+-void tcp_send_ack(struct sock *sk)
++void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
+ {
+ struct sk_buff *buff;
+
+@@ -3586,9 +3598,14 @@ void tcp_send_ack(struct sock *sk)
+ skb_set_tcp_pure_ack(buff);
+
+ /* Send it off, this clears delayed acks for us. */
+- tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
++ __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
++}
++EXPORT_SYMBOL_GPL(__tcp_send_ack);
++
++void tcp_send_ack(struct sock *sk)
++{
++ __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
+ }
+-EXPORT_SYMBOL_GPL(tcp_send_ack);
+
+ /* This routine sends a packet with an out of date sequence
+ * number. It assumes the other end will try to ack it.
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 453dc3726199..461825e0680f 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -708,13 +708,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
+ }
+ if (np->rxopt.bits.rxorigdstaddr) {
+ struct sockaddr_in6 sin6;
+- __be16 *ports = (__be16 *) skb_transport_header(skb);
++ __be16 *ports;
++ int end;
+
+- if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
++ end = skb_transport_offset(skb) + 4;
++ if (end <= 0 || pskb_may_pull(skb, end)) {
+ /* All current transport protocols have the port numbers in the
+ * first four bytes of the transport header and this function is
+ * written with this assumption in mind.
+ */
++ ports = (__be16 *)skb_transport_header(skb);
+
+ sin6.sin6_family = AF_INET6;
+ sin6.sin6_addr = ipv6_hdr(skb)->daddr;
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 5acb54405b10..c5f2b17b7ee1 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -405,9 +405,10 @@ static int icmp6_iif(const struct sk_buff *skb)
+
+ /* for local traffic to local address, skb dev is the loopback
+ * device. Check if there is a dst attached to the skb and if so
+- * get the real device index.
++ * get the real device index. Same is needed for replies to a link
++ * local address on a device enslaved to an L3 master device
+ */
+- if (unlikely(iif == LOOPBACK_IFINDEX)) {
++ if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
+ const struct rt6_info *rt6 = skb_rt6_info(skb);
+
+ if (rt6)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 32fcce711855..1da021527fcd 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -595,6 +595,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
+ to->dev = from->dev;
+ to->mark = from->mark;
+
++ skb_copy_hash(to, from);
++
+ #ifdef CONFIG_NET_SCHED
+ to->tc_index = from->tc_index;
+ #endif
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 9a38a2c641fa..6fd913d63835 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ if (pmc) {
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+- im->mca_sfmode = pmc->mca_sfmode;
+- if (pmc->mca_sfmode == MCAST_INCLUDE) {
++ if (im->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 35e8aef9ceed..ba8586aadffa 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -918,7 +918,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ &tcp_hashinfo, NULL, 0,
+ &ipv6h->saddr,
+ th->source, &ipv6h->daddr,
+- ntohs(th->source), tcp_v6_iif(skb),
++ ntohs(th->source),
++ tcp_v6_iif_l3_slave(skb),
+ tcp_v6_sdif(skb));
+ if (!sk1)
+ goto out;
+@@ -1573,7 +1574,8 @@ do_time_wait:
+ skb, __tcp_hdrlen(th),
+ &ipv6_hdr(skb)->saddr, th->source,
+ &ipv6_hdr(skb)->daddr,
+- ntohs(th->dest), tcp_v6_iif(skb),
++ ntohs(th->dest),
++ tcp_v6_iif_l3_slave(skb),
+ sdif);
+ if (sk2) {
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 8ee4e667a414..fb79caf56d0e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -135,9 +135,10 @@ static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
+ pfrag->offset += use;
+
+ sge = sg + num_elem - 1;
+- if (num_elem > first_coalesce && sg_page(sg) == pfrag->page &&
+- sg->offset + sg->length == orig_offset) {
+- sg->length += use;
++
++ if (num_elem > first_coalesce && sg_page(sge) == pfrag->page &&
++ sge->offset + sge->length == orig_offset) {
++ sge->length += use;
+ } else {
+ sge++;
+ sg_unmark_end(sge);