commit: 44705e257afae352ada6c5bd078b9d9eafc0243b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 29 14:26:24 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 29 14:26:24 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=44705e25

Linux patch 3.19.6

 0000_README             |    4 +
 1005_linux-3.19.6.patch | 1674 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1678 insertions(+)

diff --git a/0000_README b/0000_README
index 89efc26..25bc90f 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-3.19.5.patch
 From: http://www.kernel.org
 Desc: Linux 3.19.5
 
+Patch: 1005_linux-3.19.6.patch
+From: http://www.kernel.org
+Desc: Linux 3.19.6
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1005_linux-3.19.6.patch b/1005_linux-3.19.6.patch
new file mode 100644
index 0000000..e702f01
--- /dev/null
+++ b/1005_linux-3.19.6.patch
@@ -0,0 +1,1674 @@
+diff --git a/Makefile b/Makefile
+index 633b5f0f11a0..65c7c8756803 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 19
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Diseased Newt
+
+diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
+index 66781bf34077..c72412415093 100644
+--- a/arch/arm/mm/hugetlbpage.c
++++ b/arch/arm/mm/hugetlbpage.c
+@@ -36,12 +36,6 @@
+ * of type casting from pmd_t * to pte_t *.
+ */
+
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+- int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pud_huge(pud_t pud)
+ {
+ return 0;
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 023747bf4dd7..2de9d2e59d96 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ }
+ #endif
+
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+- int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ return !(pmd_val(pmd) & PMD_TABLE_BIT);
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 76069c18ee42..52b7604b5215 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
+ return 0;
+ }
+
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
+-{
+- return NULL;
+-}
+-
+ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 3c32075d2945..7ca80ac42ed5 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ return 0;
+ }
+
+-struct page *follow_huge_addr(struct mm_struct *mm,
+- unsigned long address, int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ return pmd_page_shift(pmd) > PAGE_SHIFT;
+diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
+index 4ec8ee10d371..06e0f421b41b 100644
+--- a/arch/mips/mm/hugetlbpage.c
++++ b/arch/mips/mm/hugetlbpage.c
+@@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+ return 0;
+ }
+
+-struct page *
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+@@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
+ {
+ return (pud_val(pud) & _PAGE_HUGE) != 0;
+ }
+-
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int write)
+-{
+- struct page *page;
+-
+- page = pte_page(*(pte_t *)pmd);
+- if (page)
+- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+- return page;
+-}
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 620d0ec93e6f..7e408bfc7948 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -714,6 +714,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ return NULL;
+ }
+
++struct page *
++follow_huge_pud(struct mm_struct *mm, unsigned long address,
++ pud_t *pud, int write)
++{
++ BUG();
++ return NULL;
++}
++
+ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
+ unsigned long sz)
+ {
+diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
+index 3c80d2e38f03..210ffede0153 100644
+--- a/arch/s390/mm/hugetlbpage.c
++++ b/arch/s390/mm/hugetlbpage.c
+@@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ return 0;
+ }
+
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+- int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ if (!MACHINE_HAS_HPAGE)
+@@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
+ {
+ return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmdp, int write)
+-{
+- struct page *page;
+-
+- if (!MACHINE_HAS_HPAGE)
+- return NULL;
+-
+- page = pmd_page(*pmdp);
+- if (page)
+- page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+- return page;
+-}
+diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
+index d7762349ea48..534bc978af8a 100644
+--- a/arch/sh/mm/hugetlbpage.c
++++ b/arch/sh/mm/hugetlbpage.c
+@@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ return 0;
+ }
+
+-struct page *follow_huge_addr(struct mm_struct *mm,
+- unsigned long address, int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ return 0;
+@@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
+ {
+ return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int write)
+-{
+- return NULL;
+-}
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index d329537739c6..4242eab12e10 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ return entry;
+ }
+
+-struct page *follow_huge_addr(struct mm_struct *mm,
+- unsigned long address, int write)
+-{
+- return ERR_PTR(-EINVAL);
+-}
+-
+ int pmd_huge(pmd_t pmd)
+ {
+ return 0;
+@@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
+ {
+ return 0;
+ }
+-
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int write)
+-{
+- return NULL;
+-}
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c |
258 |
+index 3270e0019266..8416240c322c 100644 |
259 |
+--- a/arch/tile/mm/hugetlbpage.c |
260 |
++++ b/arch/tile/mm/hugetlbpage.c |
261 |
+@@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
262 |
+ return NULL; |
263 |
+ } |
264 |
+ |
265 |
+-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, |
266 |
+- int write) |
267 |
+-{ |
268 |
+- return ERR_PTR(-EINVAL); |
269 |
+-} |
270 |
+- |
271 |
+ int pmd_huge(pmd_t pmd) |
272 |
+ { |
273 |
+ return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE); |
274 |
+@@ -166,28 +160,6 @@ int pud_huge(pud_t pud) |
275 |
+ return !!(pud_val(pud) & _PAGE_HUGE_PAGE); |
276 |
+ } |
277 |
+ |
278 |
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, |
279 |
+- pmd_t *pmd, int write) |
280 |
+-{ |
281 |
+- struct page *page; |
282 |
+- |
283 |
+- page = pte_page(*(pte_t *)pmd); |
284 |
+- if (page) |
285 |
+- page += ((address & ~PMD_MASK) >> PAGE_SHIFT); |
286 |
+- return page; |
287 |
+-} |
288 |
+- |
289 |
+-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, |
290 |
+- pud_t *pud, int write) |
291 |
+-{ |
292 |
+- struct page *page; |
293 |
+- |
294 |
+- page = pte_page(*(pte_t *)pud); |
295 |
+- if (page) |
296 |
+- page += ((address & ~PUD_MASK) >> PAGE_SHIFT); |
297 |
+- return page; |
298 |
+-} |
299 |
+- |
300 |
+ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) |
301 |
+ { |
302 |
+ return 0; |
303 |
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
304 |
+index d4c58d884838..312446418b36 100644 |
305 |
+--- a/arch/x86/kvm/vmx.c |
306 |
++++ b/arch/x86/kvm/vmx.c |
307 |
+@@ -2404,8 +2404,7 @@ static __init void nested_vmx_setup_ctls_msrs(void) |
308 |
+ |
309 |
+ if (enable_ept) { |
310 |
+ /* nested EPT: emulate EPT also to L1 */ |
311 |
+- nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT | |
312 |
+- SECONDARY_EXEC_UNRESTRICTED_GUEST; |
313 |
++ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; |
314 |
+ nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | |
315 |
+ VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | |
316 |
+ VMX_EPT_INVEPT_BIT; |
317 |
+@@ -2419,6 +2418,10 @@ static __init void nested_vmx_setup_ctls_msrs(void) |
318 |
+ } else |
319 |
+ nested_vmx_ept_caps = 0; |
320 |
+ |
321 |
++ if (enable_unrestricted_guest) |
322 |
++ nested_vmx_secondary_ctls_high |= |
323 |
++ SECONDARY_EXEC_UNRESTRICTED_GUEST; |
324 |
++ |
325 |
+ /* miscellaneous data */ |
326 |
+ rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); |
327 |
+ nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; |
328 |
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c |
329 |
+index 006cc914994b..9161f764121e 100644 |
330 |
+--- a/arch/x86/mm/hugetlbpage.c |
331 |
++++ b/arch/x86/mm/hugetlbpage.c |
332 |
+@@ -52,20 +52,8 @@ int pud_huge(pud_t pud) |
333 |
+ return 0; |
334 |
+ } |
335 |
+ |
336 |
+-struct page * |
337 |
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address, |
338 |
+- pmd_t *pmd, int write) |
339 |
+-{ |
340 |
+- return NULL; |
341 |
+-} |
342 |
+ #else |
343 |
+ |
344 |
+-struct page * |
345 |
+-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
346 |
+-{ |
347 |
+- return ERR_PTR(-EINVAL); |
348 |
+-} |
349 |
+- |
350 |
+ /* |
351 |
+ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal |
352 |
+ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. |
353 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
354 |
+index 0dceba1a2ba1..68ad39a4b221 100644 |
355 |
+--- a/drivers/net/bonding/bond_main.c |
356 |
++++ b/drivers/net/bonding/bond_main.c |
357 |
+@@ -3797,7 +3797,8 @@ static inline int bond_slave_override(struct bonding *bond, |
358 |
+ /* Find out if any slaves have the same mapping as this skb. */ |
359 |
+ bond_for_each_slave_rcu(bond, slave, iter) { |
360 |
+ if (slave->queue_id == skb->queue_mapping) { |
361 |
+- if (bond_slave_can_tx(slave)) { |
362 |
++ if (bond_slave_is_up(slave) && |
363 |
++ slave->link == BOND_LINK_UP) { |
364 |
+ bond_dev_queue_xmit(bond, skb, slave->dev); |
365 |
+ return 0; |
366 |
+ } |
367 |
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h |
368 |
+index c3a6072134f5..2559206d8704 100644 |
369 |
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h |
370 |
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h |
371 |
+@@ -531,20 +531,8 @@ struct bnx2x_fastpath { |
372 |
+ struct napi_struct napi; |
373 |
+ |
374 |
+ #ifdef CONFIG_NET_RX_BUSY_POLL |
375 |
+- unsigned int state; |
376 |
+-#define BNX2X_FP_STATE_IDLE 0 |
377 |
+-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ |
378 |
+-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ |
379 |
+-#define BNX2X_FP_STATE_DISABLED (1 << 2) |
380 |
+-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ |
381 |
+-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ |
382 |
+-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) |
383 |
+-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) |
384 |
+-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) |
385 |
+-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) |
386 |
+- /* protect state */ |
387 |
+- spinlock_t lock; |
388 |
+-#endif /* CONFIG_NET_RX_BUSY_POLL */ |
389 |
++ unsigned long busy_poll_state; |
390 |
++#endif |
391 |
+ |
392 |
+ union host_hc_status_block status_blk; |
393 |
+ /* chip independent shortcuts into sb structure */ |
394 |
+@@ -619,104 +607,83 @@ struct bnx2x_fastpath { |
395 |
+ #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) |
396 |
+ |
397 |
+ #ifdef CONFIG_NET_RX_BUSY_POLL |
398 |
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) |
399 |
++ |
400 |
++enum bnx2x_fp_state { |
401 |
++ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */ |
402 |
++ |
403 |
++ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */ |
404 |
++ BNX2X_STATE_FP_NAPI_REQ = BIT(1), |
405 |
++ |
406 |
++ BNX2X_STATE_FP_POLL_BIT = 2, |
407 |
++ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */ |
408 |
++ |
409 |
++ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */ |
410 |
++}; |
411 |
++ |
412 |
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) |
413 |
+ { |
414 |
+- spin_lock_init(&fp->lock); |
415 |
+- fp->state = BNX2X_FP_STATE_IDLE; |
416 |
++ WRITE_ONCE(fp->busy_poll_state, 0); |
417 |
+ } |
418 |
+ |
419 |
+ /* called from the device poll routine to get ownership of a FP */ |
420 |
+ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) |
421 |
+ { |
422 |
+- bool rc = true; |
423 |
+- |
424 |
+- spin_lock_bh(&fp->lock); |
425 |
+- if (fp->state & BNX2X_FP_LOCKED) { |
426 |
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); |
427 |
+- fp->state |= BNX2X_FP_STATE_NAPI_YIELD; |
428 |
+- rc = false; |
429 |
+- } else { |
430 |
+- /* we don't care if someone yielded */ |
431 |
+- fp->state = BNX2X_FP_STATE_NAPI; |
432 |
++ unsigned long prev, old = READ_ONCE(fp->busy_poll_state); |
433 |
++ |
434 |
++ while (1) { |
435 |
++ switch (old) { |
436 |
++ case BNX2X_STATE_FP_POLL: |
437 |
++ /* make sure bnx2x_fp_lock_poll() wont starve us */ |
438 |
++ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT, |
439 |
++ &fp->busy_poll_state); |
440 |
++ /* fallthrough */ |
441 |
++ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ: |
442 |
++ return false; |
443 |
++ default: |
444 |
++ break; |
445 |
++ } |
446 |
++ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI); |
447 |
++ if (unlikely(prev != old)) { |
448 |
++ old = prev; |
449 |
++ continue; |
450 |
++ } |
451 |
++ return true; |
452 |
+ } |
453 |
+- spin_unlock_bh(&fp->lock); |
454 |
+- return rc; |
455 |
+ } |
456 |
+ |
457 |
+-/* returns true is someone tried to get the FP while napi had it */ |
458 |
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) |
459 |
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) |
460 |
+ { |
461 |
+- bool rc = false; |
462 |
+- |
463 |
+- spin_lock_bh(&fp->lock); |
464 |
+- WARN_ON(fp->state & |
465 |
+- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); |
466 |
+- |
467 |
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
468 |
+- rc = true; |
469 |
+- |
470 |
+- /* state ==> idle, unless currently disabled */ |
471 |
+- fp->state &= BNX2X_FP_STATE_DISABLED; |
472 |
+- spin_unlock_bh(&fp->lock); |
473 |
+- return rc; |
474 |
++ smp_wmb(); |
475 |
++ fp->busy_poll_state = 0; |
476 |
+ } |
477 |
+ |
478 |
+ /* called from bnx2x_low_latency_poll() */ |
479 |
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) |
480 |
+ { |
481 |
+- bool rc = true; |
482 |
+- |
483 |
+- spin_lock_bh(&fp->lock); |
484 |
+- if ((fp->state & BNX2X_FP_LOCKED)) { |
485 |
+- fp->state |= BNX2X_FP_STATE_POLL_YIELD; |
486 |
+- rc = false; |
487 |
+- } else { |
488 |
+- /* preserve yield marks */ |
489 |
+- fp->state |= BNX2X_FP_STATE_POLL; |
490 |
+- } |
491 |
+- spin_unlock_bh(&fp->lock); |
492 |
+- return rc; |
493 |
++ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0; |
494 |
+ } |
495 |
+ |
496 |
+-/* returns true if someone tried to get the FP while it was locked */ |
497 |
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) |
498 |
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) |
499 |
+ { |
500 |
+- bool rc = false; |
501 |
+- |
502 |
+- spin_lock_bh(&fp->lock); |
503 |
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); |
504 |
+- |
505 |
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD) |
506 |
+- rc = true; |
507 |
+- |
508 |
+- /* state ==> idle, unless currently disabled */ |
509 |
+- fp->state &= BNX2X_FP_STATE_DISABLED; |
510 |
+- spin_unlock_bh(&fp->lock); |
511 |
+- return rc; |
512 |
++ smp_mb__before_atomic(); |
513 |
++ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state); |
514 |
+ } |
515 |
+ |
516 |
+-/* true if a socket is polling, even if it did not get the lock */ |
517 |
++/* true if a socket is polling */ |
518 |
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) |
519 |
+ { |
520 |
+- WARN_ON(!(fp->state & BNX2X_FP_OWNED)); |
521 |
+- return fp->state & BNX2X_FP_USER_PEND; |
522 |
++ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; |
523 |
+ } |
524 |
+ |
525 |
+ /* false if fp is currently owned */ |
526 |
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) |
527 |
+ { |
528 |
+- int rc = true; |
529 |
+- |
530 |
+- spin_lock_bh(&fp->lock); |
531 |
+- if (fp->state & BNX2X_FP_OWNED) |
532 |
+- rc = false; |
533 |
+- fp->state |= BNX2X_FP_STATE_DISABLED; |
534 |
+- spin_unlock_bh(&fp->lock); |
535 |
++ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state); |
536 |
++ return !bnx2x_fp_ll_polling(fp); |
537 |
+ |
538 |
+- return rc; |
539 |
+ } |
540 |
+ #else |
541 |
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) |
542 |
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp) |
543 |
+ { |
544 |
+ } |
545 |
+ |
546 |
+@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) |
547 |
+ return true; |
548 |
+ } |
549 |
+ |
550 |
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) |
551 |
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) |
552 |
+ { |
553 |
+- return false; |
554 |
+ } |
555 |
+ |
556 |
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) |
557 |
+@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp) |
558 |
+ return false; |
559 |
+ } |
560 |
+ |
561 |
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) |
562 |
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) |
563 |
+ { |
564 |
+- return false; |
565 |
+ } |
566 |
+ |
567 |
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) |
568 |
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
569 |
+index e468ed3f210f..2b8e8b2ce0b6 100644 |
570 |
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
571 |
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
572 |
+@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp) |
573 |
+ int i; |
574 |
+ |
575 |
+ for_each_rx_queue_cnic(bp, i) { |
576 |
+- bnx2x_fp_init_lock(&bp->fp[i]); |
577 |
++ bnx2x_fp_busy_poll_init(&bp->fp[i]); |
578 |
+ napi_enable(&bnx2x_fp(bp, i, napi)); |
579 |
+ } |
580 |
+ } |
581 |
+@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp) |
582 |
+ int i; |
583 |
+ |
584 |
+ for_each_eth_queue(bp, i) { |
585 |
+- bnx2x_fp_init_lock(&bp->fp[i]); |
586 |
++ bnx2x_fp_busy_poll_init(&bp->fp[i]); |
587 |
+ napi_enable(&bnx2x_fp(bp, i, napi)); |
588 |
+ } |
589 |
+ } |
590 |
+@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) |
591 |
+ } |
592 |
+ } |
593 |
+ |
594 |
++ bnx2x_fp_unlock_napi(fp); |
595 |
++ |
596 |
+ /* Fall out from the NAPI loop if needed */ |
597 |
+- if (!bnx2x_fp_unlock_napi(fp) && |
598 |
+- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
599 |
++ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { |
600 |
+ |
601 |
+ /* No need to update SB for FCoE L2 ring as long as |
602 |
+ * it's connected to the default SB and the SB |
603 |
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
604 |
+index 96bf01ba32dd..05ae12690117 100644 |
605 |
+--- a/drivers/net/ethernet/broadcom/tg3.c |
606 |
++++ b/drivers/net/ethernet/broadcom/tg3.c |
607 |
+@@ -17868,8 +17868,10 @@ static int tg3_init_one(struct pci_dev *pdev, |
608 |
+ */ |
609 |
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || |
610 |
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { |
611 |
++ tg3_full_lock(tp, 0); |
612 |
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); |
613 |
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
614 |
++ tg3_full_unlock(tp); |
615 |
+ } |
616 |
+ |
617 |
+ err = tg3_test_dma(tp); |
618 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c |
619 |
+index 5c93d1451c44..9842bf963648 100644 |
620 |
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c |
621 |
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c |
622 |
+@@ -585,7 +585,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, |
623 |
+ * on the host, we deprecate the error message for this |
624 |
+ * specific command/input_mod/opcode_mod/fw-status to be debug. |
625 |
+ */ |
626 |
+- if (op == MLX4_CMD_SET_PORT && in_modifier == 1 && |
627 |
++ if (op == MLX4_CMD_SET_PORT && |
628 |
++ (in_modifier == 1 || in_modifier == 2) && |
629 |
+ op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE) |
630 |
+ mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n", |
631 |
+ op, context->fw_status); |
632 |
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
633 |
+index ac6a8f1eea6c..2617c9d68d9b 100644 |
634 |
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
635 |
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
636 |
+@@ -2627,13 +2627,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
637 |
+ netif_carrier_off(dev); |
638 |
+ mlx4_en_set_default_moderation(priv); |
639 |
+ |
640 |
+- err = register_netdev(dev); |
641 |
+- if (err) { |
642 |
+- en_err(priv, "Netdev registration failed for port %d\n", port); |
643 |
+- goto out; |
644 |
+- } |
645 |
+- priv->registered = 1; |
646 |
+- |
647 |
+ en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
648 |
+ en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
649 |
+ |
650 |
+@@ -2673,6 +2666,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
651 |
+ queue_delayed_work(mdev->workqueue, &priv->service_task, |
652 |
+ SERVICE_TASK_DELAY); |
653 |
+ |
654 |
++ err = register_netdev(dev); |
655 |
++ if (err) { |
656 |
++ en_err(priv, "Netdev registration failed for port %d\n", port); |
657 |
++ goto out; |
658 |
++ } |
659 |
++ |
660 |
++ priv->registered = 1; |
661 |
++ |
662 |
+ return 0; |
663 |
+ |
664 |
+ out: |
665 |
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c |
666 |
+index 2f398fa4b9e6..24c028473383 100644 |
667 |
+--- a/drivers/net/ethernet/rocker/rocker.c |
668 |
++++ b/drivers/net/ethernet/rocker/rocker.c |
669 |
+@@ -4305,10 +4305,16 @@ static int rocker_port_master_changed(struct net_device *dev) |
670 |
+ struct net_device *master = netdev_master_upper_dev_get(dev); |
671 |
+ int err = 0; |
672 |
+ |
673 |
++ /* There are currently three cases handled here: |
674 |
++ * 1. Joining a bridge |
675 |
++ * 2. Leaving a previously joined bridge |
676 |
++ * 3. Other, e.g. being added to or removed from a bond or openvswitch, |
677 |
++ * in which case nothing is done |
678 |
++ */ |
679 |
+ if (master && master->rtnl_link_ops && |
680 |
+ !strcmp(master->rtnl_link_ops->kind, "bridge")) |
681 |
+ err = rocker_port_bridge_join(rocker_port, master); |
682 |
+- else |
683 |
++ else if (rocker_port_is_bridged(rocker_port)) |
684 |
+ err = rocker_port_bridge_leave(rocker_port); |
685 |
+ |
686 |
+ return err; |
687 |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
688 |
+index 10f9e4021b5a..9a409a8f3b19 100644 |
689 |
+--- a/drivers/net/tun.c |
690 |
++++ b/drivers/net/tun.c |
691 |
+@@ -1368,7 +1368,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, |
692 |
+ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0, |
693 |
+ &peeked, &off, &err); |
694 |
+ if (!skb) |
695 |
+- return 0; |
696 |
++ return err; |
697 |
+ |
698 |
+ ret = tun_put_user(tun, tfile, skb, to); |
699 |
+ if (unlikely(ret < 0)) |
700 |
+diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c |
701 |
+index 5c55f11572ba..75d6f26729a3 100644 |
702 |
+--- a/drivers/net/usb/asix_common.c |
703 |
++++ b/drivers/net/usb/asix_common.c |
704 |
+@@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
705 |
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); |
706 |
+ skb_put(skb, sizeof(padbytes)); |
707 |
+ } |
708 |
++ |
709 |
++ usbnet_set_skb_tx_stats(skb, 1, 0); |
710 |
+ return skb; |
711 |
+ } |
712 |
+ |
713 |
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
714 |
+index 80a844e0ae03..c3e4da9e79ca 100644 |
715 |
+--- a/drivers/net/usb/cdc_ncm.c |
716 |
++++ b/drivers/net/usb/cdc_ncm.c |
717 |
+@@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
718 |
+ |
719 |
+ /* return skb */ |
720 |
+ ctx->tx_curr_skb = NULL; |
721 |
+- dev->net->stats.tx_packets += ctx->tx_curr_frame_num; |
722 |
+ |
723 |
+ /* keep private stats: framing overhead and number of NTBs */ |
724 |
+ ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; |
725 |
+ ctx->tx_ntbs++; |
726 |
+ |
727 |
+- /* usbnet has already counted all the framing overhead. |
728 |
++ /* usbnet will count all the framing overhead by default. |
729 |
+ * Adjust the stats so that the tx_bytes counter show real |
730 |
+ * payload data instead. |
731 |
+ */ |
732 |
+- dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; |
733 |
++ usbnet_set_skb_tx_stats(skb_out, n, |
734 |
++ ctx->tx_curr_frame_payload - skb_out->len); |
735 |
+ |
736 |
+ return skb_out; |
737 |
+ |
738 |
+diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c |
739 |
+index b94a0fbb8b3b..953de13267df 100644 |
740 |
+--- a/drivers/net/usb/sr9800.c |
741 |
++++ b/drivers/net/usb/sr9800.c |
742 |
+@@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
743 |
+ skb_put(skb, sizeof(padbytes)); |
744 |
+ } |
745 |
+ |
746 |
++ usbnet_set_skb_tx_stats(skb, 1, 0); |
747 |
+ return skb; |
748 |
+ } |
749 |
+ |
750 |
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c |
751 |
+index 3a6770a65d78..e7ed2513b1d1 100644 |
752 |
+--- a/drivers/net/usb/usbnet.c |
753 |
++++ b/drivers/net/usb/usbnet.c |
754 |
+@@ -1189,8 +1189,7 @@ static void tx_complete (struct urb *urb) |
755 |
+ struct usbnet *dev = entry->dev; |
756 |
+ |
757 |
+ if (urb->status == 0) { |
758 |
+- if (!(dev->driver_info->flags & FLAG_MULTI_PACKET)) |
759 |
+- dev->net->stats.tx_packets++; |
760 |
++ dev->net->stats.tx_packets += entry->packets; |
761 |
+ dev->net->stats.tx_bytes += entry->length; |
762 |
+ } else { |
763 |
+ dev->net->stats.tx_errors++; |
764 |
+@@ -1348,7 +1347,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, |
765 |
+ } else |
766 |
+ urb->transfer_flags |= URB_ZERO_PACKET; |
767 |
+ } |
768 |
+- entry->length = urb->transfer_buffer_length = length; |
769 |
++ urb->transfer_buffer_length = length; |
770 |
++ |
771 |
++ if (info->flags & FLAG_MULTI_PACKET) { |
772 |
++ /* Driver has set number of packets and a length delta. |
773 |
++ * Calculate the complete length and ensure that it's |
774 |
++ * positive. |
775 |
++ */ |
776 |
++ entry->length += length; |
777 |
++ if (WARN_ON_ONCE(entry->length <= 0)) |
778 |
++ entry->length = length; |
779 |
++ } else { |
780 |
++ usbnet_set_skb_tx_stats(skb, 1, length); |
781 |
++ } |
782 |
+ |
783 |
+ spin_lock_irqsave(&dev->txq.lock, flags); |
784 |
+ retval = usb_autopm_get_interface_async(dev->intf); |
785 |
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
786 |
+index a8c755dcab14..6c83846f914c 100644 |
787 |
+--- a/drivers/net/vxlan.c |
788 |
++++ b/drivers/net/vxlan.c |
789 |
+@@ -1578,12 +1578,6 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, |
790 |
+ int err; |
791 |
+ bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); |
792 |
+ |
793 |
+- skb = udp_tunnel_handle_offloads(skb, udp_sum); |
794 |
+- if (IS_ERR(skb)) { |
795 |
+- err = -EINVAL; |
796 |
+- goto err; |
797 |
+- } |
798 |
+- |
799 |
+ skb_scrub_packet(skb, xnet); |
800 |
+ |
801 |
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len |
802 |
+@@ -1603,6 +1597,12 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, |
803 |
+ goto err; |
804 |
+ } |
805 |
+ |
806 |
++ skb = udp_tunnel_handle_offloads(skb, udp_sum); |
807 |
++ if (IS_ERR(skb)) { |
808 |
++ err = -EINVAL; |
809 |
++ goto err; |
810 |
++ } |
811 |
++ |
812 |
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); |
813 |
+ vxh->vx_flags = htonl(VXLAN_FLAGS); |
814 |
+ vxh->vx_vni = vni; |
815 |
+@@ -1628,10 +1628,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, |
816 |
+ int err; |
817 |
+ bool udp_sum = !vs->sock->sk->sk_no_check_tx; |
818 |
+ |
819 |
+- skb = udp_tunnel_handle_offloads(skb, udp_sum); |
820 |
+- if (IS_ERR(skb)) |
821 |
+- return PTR_ERR(skb); |
822 |
+- |
823 |
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len |
824 |
+ + VXLAN_HLEN + sizeof(struct iphdr) |
825 |
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); |
826 |
+@@ -1647,6 +1643,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, |
827 |
+ if (WARN_ON(!skb)) |
828 |
+ return -ENOMEM; |
829 |
+ |
830 |
++ skb = udp_tunnel_handle_offloads(skb, udp_sum); |
831 |
++ if (IS_ERR(skb)) |
832 |
++ return PTR_ERR(skb); |
833 |
++ |
834 |
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); |
835 |
+ vxh->vx_flags = htonl(VXLAN_FLAGS); |
836 |
+ vxh->vx_vni = vni; |
837 |
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c |
838 |
+index a5186bb7c63e..8c45cf44ce24 100644 |
839 |
+--- a/drivers/net/wireless/rtlwifi/pci.c |
840 |
++++ b/drivers/net/wireless/rtlwifi/pci.c |
841 |
+@@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) |
842 |
+ else |
843 |
+ entry = (u8 *)(&ring->desc[ring->idx]); |
844 |
+ |
845 |
++ if (rtlpriv->cfg->ops->get_available_desc && |
846 |
++ rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) { |
847 |
++ RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG, |
848 |
++ "no available desc!\n"); |
849 |
++ return; |
850 |
++ } |
851 |
++ |
852 |
+ if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) |
853 |
+ return; |
854 |
+ ring->idx = (ring->idx + 1) % ring->entries; |
855 |
+@@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) |
856 |
+ |
857 |
+ ieee80211_tx_status_irqsafe(hw, skb); |
858 |
+ |
859 |
+- if ((ring->entries - skb_queue_len(&ring->queue)) |
860 |
+- == 2) { |
861 |
++ if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) { |
862 |
+ |
863 |
+- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, |
864 |
++ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, |
865 |
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n", |
866 |
+ prio, ring->idx, |
867 |
+ skb_queue_len(&ring->queue)); |
868 |
+@@ -793,7 +799,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) |
869 |
+ rx_remained_cnt = |
870 |
+ rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw, |
871 |
+ hw_queue); |
872 |
+- if (rx_remained_cnt < 1) |
873 |
++ if (rx_remained_cnt == 0) |
874 |
+ return; |
875 |
+ |
876 |
+ } else { /* rx descriptor */ |
877 |
+@@ -845,18 +851,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) |
878 |
+ else |
879 |
+ skb_reserve(skb, stats.rx_drvinfo_size + |
880 |
+ stats.rx_bufshift); |
881 |
+- |
882 |
+ } else { |
883 |
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
884 |
+ "skb->end - skb->tail = %d, len is %d\n", |
885 |
+ skb->end - skb->tail, len); |
886 |
+- break; |
887 |
++ dev_kfree_skb_any(skb); |
888 |
++ goto new_trx_end; |
889 |
+ } |
890 |
+ /* handle command packet here */ |
891 |
+ if (rtlpriv->cfg->ops->rx_command_packet && |
892 |
+ rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { |
893 |
+ dev_kfree_skb_any(skb); |
894 |
+- goto end; |
895 |
++ goto new_trx_end; |
896 |
+ } |
897 |
+ |
898 |
+ /* |
899 |
+@@ -906,6 +912,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) |
900 |
+ } else { |
901 |
+ dev_kfree_skb_any(skb); |
902 |
+ } |
903 |
++new_trx_end: |
904 |
+ if (rtlpriv->use_new_trx_flow) { |
905 |
+ rtlpci->rx_ring[hw_queue].next_rx_rp += 1; |
906 |
+ rtlpci->rx_ring[hw_queue].next_rx_rp %= |
907 |
+@@ -921,7 +928,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) |
908 |
+ rtlpriv->enter_ps = false; |
909 |
+ schedule_work(&rtlpriv->works.lps_change_work); |
910 |
+ } |
911 |
+-end: |
912 |
+ skb = new_skb; |
913 |
+ no_new: |
914 |
+ if (rtlpriv->use_new_trx_flow) { |
915 |
+@@ -1695,6 +1701,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, |
916 |
+ } |
917 |
+ } |
918 |
+ |
919 |
++ if (rtlpriv->cfg->ops->get_available_desc && |
920 |
++ rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) { |
921 |
++ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, |
922 |
++ "get_available_desc fail\n"); |
923 |
++ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, |
924 |
++ flags); |
925 |
++ return skb->len; |
926 |
++ } |
927 |
++ |
928 |
+ if (ieee80211_is_data_qos(fc)) { |
929 |
+ tid = rtl_get_tid(skb); |
930 |
+ if (sta) { |
931 |
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c |
932 |
+index 9b5a7d5be121..c31c6bfb536d 100644 |
933 |
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c |
934 |
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c |
935 |
+@@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) |
936 |
+ RCR_HTC_LOC_CTRL | |
937 |
+ RCR_AMF | |
938 |
+ RCR_ACF | |
939 |
+- RCR_ADF | |
940 |
+- RCR_AICV | |
941 |
+ RCR_ACRC32 | |
942 |
+ RCR_AB | |
943 |
+ RCR_AM | |
944 |
+@@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { |
945 |
+ .set_desc = rtl92ee_set_desc, |
946 |
+ .get_desc = rtl92ee_get_desc, |
947 |
+ .is_tx_desc_closed = rtl92ee_is_tx_desc_closed, |
948 |
++ .get_available_desc = rtl92ee_get_available_desc, |
949 |
+ .tx_polling = rtl92ee_tx_polling, |
950 |
+ .enable_hw_sec = rtl92ee_enable_hw_security_config, |
951 |
+ .set_key = rtl92ee_set_key, |
952 |
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c |
953 |
+index 00690040be37..1f6d160877e1 100644 |
954 |
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c |
955 |
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c |
956 |
+@@ -707,7 +707,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index) |
957 |
+ return desc_address; |
958 |
+ } |
959 |
+ |
960 |
+-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) |
961 |
++u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) |
962 |
+ { |
963 |
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
964 |
+ struct rtl_priv *rtlpriv = rtl_priv(hw); |
965 |
+@@ -721,11 +721,12 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx) |
966 |
+ current_tx_write_point = (u16)((tmp_4byte) & 0x0fff); |
967 |
+ |
968 |
+ point_diff = ((current_tx_read_point > current_tx_write_point) ? |
969 |
+- (current_tx_read_point - current_tx_write_point) : |
970 |
+- (TX_DESC_NUM_92E - current_tx_write_point + |
971 |
++ (current_tx_read_point - current_tx_write_point - 1) : |
972 |
++ (TX_DESC_NUM_92E - 1 - current_tx_write_point + |
973 |
+ current_tx_read_point)); |
974 |
+ |
975 |
+ rtlpci->tx_ring[q_idx].avl_desc = point_diff; |
976 |
++ return point_diff; |
977 |
+ } |
978 |
+ |
979 |
+ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, |
980 |
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h |
981 |
+index 8effef9b13dd..b489dd9c8401 100644 |
982 |
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h |
983 |
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h |
984 |
+@@ -831,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc, |
985 |
+ u8 queue_index); |
986 |
+ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, |
987 |
+ u8 queue_index); |
988 |
+-void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index); |
989 |
++u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index); |
990 |
+ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw, |
991 |
+ u8 *tx_bd_desc, u8 *desc, u8 queue_index, |
992 |
+ struct sk_buff *skb, dma_addr_t addr); |
993 |
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h |
994 |
+index 6866dcf24340..27822fe34d9a 100644 |
995 |
+--- a/drivers/net/wireless/rtlwifi/wifi.h |
996 |
++++ b/drivers/net/wireless/rtlwifi/wifi.h |
997 |
+@@ -2161,6 +2161,7 @@ struct rtl_hal_ops { |
998 |
+ void (*add_wowlan_pattern)(struct ieee80211_hw *hw, |
999 |
+ struct rtl_wow_pattern *rtl_pattern, |
1000 |
+ u8 index); |
1001 |
++ u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx); |
1002 |
+ }; |
1003 |
+ |
1004 |
+ struct rtl_intf_ops { |
1005 |
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
1006 |
+index d8c10764f130..76ce69cc1382 100644 |
1007 |
+--- a/drivers/net/xen-netfront.c |
1008 |
++++ b/drivers/net/xen-netfront.c |
1009 |
+@@ -1062,8 +1062,7 @@ err: |
1010 |
+ |
1011 |
+ static int xennet_change_mtu(struct net_device *dev, int mtu) |
1012 |
+ { |
1013 |
+- int max = xennet_can_sg(dev) ? |
1014 |
+- XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; |
1015 |
++ int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; |
1016 |
+ |
1017 |
+ if (mtu > max) |
1018 |
+ return -EINVAL; |
1019 |
+@@ -1333,8 +1332,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) |
1020 |
+ netdev->ethtool_ops = &xennet_ethtool_ops; |
1021 |
+ SET_NETDEV_DEV(netdev, &dev->dev); |
1022 |
+ |
1023 |
+- netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); |
1024 |
+- |
1025 |
+ np->netdev = netdev; |
1026 |
+ |
1027 |
+ netif_carrier_off(netdev); |
1028 |
+diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c |
1029 |
+index d02df7d0c629..57b7bc2e037a 100644 |
1030 |
+--- a/drivers/staging/comedi/drivers/adv_pci1710.c |
1031 |
++++ b/drivers/staging/comedi/drivers/adv_pci1710.c |
1032 |
+@@ -455,7 +455,6 @@ static int pci171x_insn_read_ai(struct comedi_device *dev, |
1033 |
+ struct comedi_insn *insn, unsigned int *data) |
1034 |
+ { |
1035 |
+ struct pci1710_private *devpriv = dev->private; |
1036 |
+- unsigned int chan = CR_CHAN(insn->chanspec); |
1037 |
+ int ret = 0; |
1038 |
+ int i; |
1039 |
+ |
1040 |
+@@ -477,7 +476,7 @@ static int pci171x_insn_read_ai(struct comedi_device *dev, |
1041 |
+ break; |
1042 |
+ |
1043 |
+ val = inw(dev->iobase + PCI171x_AD_DATA); |
1044 |
+- ret = pci171x_ai_dropout(dev, s, chan, val); |
1045 |
++ ret = pci171x_ai_dropout(dev, s, 0, val); |
1046 |
+ if (ret) |
1047 |
+ break; |
1048 |
+ |
1049 |
+diff --git a/fs/exec.c b/fs/exec.c |
1050 |
+index ad8798e26be9..4617a4ec52e3 100644 |
1051 |
+--- a/fs/exec.c |
1052 |
++++ b/fs/exec.c |
1053 |
+@@ -1259,6 +1259,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm) |
1054 |
+ spin_unlock(&p->fs->lock); |
1055 |
+ } |
1056 |
+ |
1057 |
++static void bprm_fill_uid(struct linux_binprm *bprm) |
1058 |
++{ |
1059 |
++ struct inode *inode; |
1060 |
++ unsigned int mode; |
1061 |
++ kuid_t uid; |
1062 |
++ kgid_t gid; |
1063 |
++ |
1064 |
++ /* clear any previous set[ug]id data from a previous binary */ |
1065 |
++ bprm->cred->euid = current_euid(); |
1066 |
++ bprm->cred->egid = current_egid(); |
1067 |
++ |
1068 |
++ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) |
1069 |
++ return; |
1070 |
++ |
1071 |
++ if (task_no_new_privs(current)) |
1072 |
++ return; |
1073 |
++ |
1074 |
++ inode = file_inode(bprm->file); |
1075 |
++ mode = READ_ONCE(inode->i_mode); |
1076 |
++ if (!(mode & (S_ISUID|S_ISGID))) |
1077 |
++ return; |
1078 |
++ |
1079 |
++ /* Be careful if suid/sgid is set */ |
1080 |
++ mutex_lock(&inode->i_mutex); |
1081 |
++ |
1082 |
++ /* reload atomically mode/uid/gid now that lock held */ |
1083 |
++ mode = inode->i_mode; |
1084 |
++ uid = inode->i_uid; |
1085 |
++ gid = inode->i_gid; |
1086 |
++ mutex_unlock(&inode->i_mutex); |
1087 |
++ |
1088 |
++ /* We ignore suid/sgid if there are no mappings for them in the ns */ |
1089 |
++ if (!kuid_has_mapping(bprm->cred->user_ns, uid) || |
1090 |
++ !kgid_has_mapping(bprm->cred->user_ns, gid)) |
1091 |
++ return; |
1092 |
++ |
1093 |
++ if (mode & S_ISUID) { |
1094 |
++ bprm->per_clear |= PER_CLEAR_ON_SETID; |
1095 |
++ bprm->cred->euid = uid; |
1096 |
++ } |
1097 |
++ |
1098 |
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { |
1099 |
++ bprm->per_clear |= PER_CLEAR_ON_SETID; |
1100 |
++ bprm->cred->egid = gid; |
1101 |
++ } |
1102 |
++} |
1103 |
++ |
1104 |
+ /* |
1105 |
+ * Fill the binprm structure from the inode. |
1106 |
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes |
1107 |
+@@ -1267,36 +1314,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm) |
1108 |
+ */ |
1109 |
+ int prepare_binprm(struct linux_binprm *bprm) |
1110 |
+ { |
1111 |
+- struct inode *inode = file_inode(bprm->file); |
1112 |
+- umode_t mode = inode->i_mode; |
1113 |
+ int retval; |
1114 |
+ |
1115 |
+- |
1116 |
+- /* clear any previous set[ug]id data from a previous binary */ |
1117 |
+- bprm->cred->euid = current_euid(); |
1118 |
+- bprm->cred->egid = current_egid(); |
1119 |
+- |
1120 |
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) && |
1121 |
+- !task_no_new_privs(current) && |
1122 |
+- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) && |
1123 |
+- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) { |
1124 |
+- /* Set-uid? */ |
1125 |
+- if (mode & S_ISUID) { |
1126 |
+- bprm->per_clear |= PER_CLEAR_ON_SETID; |
1127 |
+- bprm->cred->euid = inode->i_uid; |
1128 |
+- } |
1129 |
+- |
1130 |
+- /* Set-gid? */ |
1131 |
+- /* |
1132 |
+- * If setgid is set but no group execute bit then this |
1133 |
+- * is a candidate for mandatory locking, not a setgid |
1134 |
+- * executable. |
1135 |
+- */ |
1136 |
+- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { |
1137 |
+- bprm->per_clear |= PER_CLEAR_ON_SETID; |
1138 |
+- bprm->cred->egid = inode->i_gid; |
1139 |
+- } |
1140 |
+- } |
1141 |
++ bprm_fill_uid(bprm); |
1142 |
+ |
1143 |
+ /* fill in binprm security blob */ |
1144 |
+ retval = security_bprm_set_creds(bprm); |
1145 |
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
1146 |
+index 431b7fc605c9..e235ec5f1f28 100644 |
1147 |
+--- a/include/linux/hugetlb.h |
1148 |
++++ b/include/linux/hugetlb.h |
1149 |
+@@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); |
1150 |
+ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, |
1151 |
+ int write); |
1152 |
+ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, |
1153 |
+- pmd_t *pmd, int write); |
1154 |
++ pmd_t *pmd, int flags); |
1155 |
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, |
1156 |
+- pud_t *pud, int write); |
1157 |
++ pud_t *pud, int flags); |
1158 |
+ int pmd_huge(pmd_t pmd); |
1159 |
+ int pud_huge(pud_t pmd); |
1160 |
+ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, |
1161 |
+@@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) |
1162 |
+ static inline void hugetlb_show_meminfo(void) |
1163 |
+ { |
1164 |
+ } |
1165 |
+-#define follow_huge_pmd(mm, addr, pmd, write) NULL |
1166 |
+-#define follow_huge_pud(mm, addr, pud, write) NULL |
1167 |
++#define follow_huge_pmd(mm, addr, pmd, flags) NULL |
1168 |
++#define follow_huge_pud(mm, addr, pud, flags) NULL |
1169 |
+ #define prepare_hugepage_range(file, addr, len) (-EINVAL) |
1170 |
+ #define pmd_huge(x) 0 |
1171 |
+ #define pud_huge(x) 0 |
1172 |
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h |
1173 |
+index 52fd8e8694cf..840fb7f7c3de 100644 |
1174 |
+--- a/include/linux/netdevice.h |
1175 |
++++ b/include/linux/netdevice.h |
1176 |
+@@ -2159,6 +2159,12 @@ void netdev_freemem(struct net_device *dev); |
1177 |
+ void synchronize_net(void); |
1178 |
+ int init_dummy_netdev(struct net_device *dev); |
1179 |
+ |
1180 |
++DECLARE_PER_CPU(int, xmit_recursion); |
1181 |
++static inline int dev_recursion_level(void) |
1182 |
++{ |
1183 |
++ return this_cpu_read(xmit_recursion); |
1184 |
++} |
1185 |
++ |
1186 |
+ struct net_device *dev_get_by_index(struct net *net, int ifindex); |
1187 |
+ struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
1188 |
+ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
1189 |
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h |
1190 |
+index 6adfb7bfbf44..e288d5c016a7 100644 |
1191 |
+--- a/include/linux/swapops.h |
1192 |
++++ b/include/linux/swapops.h |
1193 |
+@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry) |
1194 |
+ *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry)); |
1195 |
+ } |
1196 |
+ |
1197 |
++extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, |
1198 |
++ spinlock_t *ptl); |
1199 |
+ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, |
1200 |
+ unsigned long address); |
1201 |
+ extern void migration_entry_wait_huge(struct vm_area_struct *vma, |
1202 |
+@@ -150,6 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp) |
1203 |
+ } |
1204 |
+ #define migration_entry_to_page(swp) NULL |
1205 |
+ static inline void make_migration_entry_read(swp_entry_t *entryp) { } |
1206 |
++static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, |
1207 |
++ spinlock_t *ptl) { } |
1208 |
+ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, |
1209 |
+ unsigned long address) { } |
1210 |
+ static inline void migration_entry_wait_huge(struct vm_area_struct *vma, |
1211 |
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h |
1212 |
+index d9a4905e01d0..6e0ce8c7b8cb 100644 |
1213 |
+--- a/include/linux/usb/usbnet.h |
1214 |
++++ b/include/linux/usb/usbnet.h |
1215 |
+@@ -227,9 +227,23 @@ struct skb_data { /* skb->cb is one of these */ |
1216 |
+ struct urb *urb; |
1217 |
+ struct usbnet *dev; |
1218 |
+ enum skb_state state; |
1219 |
+- size_t length; |
1220 |
++ long length; |
1221 |
++ unsigned long packets; |
1222 |
+ }; |
1223 |
+ |
1224 |
++/* Drivers that set FLAG_MULTI_PACKET must call this in their |
1225 |
++ * tx_fixup method before returning an skb. |
1226 |
++ */ |
1227 |
++static inline void |
1228 |
++usbnet_set_skb_tx_stats(struct sk_buff *skb, |
1229 |
++ unsigned long packets, long bytes_delta) |
1230 |
++{ |
1231 |
++ struct skb_data *entry = (struct skb_data *) skb->cb; |
1232 |
++ |
1233 |
++ entry->packets = packets; |
1234 |
++ entry->length = bytes_delta; |
1235 |
++} |
1236 |
++ |
1237 |
+ extern int usbnet_open(struct net_device *net); |
1238 |
+ extern int usbnet_stop(struct net_device *net); |
1239 |
+ extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, |
1240 |
+diff --git a/include/net/ip.h b/include/net/ip.h |
1241 |
+index 09cf5aebb283..c0c26c3deeb5 100644 |
1242 |
+--- a/include/net/ip.h |
1243 |
++++ b/include/net/ip.h |
1244 |
+@@ -453,22 +453,6 @@ static __inline__ void inet_reset_saddr(struct sock *sk) |
1245 |
+ |
1246 |
+ #endif |
1247 |
+ |
1248 |
+-static inline int sk_mc_loop(struct sock *sk) |
1249 |
+-{ |
1250 |
+- if (!sk) |
1251 |
+- return 1; |
1252 |
+- switch (sk->sk_family) { |
1253 |
+- case AF_INET: |
1254 |
+- return inet_sk(sk)->mc_loop; |
1255 |
+-#if IS_ENABLED(CONFIG_IPV6) |
1256 |
+- case AF_INET6: |
1257 |
+- return inet6_sk(sk)->mc_loop; |
1258 |
+-#endif |
1259 |
+- } |
1260 |
+- WARN_ON(1); |
1261 |
+- return 1; |
1262 |
+-} |
1263 |
+- |
1264 |
+ bool ip_call_ra_chain(struct sk_buff *skb); |
1265 |
+ |
1266 |
+ /* |
1267 |
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h |
1268 |
+index 1d09b46c1e48..eda131d179d9 100644 |
1269 |
+--- a/include/net/ip6_route.h |
1270 |
++++ b/include/net/ip6_route.h |
1271 |
+@@ -174,7 +174,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); |
1272 |
+ |
1273 |
+ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) |
1274 |
+ { |
1275 |
+- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; |
1276 |
++ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? |
1277 |
++ inet6_sk(skb->sk) : NULL; |
1278 |
+ |
1279 |
+ return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ? |
1280 |
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
1281 |
+diff --git a/include/net/sock.h b/include/net/sock.h |
1282 |
+index 2210fec65669..45b54d3fcb04 100644 |
1283 |
+--- a/include/net/sock.h |
1284 |
++++ b/include/net/sock.h |
1285 |
+@@ -1812,6 +1812,8 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); |
1286 |
+ |
1287 |
+ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); |
1288 |
+ |
1289 |
++bool sk_mc_loop(struct sock *sk); |
1290 |
++ |
1291 |
+ static inline bool sk_can_gso(const struct sock *sk) |
1292 |
+ { |
1293 |
+ return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); |
1294 |
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c |
1295 |
+index a28e09c7825d..36508e69e92a 100644 |
1296 |
+--- a/kernel/bpf/verifier.c |
1297 |
++++ b/kernel/bpf/verifier.c |
1298 |
+@@ -1380,7 +1380,8 @@ peek_stack: |
1299 |
+ /* tell verifier to check for equivalent states |
1300 |
+ * after every call and jump |
1301 |
+ */ |
1302 |
+- env->explored_states[t + 1] = STATE_LIST_MARK; |
1303 |
++ if (t + 1 < insn_cnt) |
1304 |
++ env->explored_states[t + 1] = STATE_LIST_MARK; |
1305 |
+ } else { |
1306 |
+ /* conditional jump with two edges */ |
1307 |
+ ret = push_insn(t, t + 1, FALLTHROUGH, env); |
1308 |
+diff --git a/mm/gup.c b/mm/gup.c |
1309 |
+index 9b2afbfe67e3..e29c3745a893 100644 |
1310 |
+--- a/mm/gup.c |
1311 |
++++ b/mm/gup.c |
1312 |
+@@ -167,10 +167,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma, |
1313 |
+ if (pud_none(*pud)) |
1314 |
+ return no_page_table(vma, flags); |
1315 |
+ if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { |
1316 |
+- if (flags & FOLL_GET) |
1317 |
+- return NULL; |
1318 |
+- page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE); |
1319 |
+- return page; |
1320 |
++ page = follow_huge_pud(mm, address, pud, flags); |
1321 |
++ if (page) |
1322 |
++ return page; |
1323 |
++ return no_page_table(vma, flags); |
1324 |
+ } |
1325 |
+ if (unlikely(pud_bad(*pud))) |
1326 |
+ return no_page_table(vma, flags); |
1327 |
+@@ -179,19 +179,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma, |
1328 |
+ if (pmd_none(*pmd)) |
1329 |
+ return no_page_table(vma, flags); |
1330 |
+ if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { |
1331 |
+- page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); |
1332 |
+- if (flags & FOLL_GET) { |
1333 |
+- /* |
1334 |
+- * Refcount on tail pages are not well-defined and |
1335 |
+- * shouldn't be taken. The caller should handle a NULL |
1336 |
+- * return when trying to follow tail pages. |
1337 |
+- */ |
1338 |
+- if (PageHead(page)) |
1339 |
+- get_page(page); |
1340 |
+- else |
1341 |
+- page = NULL; |
1342 |
+- } |
1343 |
+- return page; |
1344 |
++ page = follow_huge_pmd(mm, address, pmd, flags); |
1345 |
++ if (page) |
1346 |
++ return page; |
1347 |
++ return no_page_table(vma, flags); |
1348 |
+ } |
1349 |
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd)) |
1350 |
+ return no_page_table(vma, flags); |
1351 |
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 267e41971100..a2bfd02e289f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3700,44 +3700,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+ return (pte_t *) pmd;
+ }
+
+-struct page *
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int write)
+-{
+- struct page *page;
++#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+- if (!pmd_present(*pmd))
+- return NULL;
+- page = pte_page(*(pte_t *)pmd);
+- if (page)
+- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+- return page;
++/*
++ * These functions are overwritable if your architecture needs its own
++ * behavior.
++ */
++struct page * __weak
++follow_huge_addr(struct mm_struct *mm, unsigned long address,
++ int write)
++{
++ return ERR_PTR(-EINVAL);
+ }
+
+-struct page *
+-follow_huge_pud(struct mm_struct *mm, unsigned long address,
+- pud_t *pud, int write)
++struct page * __weak
++follow_huge_pmd(struct mm_struct *mm, unsigned long address,
++ pmd_t *pmd, int flags)
+ {
+- struct page *page;
+-
+- page = pte_page(*(pte_t *)pud);
+- if (page)
+- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
++ struct page *page = NULL;
++ spinlock_t *ptl;
++retry:
++ ptl = pmd_lockptr(mm, pmd);
++ spin_lock(ptl);
++ /*
++ * make sure that the address range covered by this pmd is not
++ * unmapped from other threads.
++ */
++ if (!pmd_huge(*pmd))
++ goto out;
++ if (pmd_present(*pmd)) {
++ page = pte_page(*(pte_t *)pmd) +
++ ((address & ~PMD_MASK) >> PAGE_SHIFT);
++ if (flags & FOLL_GET)
++ get_page(page);
++ } else {
++ if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
++ spin_unlock(ptl);
++ __migration_entry_wait(mm, (pte_t *)pmd, ptl);
++ goto retry;
++ }
++ /*
++ * hwpoisoned entry is treated as no_page_table in
++ * follow_page_mask().
++ */
++ }
++out:
++ spin_unlock(ptl);
+ return page;
+ }
+
+-#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+-
+-/* Can be overriden by architectures */
+ struct page * __weak
+ follow_huge_pud(struct mm_struct *mm, unsigned long address,
+- pud_t *pud, int write)
++ pud_t *pud, int flags)
+ {
+- BUG();
+- return NULL;
+-}
++ if (flags & FOLL_GET)
++ return NULL;
+
+-#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
++ return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
++}
+
+ #ifdef CONFIG_MEMORY_FAILURE
+
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 344cdf692fc8..be6d1edcfcb7 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -229,7 +229,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
+ * get to the page and wait until migration is finished.
+ * When we return from this function the fault will be retried.
+ */
+-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl)
+ {
+ pte_t pte;
+@@ -1268,7 +1268,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
+ goto put_and_set;
+
+ if (PageHuge(page)) {
+- isolate_huge_page(page, &pagelist);
++ if (PageHead(page))
++ isolate_huge_page(page, &pagelist);
+ goto put_and_set;
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4ff46f8054d4..5dd905ca2654 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2821,7 +2821,9 @@ static void skb_update_prio(struct sk_buff *skb)
+ #define skb_update_prio(skb)
+ #endif
+
+-static DEFINE_PER_CPU(int, xmit_recursion);
++DEFINE_PER_CPU(int, xmit_recursion);
++EXPORT_SYMBOL(xmit_recursion);
++
+ #define RECURSION_LIMIT 10
+
+ /**
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 62c67bebcaf5..39c444c1206d 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4141,18 +4141,20 @@ EXPORT_SYMBOL(skb_try_coalesce);
+ */
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ {
+- if (xnet)
+- skb_orphan(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
+ skb->ignore_df = 0;
+ skb_dst_drop(skb);
+- skb->mark = 0;
+- skb_init_secmark(skb);
+ secpath_reset(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
++
++ if (!xnet)
++ return;
++
++ skb_orphan(skb);
++ skb->mark = 0;
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1c7a33db1314..a91f99f26420 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -651,6 +651,25 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+ sock_reset_flag(sk, bit);
+ }
+
++bool sk_mc_loop(struct sock *sk)
++{
++ if (dev_recursion_level())
++ return false;
++ if (!sk)
++ return true;
++ switch (sk->sk_family) {
++ case AF_INET:
++ return inet_sk(sk)->mc_loop;
++#if IS_ENABLED(CONFIG_IPV6)
++ case AF_INET6:
++ return inet6_sk(sk)->mc_loop;
++#endif
++ }
++ WARN_ON(1);
++ return true;
++}
++EXPORT_SYMBOL(sk_mc_loop);
++
+ /*
+ * This is meant for all protocols to use and covers goings on
+ * at the socket level. Everything here is generic.
+diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
+index 394a200f93c1..69711d81a88b 100644
+--- a/net/ipv4/geneve.c
++++ b/net/ipv4/geneve.c
+@@ -121,10 +121,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ int min_headroom;
+ int err;
+
+- skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -139,6 +135,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ if (unlikely(!skb))
+ return -ENOMEM;
+
++ skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 075ab4d5af5e..08ccca6a8035 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3104,10 +3104,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ if (!first_ackt.v64)
+ first_ackt = last_ackt;
+
+- if (!(sacked & TCPCB_SACKED_ACKED))
++ if (!(sacked & TCPCB_SACKED_ACKED)) {
+ reord = min(pkts_acked, reord);
+- if (!after(scb->end_seq, tp->high_seq))
+- flag |= FLAG_ORIG_SACK_ACKED;
++ if (!after(scb->end_seq, tp->high_seq))
++ flag |= FLAG_ORIG_SACK_ACKED;
++ }
+ }
+
+ if (sacked & TCPCB_SACKED_ACKED)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index d22f54482bab..982347eee104 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1516,7 +1516,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+- struct dst_entry *dst = sk->sk_rx_dst;
++ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, 0);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9790f396ce5e..9f29453049dc 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2931,6 +2931,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ }
+ #endif
+
++ /* Do not fool tcpdump (if any), clean our debris */
++ skb->tstamp.tv64 = 0;
+ return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3f5aa9959076..0bf56e562f76 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -541,7 +541,8 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ {
+ struct sk_buff *frag;
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+- struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
++ struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
++ inet6_sk(skb->sk) : NULL;
+ struct ipv6hdr *tmp_hdr;
+ struct frag_hdr *fh;
+ unsigned int mtu, hlen, left, len;
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 682866777d53..d375ce60463e 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1216,7 +1216,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ if (rt)
+ rt6_set_expires(rt, jiffies + (HZ * lifetime));
+ if (ra_msg->icmph.icmp6_hop_limit) {
+- in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++ /* Only set hop_limit on the interface if it is higher than
++ * the current hop_limit.
++ */
++ if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++ in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++ } else {
++ ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++ }
+ if (rt)
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+ ra_msg->icmph.icmp6_hop_limit);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 9c0b54e87b47..b89979312fbb 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1409,6 +1409,15 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ TCP_SKB_CB(skb)->sacked = 0;
+ }
+
++static void tcp_v6_restore_cb(struct sk_buff *skb)
++{
++ /* We need to move header back to the beginning if xfrm6_policy_check()
++ * and tcp_v6_fill_cb() are going to be called again.
++ */
++ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
++ sizeof(struct inet6_skb_parm));
++}
++
+ static int tcp_v6_rcv(struct sk_buff *skb)
+ {
+ const struct tcphdr *th;
+@@ -1541,6 +1550,7 @@ do_time_wait:
+ inet_twsk_deschedule(tw, &tcp_death_row);
+ inet_twsk_put(tw);
+ sk = sk2;
++ tcp_v6_restore_cb(skb);
+ goto process;
+ }
+ /* Fall through to ACK */
+@@ -1549,6 +1559,7 @@ do_time_wait:
+ tcp_v6_timewait_ack(sk, skb);
+ break;
+ case TCP_TW_RST:
++ tcp_v6_restore_cb(skb);
+ goto no_tcp_socket;
+ case TCP_TW_SUCCESS:
+ ;
+@@ -1583,7 +1594,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+- struct dst_entry *dst = sk->sk_rx_dst;
++ struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
+index 2034c6d9cb5a..296cc246f0a3 100644
+--- a/net/openvswitch/vport.c
++++ b/net/openvswitch/vport.c
+@@ -274,10 +274,8 @@ void ovs_vport_del(struct vport *vport)
+ ASSERT_OVSL();
+
+ hlist_del_rcu(&vport->hash_node);
+-
+- vport->ops->destroy(vport);
+-
+ module_put(vport->ops->owner);
++ vport->ops->destroy(vport);
+ }
+
+ /**