From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.0 commit in: /
Date: Wed, 29 Apr 2015 13:35:43
Message-Id: 1430314522.b5c2b5b2947190cece9bf6218aa9dca795670288.mpagano@gentoo
commit: b5c2b5b2947190cece9bf6218aa9dca795670288
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 29 13:35:22 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 29 13:35:22 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b5c2b5b2

Linux patch 4.0.1

0000_README | 4 +
1000_linux-4.0.1.patch | 479 +++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 483 insertions(+)

diff --git a/0000_README b/0000_README
index 0cdee6d..483ca42 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
Individual Patch Descriptions:
--------------------------------------------------------------------------

+Patch: 1000_linux-4.0.1.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.1
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-4.0.1.patch b/1000_linux-4.0.1.patch
new file mode 100644
index 0000000..ac58552
--- /dev/null
+++ b/1000_linux-4.0.1.patch
@@ -0,0 +1,479 @@
+diff --git a/Makefile b/Makefile
+index fbd43bfe4445..f499cd2f5738 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 4085c4b31047..355d5fea5be9 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
+ struct napi_struct napi;
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+- unsigned int state;
+-#define BNX2X_FP_STATE_IDLE 0
+-#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
+-#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
+-#define BNX2X_FP_STATE_DISABLED (1 << 2)
+-#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+-#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+-#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+-#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+-#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+- /* protect state */
+- spinlock_t lock;
+-#endif /* CONFIG_NET_RX_BUSY_POLL */
++ unsigned long busy_poll_state;
++#endif
+
+ union host_hc_status_block status_blk;
+ /* chip independent shortcuts into sb structure */
+@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
+ #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++
++enum bnx2x_fp_state {
++ BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */
++
++ BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
++ BNX2X_STATE_FP_NAPI_REQ = BIT(1),
++
++ BNX2X_STATE_FP_POLL_BIT = 2,
++ BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */
++
++ BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
++};
++
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+- spin_lock_init(&fp->lock);
+- fp->state = BNX2X_FP_STATE_IDLE;
++ WRITE_ONCE(fp->busy_poll_state, 0);
+ }
+
+ /* called from the device poll routine to get ownership of a FP */
+ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_LOCKED) {
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+- fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+- rc = false;
+- } else {
+- /* we don't care if someone yielded */
+- fp->state = BNX2X_FP_STATE_NAPI;
++ unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
++
++ while (1) {
++ switch (old) {
++ case BNX2X_STATE_FP_POLL:
++ /* make sure bnx2x_fp_lock_poll() wont starve us */
++ set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
++ &fp->busy_poll_state);
++ /* fallthrough */
++ case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
++ return false;
++ default:
++ break;
++ }
++ prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
++ if (unlikely(prev != old)) {
++ old = prev;
++ continue;
++ }
++ return true;
+ }
+- spin_unlock_bh(&fp->lock);
+- return rc;
+ }
+
+-/* returns true is someone tried to get the FP while napi had it */
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state &
+- (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_wmb();
++ fp->busy_poll_state = 0;
+ }
+
+ /* called from bnx2x_low_latency_poll() */
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if ((fp->state & BNX2X_FP_LOCKED)) {
+- fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+- rc = false;
+- } else {
+- /* preserve yield marks */
+- fp->state |= BNX2X_FP_STATE_POLL;
+- }
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
+ }
+
+-/* returns true if someone tried to get the FP while it was locked */
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- bool rc = false;
+-
+- spin_lock_bh(&fp->lock);
+- WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+-
+- if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+- rc = true;
+-
+- /* state ==> idle, unless currently disabled */
+- fp->state &= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
+- return rc;
++ smp_mb__before_atomic();
++ clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
+ }
+
+-/* true if a socket is polling, even if it did not get the lock */
++/* true if a socket is polling */
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+ {
+- WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+- return fp->state & BNX2X_FP_USER_PEND;
++ return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
+ }
+
+ /* false if fp is currently owned */
+ static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+ {
+- int rc = true;
+-
+- spin_lock_bh(&fp->lock);
+- if (fp->state & BNX2X_FP_OWNED)
+- rc = false;
+- fp->state |= BNX2X_FP_STATE_DISABLED;
+- spin_unlock_bh(&fp->lock);
++ set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
++ return !bnx2x_fp_ll_polling(fp);
+
+- return rc;
+ }
+ #else
+-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
+ {
+ }
+
+@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+ return true;
+ }
+
+-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+ return false;
+ }
+
+-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
++static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+ {
+- return false;
+ }
+
+ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0a9faa134a9a..2f63467bce46 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+ int i;
+
+ for_each_rx_queue_cnic(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
+ int i;
+
+ for_each_eth_queue(bp, i) {
+- bnx2x_fp_init_lock(&bp->fp[i]);
++ bnx2x_fp_busy_poll_init(&bp->fp[i]);
+ napi_enable(&bnx2x_fp(bp, i, napi));
+ }
+ }
+@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
+ }
+ }
+
++ bnx2x_fp_unlock_napi(fp);
++
+ /* Fall out from the NAPI loop if needed */
+- if (!bnx2x_fp_unlock_napi(fp) &&
+- !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
++ if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+
+ /* No need to update SB for FCoE L2 ring as long as
+ * it's connected to the default SB and the SB
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index f8528a4cf54f..fceb637efd6b 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1713,12 +1713,6 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb)) {
+- err = -EINVAL;
+- goto err;
+- }
+-
+ skb_scrub_packet(skb, xnet);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+@@ -1738,6 +1732,12 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sk_buff *skb,
+ goto err;
+ }
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb)) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+@@ -1798,10 +1798,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ }
+ }
+
+- skb = iptunnel_handle_offloads(skb, udp_sum, type);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -1817,6 +1813,10 @@ int vxlan_xmit_skb(struct rtable *rt, struct sk_buff *skb,
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
++ skb = iptunnel_handle_offloads(skb, udp_sum, type);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
+diff --git a/fs/exec.c b/fs/exec.c
+index c7f9b733406d..00400cf522dc 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1265,6 +1265,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ spin_unlock(&p->fs->lock);
+ }
+
++static void bprm_fill_uid(struct linux_binprm *bprm)
++{
++ struct inode *inode;
++ unsigned int mode;
++ kuid_t uid;
++ kgid_t gid;
++
++ /* clear any previous set[ug]id data from a previous binary */
++ bprm->cred->euid = current_euid();
++ bprm->cred->egid = current_egid();
++
++ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
++ return;
++
++ if (task_no_new_privs(current))
++ return;
++
++ inode = file_inode(bprm->file);
++ mode = READ_ONCE(inode->i_mode);
++ if (!(mode & (S_ISUID|S_ISGID)))
++ return;
++
++ /* Be careful if suid/sgid is set */
++ mutex_lock(&inode->i_mutex);
++
++ /* reload atomically mode/uid/gid now that lock held */
++ mode = inode->i_mode;
++ uid = inode->i_uid;
++ gid = inode->i_gid;
++ mutex_unlock(&inode->i_mutex);
++
++ /* We ignore suid/sgid if there are no mappings for them in the ns */
++ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
++ !kgid_has_mapping(bprm->cred->user_ns, gid))
++ return;
++
++ if (mode & S_ISUID) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->euid = uid;
++ }
++
++ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++ bprm->cred->egid = gid;
++ }
++}
++
+ /*
+ * Fill the binprm structure from the inode.
+ * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
+@@ -1273,36 +1320,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ */
+ int prepare_binprm(struct linux_binprm *bprm)
+ {
+- struct inode *inode = file_inode(bprm->file);
+- umode_t mode = inode->i_mode;
+ int retval;
+
+-
+- /* clear any previous set[ug]id data from a previous binary */
+- bprm->cred->euid = current_euid();
+- bprm->cred->egid = current_egid();
+-
+- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
+- !task_no_new_privs(current) &&
+- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
+- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
+- /* Set-uid? */
+- if (mode & S_ISUID) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->euid = inode->i_uid;
+- }
+-
+- /* Set-gid? */
+- /*
+- * If setgid is set but no group execute bit then this
+- * is a candidate for mandatory locking, not a setgid
+- * executable.
+- */
+- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
+- bprm->per_clear |= PER_CLEAR_ON_SETID;
+- bprm->cred->egid = inode->i_gid;
+- }
+- }
++ bprm_fill_uid(bprm);
+
+ /* fill in binprm security blob */
+ retval = security_bprm_set_creds(bprm);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a28e09c7825d..36508e69e92a 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1380,7 +1380,8 @@ peek_stack:
+ /* tell verifier to check for equivalent states
+ * after every call and jump
+ */
+- env->explored_states[t + 1] = STATE_LIST_MARK;
++ if (t + 1 < insn_cnt)
++ env->explored_states[t + 1] = STATE_LIST_MARK;
+ } else {
+ /* conditional jump with two edges */
+ ret = push_insn(t, t + 1, FALLTHROUGH, env);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8e4ac97c8477..98d45fe72f51 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4169,19 +4169,21 @@ EXPORT_SYMBOL(skb_try_coalesce);
+ */
+ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ {
+- if (xnet)
+- skb_orphan(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->skb_iif = 0;
+ skb->ignore_df = 0;
+ skb_dst_drop(skb);
+- skb->mark = 0;
+ skb_sender_cpu_clear(skb);
+- skb_init_secmark(skb);
+ secpath_reset(skb);
+ nf_reset(skb);
+ nf_reset_trace(skb);
++
++ if (!xnet)
++ return;
++
++ skb_orphan(skb);
++ skb->mark = 0;
+ }
+ EXPORT_SYMBOL_GPL(skb_scrub_packet);
+
+diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
+index 5a4828ba05ad..a566a2e4715b 100644
+--- a/net/ipv4/geneve.c
++++ b/net/ipv4/geneve.c
+@@ -113,10 +113,6 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ int min_headroom;
+ int err;
+
+- skb = udp_tunnel_handle_offloads(skb, csum);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
+-
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+@@ -131,6 +127,10 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ if (unlikely(!skb))
+ return -ENOMEM;
+
++ skb = udp_tunnel_handle_offloads(skb, csum);
++ if (IS_ERR(skb))
++ return PTR_ERR(skb);
++
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 1db253e36045..d520492ba698 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2929,6 +2929,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ }
+ #endif
+
++ /* Do not fool tcpdump (if any), clean our debris */
++ skb->tstamp.tv64 = 0;
+ return skb;
+ }
+ EXPORT_SYMBOL(tcp_make_synack);