1 |
commit: 07f04985317caf6e1e030ebb0ff156f8990449a4 |
2 |
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
3 |
AuthorDate: Sun Aug 13 16:51:56 2017 +0000 |
4 |
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> |
5 |
CommitDate: Sun Aug 13 16:51:56 2017 +0000 |
6 |
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=07f04985 |
7 |
|
8 |
Linux patch 4.4.82 |
9 |
|
10 |
0000_README | 4 + |
11 |
1081_linux-4.4.82.patch | 330 ++++++++++++++++++++++++++++++++++++++++++++++++ |
12 |
2 files changed, 334 insertions(+) |
13 |
|
14 |
diff --git a/0000_README b/0000_README |
15 |
index c396c3a..0fe7ce9 100644 |
16 |
--- a/0000_README |
17 |
+++ b/0000_README |
18 |
@@ -367,6 +367,10 @@ Patch: 1080_linux-4.4.81.patch |
19 |
From: http://www.kernel.org |
20 |
Desc: Linux 4.4.81 |
21 |
|
22 |
+Patch: 1081_linux-4.4.82.patch |
23 |
+From: http://www.kernel.org |
24 |
+Desc: Linux 4.4.82 |
25 |
+ |
26 |
Patch: 1500_XATTR_USER_PREFIX.patch |
27 |
From: https://bugs.gentoo.org/show_bug.cgi?id=470644 |
28 |
Desc: Support for namespace user.pax.* on tmpfs. |
29 |
|
30 |
diff --git a/1081_linux-4.4.82.patch b/1081_linux-4.4.82.patch |
31 |
new file mode 100644 |
32 |
index 0000000..f61b767 |
33 |
--- /dev/null |
34 |
+++ b/1081_linux-4.4.82.patch |
35 |
@@ -0,0 +1,330 @@ |
36 |
+diff --git a/Makefile b/Makefile |
37 |
+index d049e53a6960..52f2dd8dcebd 100644 |
38 |
+--- a/Makefile |
39 |
++++ b/Makefile |
40 |
+@@ -1,6 +1,6 @@ |
41 |
+ VERSION = 4 |
42 |
+ PATCHLEVEL = 4 |
43 |
+-SUBLEVEL = 81 |
44 |
++SUBLEVEL = 82 |
45 |
+ EXTRAVERSION = |
46 |
+ NAME = Blurry Fish Butt |
47 |
+ |
48 |
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
49 |
+index 1f1ff7e7b9cf..ba079e279b58 100644 |
50 |
+--- a/arch/arm/kvm/mmu.c |
51 |
++++ b/arch/arm/kvm/mmu.c |
52 |
+@@ -1629,12 +1629,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
53 |
+ |
54 |
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) |
55 |
+ { |
56 |
++ if (!kvm->arch.pgd) |
57 |
++ return 0; |
58 |
+ trace_kvm_age_hva(start, end); |
59 |
+ return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); |
60 |
+ } |
61 |
+ |
62 |
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
63 |
+ { |
64 |
++ if (!kvm->arch.pgd) |
65 |
++ return 0; |
66 |
+ trace_kvm_test_age_hva(hva); |
67 |
+ return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); |
68 |
+ } |
69 |
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c |
70 |
+index 0e2919dd8df3..1395eeb6005f 100644 |
71 |
+--- a/arch/s390/net/bpf_jit_comp.c |
72 |
++++ b/arch/s390/net/bpf_jit_comp.c |
73 |
+@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) |
74 |
+ insn_count = bpf_jit_insn(jit, fp, i); |
75 |
+ if (insn_count < 0) |
76 |
+ return -1; |
77 |
+- jit->addrs[i + 1] = jit->prg; /* Next instruction address */ |
78 |
++ /* Next instruction address */ |
79 |
++ jit->addrs[i + insn_count] = jit->prg; |
80 |
+ } |
81 |
+ bpf_jit_epilogue(jit); |
82 |
+ |
83 |
+diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h |
84 |
+index 349dd23e2876..0cdeb2b483a0 100644 |
85 |
+--- a/arch/sparc/include/asm/mmu_context_64.h |
86 |
++++ b/arch/sparc/include/asm/mmu_context_64.h |
87 |
+@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm); |
88 |
+ void __tsb_context_switch(unsigned long pgd_pa, |
89 |
+ struct tsb_config *tsb_base, |
90 |
+ struct tsb_config *tsb_huge, |
91 |
+- unsigned long tsb_descr_pa); |
92 |
++ unsigned long tsb_descr_pa, |
93 |
++ unsigned long secondary_ctx); |
94 |
+ |
95 |
+-static inline void tsb_context_switch(struct mm_struct *mm) |
96 |
++static inline void tsb_context_switch_ctx(struct mm_struct *mm, |
97 |
++ unsigned long ctx) |
98 |
+ { |
99 |
+ __tsb_context_switch(__pa(mm->pgd), |
100 |
+ &mm->context.tsb_block[0], |
101 |
+@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm) |
102 |
+ #else |
103 |
+ NULL |
104 |
+ #endif |
105 |
+- , __pa(&mm->context.tsb_descr[0])); |
106 |
++ , __pa(&mm->context.tsb_descr[0]), |
107 |
++ ctx); |
108 |
+ } |
109 |
+ |
110 |
++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0) |
111 |
++ |
112 |
+ void tsb_grow(struct mm_struct *mm, |
113 |
+ unsigned long tsb_index, |
114 |
+ unsigned long mm_rss); |
115 |
+@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str |
116 |
+ * cpu0 to update it's TSB because at that point the cpu_vm_mask |
117 |
+ * only had cpu1 set in it. |
118 |
+ */ |
119 |
+- load_secondary_context(mm); |
120 |
+- tsb_context_switch(mm); |
121 |
++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); |
122 |
+ |
123 |
+ /* Any time a processor runs a context on an address space |
124 |
+ * for the first time, we must flush that context out of the |
125 |
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S |
126 |
+index 395ec1800530..7d961f6e3907 100644 |
127 |
+--- a/arch/sparc/kernel/tsb.S |
128 |
++++ b/arch/sparc/kernel/tsb.S |
129 |
+@@ -375,6 +375,7 @@ tsb_flush: |
130 |
+ * %o1: TSB base config pointer |
131 |
+ * %o2: TSB huge config pointer, or NULL if none |
132 |
+ * %o3: Hypervisor TSB descriptor physical address |
133 |
++ * %o4: Secondary context to load, if non-zero |
134 |
+ * |
135 |
+ * We have to run this whole thing with interrupts |
136 |
+ * disabled so that the current cpu doesn't change |
137 |
+@@ -387,6 +388,17 @@ __tsb_context_switch: |
138 |
+ rdpr %pstate, %g1 |
139 |
+ wrpr %g1, PSTATE_IE, %pstate |
140 |
+ |
141 |
++ brz,pn %o4, 1f |
142 |
++ mov SECONDARY_CONTEXT, %o5 |
143 |
++ |
144 |
++661: stxa %o4, [%o5] ASI_DMMU |
145 |
++ .section .sun4v_1insn_patch, "ax" |
146 |
++ .word 661b |
147 |
++ stxa %o4, [%o5] ASI_MMU |
148 |
++ .previous |
149 |
++ flush %g6 |
150 |
++ |
151 |
++1: |
152 |
+ TRAP_LOAD_TRAP_BLOCK(%g2, %g3) |
153 |
+ |
154 |
+ stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] |
155 |
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c |
156 |
+index 17bd2e167e07..df707a8ad311 100644 |
157 |
+--- a/arch/sparc/power/hibernate.c |
158 |
++++ b/arch/sparc/power/hibernate.c |
159 |
+@@ -35,6 +35,5 @@ void restore_processor_state(void) |
160 |
+ { |
161 |
+ struct mm_struct *mm = current->active_mm; |
162 |
+ |
163 |
+- load_secondary_context(mm); |
164 |
+- tsb_context_switch(mm); |
165 |
++ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); |
166 |
+ } |
167 |
+diff --git a/mm/mempool.c b/mm/mempool.c |
168 |
+index 004d42b1dfaf..7924f4f58a6d 100644 |
169 |
+--- a/mm/mempool.c |
170 |
++++ b/mm/mempool.c |
171 |
+@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool) |
172 |
+ void *element = pool->elements[--pool->curr_nr]; |
173 |
+ |
174 |
+ BUG_ON(pool->curr_nr < 0); |
175 |
+- check_element(pool, element); |
176 |
+ kasan_unpoison_element(pool, element); |
177 |
++ check_element(pool, element); |
178 |
+ return element; |
179 |
+ } |
180 |
+ |
181 |
+diff --git a/net/core/dev.c b/net/core/dev.c |
182 |
+index 4b0853194a03..24d243084aab 100644 |
183 |
+--- a/net/core/dev.c |
184 |
++++ b/net/core/dev.c |
185 |
+@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) |
186 |
+ { |
187 |
+ if (tx_path) |
188 |
+ return skb->ip_summed != CHECKSUM_PARTIAL && |
189 |
+- skb->ip_summed != CHECKSUM_NONE; |
190 |
++ skb->ip_summed != CHECKSUM_UNNECESSARY; |
191 |
+ |
192 |
+ return skb->ip_summed == CHECKSUM_NONE; |
193 |
+ } |
194 |
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
195 |
+index 5d58a6703a43..09c73dd541c5 100644 |
196 |
+--- a/net/ipv4/ip_output.c |
197 |
++++ b/net/ipv4/ip_output.c |
198 |
+@@ -922,11 +922,12 @@ static int __ip_append_data(struct sock *sk, |
199 |
+ csummode = CHECKSUM_PARTIAL; |
200 |
+ |
201 |
+ cork->length += length; |
202 |
+- if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || |
203 |
+- (skb && skb_is_gso(skb))) && |
204 |
++ if ((skb && skb_is_gso(skb)) || |
205 |
++ (((length + (skb ? skb->len : fragheaderlen)) > mtu) && |
206 |
++ (skb_queue_len(queue) <= 1) && |
207 |
+ (sk->sk_protocol == IPPROTO_UDP) && |
208 |
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
209 |
+- (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { |
210 |
++ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) { |
211 |
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length, |
212 |
+ hh_len, fragheaderlen, transhdrlen, |
213 |
+ maxfraglen, flags); |
214 |
+@@ -1242,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, |
215 |
+ return -EINVAL; |
216 |
+ |
217 |
+ if ((size + skb->len > mtu) && |
218 |
++ (skb_queue_len(&sk->sk_write_queue) == 1) && |
219 |
+ (sk->sk_protocol == IPPROTO_UDP) && |
220 |
+ (rt->dst.dev->features & NETIF_F_UFO)) { |
221 |
+ if (skb->ip_summed != CHECKSUM_PARTIAL) |
222 |
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
223 |
+index 8f13b2eaabf8..f0dabd125c43 100644 |
224 |
+--- a/net/ipv4/tcp_input.c |
225 |
++++ b/net/ipv4/tcp_input.c |
226 |
+@@ -2503,8 +2503,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) |
227 |
+ struct tcp_sock *tp = tcp_sk(sk); |
228 |
+ |
229 |
+ /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ |
230 |
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || |
231 |
+- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { |
232 |
++ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && |
233 |
++ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { |
234 |
+ tp->snd_cwnd = tp->snd_ssthresh; |
235 |
+ tp->snd_cwnd_stamp = tcp_time_stamp; |
236 |
+ } |
237 |
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
238 |
+index 3fdcdc730f71..850d1b5bfd81 100644 |
239 |
+--- a/net/ipv4/tcp_output.c |
240 |
++++ b/net/ipv4/tcp_output.c |
241 |
+@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk) |
242 |
+ struct sk_buff *buff; |
243 |
+ int err; |
244 |
+ |
245 |
++ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) |
246 |
++ return -EHOSTUNREACH; /* Routing failure or similar. */ |
247 |
++ |
248 |
+ tcp_connect_init(sk); |
249 |
+ |
250 |
+ if (unlikely(tp->repair)) { |
251 |
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c |
252 |
+index ebb34d0c5e80..1ec12a4f327e 100644 |
253 |
+--- a/net/ipv4/tcp_timer.c |
254 |
++++ b/net/ipv4/tcp_timer.c |
255 |
+@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data) |
256 |
+ goto death; |
257 |
+ } |
258 |
+ |
259 |
+- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) |
260 |
++ if (!sock_flag(sk, SOCK_KEEPOPEN) || |
261 |
++ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) |
262 |
+ goto out; |
263 |
+ |
264 |
+ elapsed = keepalive_time_when(tp); |
265 |
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c |
266 |
+index e9513e397c4f..301e60829c7e 100644 |
267 |
+--- a/net/ipv4/udp.c |
268 |
++++ b/net/ipv4/udp.c |
269 |
+@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) |
270 |
+ if (is_udplite) /* UDP-Lite */ |
271 |
+ csum = udplite_csum(skb); |
272 |
+ |
273 |
+- else if (sk->sk_no_check_tx) { /* UDP csum disabled */ |
274 |
++ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ |
275 |
+ |
276 |
+ skb->ip_summed = CHECKSUM_NONE; |
277 |
+ goto send; |
278 |
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c |
279 |
+index 6396f1c80ae9..6dfc3daf7c21 100644 |
280 |
+--- a/net/ipv4/udp_offload.c |
281 |
++++ b/net/ipv4/udp_offload.c |
282 |
+@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, |
283 |
+ if (uh->check == 0) |
284 |
+ uh->check = CSUM_MANGLED_0; |
285 |
+ |
286 |
+- skb->ip_summed = CHECKSUM_NONE; |
287 |
++ skb->ip_summed = CHECKSUM_UNNECESSARY; |
288 |
+ |
289 |
+ /* Fragment the skb. IP headers of the fragments are updated in |
290 |
+ * inet_gso_segment() |
291 |
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
292 |
+index 0de3245ea42f..e22339fad10b 100644 |
293 |
+--- a/net/ipv6/ip6_output.c |
294 |
++++ b/net/ipv6/ip6_output.c |
295 |
+@@ -1357,11 +1357,12 @@ emsgsize: |
296 |
+ */ |
297 |
+ |
298 |
+ cork->length += length; |
299 |
+- if ((((length + (skb ? skb->len : headersize)) > mtu) || |
300 |
+- (skb && skb_is_gso(skb))) && |
301 |
++ if ((skb && skb_is_gso(skb)) || |
302 |
++ (((length + (skb ? skb->len : headersize)) > mtu) && |
303 |
++ (skb_queue_len(queue) <= 1) && |
304 |
+ (sk->sk_protocol == IPPROTO_UDP) && |
305 |
+ (rt->dst.dev->features & NETIF_F_UFO) && |
306 |
+- (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { |
307 |
++ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) { |
308 |
+ err = ip6_ufo_append_data(sk, queue, getfrag, from, length, |
309 |
+ hh_len, fragheaderlen, exthdrlen, |
310 |
+ transhdrlen, mtu, flags, fl6); |
311 |
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c |
312 |
+index 01582966ffa0..2e3c12eeca07 100644 |
313 |
+--- a/net/ipv6/udp_offload.c |
314 |
++++ b/net/ipv6/udp_offload.c |
315 |
+@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, |
316 |
+ if (uh->check == 0) |
317 |
+ uh->check = CSUM_MANGLED_0; |
318 |
+ |
319 |
+- skb->ip_summed = CHECKSUM_NONE; |
320 |
++ skb->ip_summed = CHECKSUM_UNNECESSARY; |
321 |
+ |
322 |
+ /* Check if there is enough headroom to insert fragment header. */ |
323 |
+ tnl_hlen = skb_tnl_header_len(skb); |
324 |
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
325 |
+index 061771ca2582..148ec130d99d 100644 |
326 |
+--- a/net/packet/af_packet.c |
327 |
++++ b/net/packet/af_packet.c |
328 |
+@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
329 |
+ |
330 |
+ if (optlen != sizeof(val)) |
331 |
+ return -EINVAL; |
332 |
+- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
333 |
+- return -EBUSY; |
334 |
+ if (copy_from_user(&val, optval, sizeof(val))) |
335 |
+ return -EFAULT; |
336 |
+ if (val > INT_MAX) |
337 |
+ return -EINVAL; |
338 |
+- po->tp_reserve = val; |
339 |
+- return 0; |
340 |
++ lock_sock(sk); |
341 |
++ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
342 |
++ ret = -EBUSY; |
343 |
++ } else { |
344 |
++ po->tp_reserve = val; |
345 |
++ ret = 0; |
346 |
++ } |
347 |
++ release_sock(sk); |
348 |
++ return ret; |
349 |
+ } |
350 |
+ case PACKET_LOSS: |
351 |
+ { |
352 |
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c |
353 |
+index d05869646515..0915d448ba23 100644 |
354 |
+--- a/net/sched/act_ipt.c |
355 |
++++ b/net/sched/act_ipt.c |
356 |
+@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int |
357 |
+ return PTR_ERR(target); |
358 |
+ |
359 |
+ t->u.kernel.target = target; |
360 |
++ memset(&par, 0, sizeof(par)); |
361 |
+ par.table = table; |
362 |
+- par.entryinfo = NULL; |
363 |
+ par.target = target; |
364 |
+ par.targinfo = t->data; |
365 |
+ par.hook_mask = hook; |