From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Fri, 27 Feb 2015 18:35:23
Message-Id: 1425061426.67b9d04ff69ca7c7a8be3fefb3247bd81d2241cc.mpagano@gentoo
commit: 67b9d04ff69ca7c7a8be3fefb3247bd81d2241cc
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 27 18:23:46 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 27 18:23:46 2015 +0000
URL: http://sources.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=67b9d04f

Linux patch 3.10.70. Enable link security restrictions by default.

---
0000_README | 4 +
1069_linux-3.10.70.patch | 569 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 573 insertions(+)

diff --git a/0000_README b/0000_README
index 1791ea5..59f86bb 100644
--- a/0000_README
+++ b/0000_README
@@ -318,6 +318,10 @@ Patch: 1068_linux-3.10.69.patch
From: http://www.kernel.org
Desc: Linux 3.10.69

+Patch: 1069_linux-3.10.70.patch
+From: http://www.kernel.org
+Desc: Linux 3.10.70
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1069_linux-3.10.70.patch b/1069_linux-3.10.70.patch
new file mode 100644
index 0000000..eb0b700
--- /dev/null
+++ b/1069_linux-3.10.70.patch
@@ -0,0 +1,569 @@
+diff --git a/Makefile b/Makefile
+index 81ede20061cf..402cbb7c27f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 69
++SUBLEVEL = 70
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 9951e66b8502..7e3f45105f11 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2149,7 +2149,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
+ rbd_assert(img_request->obj_request_count > 0);
+ rbd_assert(which != BAD_WHICH);
+ rbd_assert(which < img_request->obj_request_count);
+- rbd_assert(which >= img_request->next_completion);
+
+ spin_lock_irq(&img_request->completion_lock);
+ if (which != img_request->next_completion)
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index e4561264e124..a895ed02da86 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
+ return -EINVAL;
+
+ /* Packet start */
+- if (ev.reset)
+- return 0;
++ if (ev.reset) {
++ /* Userspace expects a long space event before the start of
++ * the signal to use as a sync. This may be done with repeat
++ * packets and normal samples. But if a reset has been sent
++ * then we assume that a long time has passed, so we send a
++ * space with the maximum time value. */
++ sample = LIRC_SPACE(LIRC_VALUE_MASK);
++ IR_dprintk(2, "delivering reset sync space to lirc_dev\n");
+
+ /* Carrier reports */
+- if (ev.carrier_report) {
++ } else if (ev.carrier_report) {
+ sample = LIRC_FREQUENCY(ev.carrier);
+ IR_dprintk(2, "carrier report (freq: %d)\n", sample);
+
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index af951f343ff6..50104a7e963f 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -2315,7 +2315,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
+
+ work_done = netxen_process_rcv_ring(sds_ring, budget);
+
+- if ((work_done < budget) && tx_complete) {
++ if (!tx_complete)
++ work_done = budget;
++
++ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
+diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
+index 602c625d95d5..b5edc7f96a39 100644
+--- a/drivers/net/ppp/ppp_deflate.c
++++ b/drivers/net/ppp/ppp_deflate.c
+@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
+ /*
+ * See if we managed to reduce the size of the packet.
+ */
+- if (olen < isize) {
++ if (olen < isize && olen <= osize) {
+ state->stats.comp_bytes += olen;
+ state->stats.comp_packets++;
+ } else {
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 8695359982d1..0a62365149e2 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -37,11 +37,12 @@ struct inet_skb_parm {
+ struct ip_options opt; /* Compiled IP options */
+ unsigned char flags;
+
+-#define IPSKB_FORWARDED 1
+-#define IPSKB_XFRM_TUNNEL_SIZE 2
+-#define IPSKB_XFRM_TRANSFORMED 4
+-#define IPSKB_FRAG_COMPLETE 8
+-#define IPSKB_REROUTED 16
++#define IPSKB_FORWARDED BIT(0)
++#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
++#define IPSKB_XFRM_TRANSFORMED BIT(2)
++#define IPSKB_FRAG_COMPLETE BIT(3)
++#define IPSKB_REROUTED BIT(4)
++#define IPSKB_DOREDIRECT BIT(5)
+
+ u16 frag_max_size;
+ };
+@@ -162,7 +163,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
+ return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
+ }
+
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len);
+
+diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
+index 2ba9de89e8ec..03e6378d5353 100644
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -43,6 +43,7 @@ struct netns_ipv4 {
+ struct inet_peer_base *peers;
+ struct tcpm_hash_bucket *tcp_metrics_hash;
+ unsigned int tcp_metrics_hash_log;
++ struct sock * __percpu *tcp_sk;
+ struct netns_frags frags;
+ #ifdef CONFIG_NETFILTER
+ struct xt_table *iptable_filter;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index cca7ae0ba915..c310440309bb 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6015,10 +6015,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+ oldsd->output_queue = NULL;
+ oldsd->output_queue_tailp = &oldsd->output_queue;
+ }
+- /* Append NAPI poll list from offline CPU. */
+- if (!list_empty(&oldsd->poll_list)) {
+- list_splice_init(&oldsd->poll_list, &sd->poll_list);
+- raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ /* Append NAPI poll list from offline CPU, with one exception :
++ * process_backlog() must be called by cpu owning percpu backlog.
++ * We properly handle process_queue & input_pkt_queue later.
++ */
++ while (!list_empty(&oldsd->poll_list)) {
++ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
++ struct napi_struct,
++ poll_list);
++
++ list_del_init(&napi->poll_list);
++ if (napi->poll == process_backlog)
++ napi->state = 0;
++ else
++ ____napi_schedule(sd, napi);
+ }
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+@@ -6029,7 +6039,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+ netif_rx(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx(skb);
+ input_queue_head_incr(oldsd);
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 25c4dd563a79..279b5dcf09ae 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2477,12 +2477,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
+ goto errout;
+ }
+
++ if (!skb->len)
++ goto errout;
++
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ return 0;
+ errout:
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
++ if (err)
++ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ return err;
+ }
+
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index bd1c5baf69be..31ee5c6033df 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -175,7 +175,8 @@ int ip_forward(struct sk_buff *skb)
+ * We now generate an ICMP HOST REDIRECT giving the route
+ * we calculated.
+ */
+- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
++ !skb_sec_path(skb))
+ ip_rt_send_redirect(skb);
+
+ skb->priority = rt_tos2priority(iph->tos);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 22fa05e041ea..def18547748e 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1454,23 +1454,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
+ /*
+ * Generic function to send a packet as reply to another packet.
+ * Used to send some TCP resets/acks so far.
+- *
+- * Use a fake percpu inet socket to avoid false sharing and contention.
+ */
+-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+- .sk = {
+- .__sk_common = {
+- .skc_refcnt = ATOMIC_INIT(1),
+- },
+- .sk_wmem_alloc = ATOMIC_INIT(1),
+- .sk_allocation = GFP_ATOMIC,
+- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
+- },
+- .pmtudisc = IP_PMTUDISC_WANT,
+- .uc_ttl = -1,
+-};
+-
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len)
+ {
+@@ -1478,9 +1463,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ struct ipcm_cookie ipc;
+ struct flowi4 fl4;
+ struct rtable *rt = skb_rtable(skb);
++ struct net *net = sock_net(sk);
+ struct sk_buff *nskb;
+- struct sock *sk;
+- struct inet_sock *inet;
+ int err;
+
+ if (ip_options_echo(&replyopts.opt.opt, skb))
+@@ -1508,15 +1492,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ if (IS_ERR(rt))
+ return;
+
+- inet = &get_cpu_var(unicast_sock);
++ inet_sk(sk)->tos = arg->tos;
+
+- inet->tos = arg->tos;
+- sk = &inet->sk;
+ sk->sk_priority = skb->priority;
+ sk->sk_protocol = ip_hdr(skb)->protocol;
+ sk->sk_bound_dev_if = arg->bound_dev_if;
+- sock_net_set(sk, net);
+- __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ len, 0, &ipc, &rt, MSG_DONTWAIT);
+@@ -1532,13 +1512,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ arg->csumoffset) = csum_fold(csum_add(nskb->csum,
+ arg->csum));
+ nskb->ip_summed = CHECKSUM_NONE;
+- skb_orphan(nskb);
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ ip_push_pending_frames(sk, &fl4);
+ }
+ out:
+- put_cpu_var(unicast_sock);
+-
+ ip_rt_put(rt);
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 23e6ab0a2dc0..f6603142cb33 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -410,15 +410,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ sin = &errhdr.offender;
+- sin->sin_family = AF_UNSPEC;
++ memset(sin, 0, sizeof(*sin));
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
+- struct inet_sock *inet = inet_sk(sk);
+-
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+- sin->sin_port = 0;
+- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+- if (inet->cmsg_flags)
++ if (inet_sk(sk)->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ }
+
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index aa857a4a06a8..698f3a2ac5ae 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -720,8 +720,11 @@ void ping_rcv(struct sk_buff *skb)
+ sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
+ skb->dev->ifindex);
+ if (sk != NULL) {
++ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
++
+ pr_debug("rcv on socket %p\n", sk);
+- ping_queue_rcv_skb(sk, skb_get(skb));
++ if (skb2)
++ ping_queue_rcv_skb(sk, skb2);
+ sock_put(sk);
+ return;
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index d4d162eac4df..e23c5f64286b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1514,11 +1514,10 @@ static int __mkroute_input(struct sk_buff *skb,
+
+ do_cache = res->fi && !itag;
+ if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
++ skb->protocol == htons(ETH_P_IP) &&
+ (IN_DEV_SHARED_MEDIA(out_dev) ||
+- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
+- flags |= RTCF_DOREDIRECT;
+- do_cache = false;
+- }
++ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
++ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+
+ if (skb->protocol != htons(ETH_P_IP)) {
+ /* Not IP (i.e. ARP). Do not create route, if it is
+@@ -2255,6 +2254,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
+ if (rt->rt_flags & RTCF_NOTIFY)
+ r->rtm_flags |= RTM_F_NOTIFY;
++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
++ r->rtm_flags |= RTCF_DOREDIRECT;
+
+ if (nla_put_be32(skb, RTA_DST, dst))
+ goto nla_put_failure;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index e025c1c788a1..cce35e5a7ee6 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -707,7 +707,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++ skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -790,7 +791,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ if (oif)
+ arg.bound_dev_if = oif;
+ arg.tos = tos;
+- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++ skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -2898,14 +2900,39 @@ struct proto tcp_prot = {
+ };
+ EXPORT_SYMBOL(tcp_prot);
+
++static void __net_exit tcp_sk_exit(struct net *net)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
++ free_percpu(net->ipv4.tcp_sk);
++}
++
+ static int __net_init tcp_sk_init(struct net *net)
+ {
++ int res, cpu;
++
++ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
++ if (!net->ipv4.tcp_sk)
++ return -ENOMEM;
++
++ for_each_possible_cpu(cpu) {
++ struct sock *sk;
++
++ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
++ IPPROTO_TCP, net);
++ if (res)
++ goto fail;
++ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
++ }
+ net->ipv4.sysctl_tcp_ecn = 2;
+ return 0;
+-}
+
+-static void __net_exit tcp_sk_exit(struct net *net)
+-{
++fail:
++ tcp_sk_exit(net);
++
++ return res;
+ }
+
+ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
+index 7927db0a9279..4a000f1dd757 100644
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
+ s_slot = cb->args[0];
+ num = s_num = cb->args[1];
+
+- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
++ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
+ struct sock *sk;
+ struct hlist_nulls_node *node;
+ struct udp_hslot *hslot = &table->hash[slot];
+
++ num = 0;
++
+ if (hlist_nulls_empty(&hslot->head))
+ continue;
+
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 8997340e3742..ce17d3da9b2b 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -374,11 +374,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ sin = &errhdr.offender;
+- sin->sin6_family = AF_UNSPEC;
++ memset(sin, 0, sizeof(*sin));
++
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+- sin->sin6_flowinfo = 0;
+- sin->sin6_port = 0;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ sin->sin6_addr = ipv6_hdr(skb)->saddr;
+ if (np->rxopt.all)
+@@ -387,12 +386,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ ipv6_iface_scope_id(&sin->sin6_addr,
+ IP6CB(skb)->iif);
+ } else {
+- struct inet_sock *inet = inet_sk(sk);
+-
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
+ &sin->sin6_addr);
+- sin->sin6_scope_id = 0;
+- if (inet->cmsg_flags)
++ if (inet_sk(sk)->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ }
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 009c9620f442..ceeb9458bb60 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -638,6 +638,29 @@ static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+ RTF_GATEWAY;
+ }
+
++static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
++ struct net *net)
++{
++ if (atomic_read(&rt->rt6i_ref) != 1) {
++ /* This route is used as dummy address holder in some split
++ * nodes. It is not leaked, but it still holds other resources,
++ * which must be released in time. So, scan ascendant nodes
++ * and replace dummy references to this route with references
++ * to still alive ones.
++ */
++ while (fn) {
++ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
++ fn->leaf = fib6_find_prefix(net, fn);
++ atomic_inc(&fn->leaf->rt6i_ref);
++ rt6_release(rt);
++ }
++ fn = fn->parent;
++ }
++ /* No more references are possible at this point. */
++ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
++ }
++}
++
+ /*
+ * Insert routing information in a node.
+ */
+@@ -775,11 +798,12 @@ add:
+ rt->dst.rt6_next = iter->dst.rt6_next;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+- rt6_release(iter);
+ if (!(fn->fn_flags & RTN_RTINFO)) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
++ fib6_purge_rt(iter, fn, info->nl_net);
++ rt6_release(iter);
+ }
+
+ return 0;
+@@ -1284,24 +1308,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
+ fn = fib6_repair_tree(net, fn);
+ }
+
+- if (atomic_read(&rt->rt6i_ref) != 1) {
+- /* This route is used as dummy address holder in some split
+- * nodes. It is not leaked, but it still holds other resources,
+- * which must be released in time. So, scan ascendant nodes
+- * and replace dummy references to this route with references
+- * to still alive ones.
+- */
+- while (fn) {
+- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+- fn->leaf = fib6_find_prefix(net, fn);
+- atomic_inc(&fn->leaf->rt6i_ref);
+- rt6_release(rt);
+- }
+- fn = fn->parent;
+- }
+- /* No more references are possible at this point. */
+- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+- }
++ fib6_purge_rt(rt, fn, net);
+
+ inet6_rt_notify(RTM_DELROUTE, rt, info);
+ rt6_release(rt);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b2614b22622b..92274796eb71 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1141,12 +1141,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct net *net = dev_net(dst->dev);
+
+ rt6->rt6i_flags |= RTF_MODIFIED;
+- if (mtu < IPV6_MIN_MTU) {
+- u32 features = dst_metric(dst, RTAX_FEATURES);
++ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+- features |= RTAX_FEATURE_ALLFRAG;
+- dst_metric_set(dst, RTAX_FEATURES, features);
+- }
++
+ dst_metric_set(dst, RTAX_MTU, mtu);
+ rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ }
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index ca4a1a1b8e69..6360a14edeab 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1297,7 +1297,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
+ asoc->peer.peer_hmacs = new->peer.peer_hmacs;
+ new->peer.peer_hmacs = NULL;
+
+- sctp_auth_key_put(asoc->asoc_shared_key);
+ sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+ }
+
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 29fc16f3633f..beedadf62f6c 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2595,7 +2595,7 @@ do_addr_param:
+
+ addr_param = param.v + sizeof(sctp_addip_param_t);
+
+- af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (af == NULL)
+ break;
+