
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Sun, 05 May 2019 13:42:11
Message-Id: 1557063712.b058d6c4cd938dce578766f72dd910dabf3db4ab.mpagano@gentoo
commit:     b058d6c4cd938dce578766f72dd910dabf3db4ab
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 5 13:41:52 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 5 13:41:52 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b058d6c4

Linux patch 4.19.40

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1039_linux-4.19.40.patch | 1032 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1036 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index be92968..27107b1 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -199,6 +199,10 @@ Patch: 1038_linux-4.19.39.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.19.39
23
24 +Patch: 1039_linux-4.19.40.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.19.40
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1039_linux-4.19.40.patch b/1039_linux-4.19.40.patch
33 new file mode 100644
34 index 0000000..6263b35
35 --- /dev/null
36 +++ b/1039_linux-4.19.40.patch
37 @@ -0,0 +1,1032 @@
38 +diff --git a/Makefile b/Makefile
39 +index be1bd297bca9..3822720a8a1c 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 39
47 ++SUBLEVEL = 40
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
52 +index fd23d5778ea1..f1645578d9d0 100644
53 +--- a/arch/x86/include/uapi/asm/kvm.h
54 ++++ b/arch/x86/include/uapi/asm/kvm.h
55 +@@ -378,6 +378,7 @@ struct kvm_sync_regs {
56 + #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
57 + #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
58 + #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
59 ++#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
60 +
61 + #define KVM_STATE_NESTED_GUEST_MODE 0x00000001
62 + #define KVM_STATE_NESTED_RUN_PENDING 0x00000002
63 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
64 +index 3380a312d186..215339c7d161 100644
65 +--- a/arch/x86/kvm/vmx.c
66 ++++ b/arch/x86/kvm/vmx.c
67 +@@ -14236,7 +14236,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
68 + return ret;
69 +
70 + /* Empty 'VMXON' state is permitted */
71 +- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
72 ++ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
73 + return 0;
74 +
75 + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
76 +@@ -14269,7 +14269,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
77 + if (nested_cpu_has_shadow_vmcs(vmcs12) &&
78 + vmcs12->vmcs_link_pointer != -1ull) {
79 + struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
80 +- if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
81 ++ if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
82 + return -EINVAL;
83 +
84 + if (copy_from_user(shadow_vmcs12,
85 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
86 +index 4a61e1609c97..f3337adaf9b3 100644
87 +--- a/arch/x86/kvm/x86.c
88 ++++ b/arch/x86/kvm/x86.c
89 +@@ -6328,6 +6328,12 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
90 + }
91 + EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
92 +
93 ++static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
94 ++{
95 ++ vcpu->arch.pio.count = 0;
96 ++ return 1;
97 ++}
98 ++
99 + static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
100 + {
101 + vcpu->arch.pio.count = 0;
102 +@@ -6344,12 +6350,23 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
103 + unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
104 + int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
105 + size, port, &val, 1);
106 ++ if (ret)
107 ++ return ret;
108 +
109 +- if (!ret) {
110 ++ /*
111 ++ * Workaround userspace that relies on old KVM behavior of %rip being
112 ++ * incremented prior to exiting to userspace to handle "OUT 0x7e".
113 ++ */
114 ++ if (port == 0x7e &&
115 ++ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
116 ++ vcpu->arch.complete_userspace_io =
117 ++ complete_fast_pio_out_port_0x7e;
118 ++ kvm_skip_emulated_instruction(vcpu);
119 ++ } else {
120 + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
121 + vcpu->arch.complete_userspace_io = complete_fast_pio_out;
122 + }
123 +- return ret;
124 ++ return 0;
125 + }
126 +
127 + static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
128 +diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
129 +index 47c5f272a084..21db1804e85d 100644
130 +--- a/drivers/net/dsa/bcm_sf2_cfp.c
131 ++++ b/drivers/net/dsa/bcm_sf2_cfp.c
132 +@@ -742,6 +742,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
133 + fs->m_ext.data[1]))
134 + return -EINVAL;
135 +
136 ++ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
137 ++ return -EINVAL;
138 ++
139 + if (fs->location != RX_CLS_LOC_ANY &&
140 + test_bit(fs->location, priv->cfp.used))
141 + return -EBUSY;
142 +@@ -836,6 +839,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
143 + u32 next_loc = 0;
144 + int ret;
145 +
146 ++ if (loc >= CFP_NUM_RULES)
147 ++ return -EINVAL;
148 ++
149 + /* Refuse deleting unused rules, and those that are not unique since
150 + * that could leave IPv6 rules with one of the chained rule in the
151 + * table.
152 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
153 +index 581ad0a17d0c..de46331aefc1 100644
154 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
155 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
156 +@@ -1584,7 +1584,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
157 + netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
158 + bnxt_sched_reset(bp, rxr);
159 + }
160 +- goto next_rx;
161 ++ goto next_rx_no_len;
162 + }
163 +
164 + len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
165 +@@ -1665,12 +1665,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
166 + rc = 1;
167 +
168 + next_rx:
169 +- rxr->rx_prod = NEXT_RX(prod);
170 +- rxr->rx_next_cons = NEXT_RX(cons);
171 +-
172 + cpr->rx_packets += 1;
173 + cpr->rx_bytes += len;
174 +
175 ++next_rx_no_len:
176 ++ rxr->rx_prod = NEXT_RX(prod);
177 ++ rxr->rx_next_cons = NEXT_RX(cons);
178 ++
179 + next_rx_no_prod_no_len:
180 + *raw_cons = tmp_raw_cons;
181 +
182 +@@ -7441,8 +7442,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
183 +
184 + skip_uc:
185 + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
186 ++ if (rc && vnic->mc_list_count) {
187 ++ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
188 ++ rc);
189 ++ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
190 ++ vnic->mc_list_count = 0;
191 ++ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
192 ++ }
193 + if (rc)
194 +- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
195 ++ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
196 + rc);
197 +
198 + return rc;
199 +@@ -9077,6 +9085,7 @@ init_err_cleanup_tc:
200 + bnxt_clear_int_mode(bp);
201 +
202 + init_err_pci_clean:
203 ++ bnxt_free_hwrm_short_cmd_req(bp);
204 + bnxt_free_hwrm_resources(bp);
205 + bnxt_cleanup_pci(bp);
206 +
207 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
208 +index 73813c7afa49..bb6107f3b947 100644
209 +--- a/drivers/net/phy/marvell.c
210 ++++ b/drivers/net/phy/marvell.c
211 +@@ -1513,9 +1513,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
212 +
213 + static void marvell_get_strings(struct phy_device *phydev, u8 *data)
214 + {
215 ++ int count = marvell_get_sset_count(phydev);
216 + int i;
217 +
218 +- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
219 ++ for (i = 0; i < count; i++) {
220 + strlcpy(data + i * ETH_GSTRING_LEN,
221 + marvell_hw_stats[i].string, ETH_GSTRING_LEN);
222 + }
223 +@@ -1543,9 +1544,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
224 + static void marvell_get_stats(struct phy_device *phydev,
225 + struct ethtool_stats *stats, u64 *data)
226 + {
227 ++ int count = marvell_get_sset_count(phydev);
228 + int i;
229 +
230 +- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
231 ++ for (i = 0; i < count; i++)
232 + data[i] = marvell_get_stat(phydev, i);
233 + }
234 +
235 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
236 +index 90f9372dec25..f3b1cfacfe9d 100644
237 +--- a/drivers/net/wireless/ath/ath10k/mac.c
238 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
239 +@@ -5622,7 +5622,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
240 + }
241 +
242 + if (changed & BSS_CHANGED_MCAST_RATE &&
243 +- !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
244 ++ !ath10k_mac_vif_chan(arvif->vif, &def)) {
245 + band = def.chan->band;
246 + rateidx = vif->bss_conf.mcast_rate[band] - 1;
247 +
248 +diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
249 +index 6640f84fe536..6d5beac29bc1 100644
250 +--- a/include/net/sctp/command.h
251 ++++ b/include/net/sctp/command.h
252 +@@ -105,7 +105,6 @@ enum sctp_verb {
253 + SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
254 + SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
255 + SCTP_CMD_SEND_MSG, /* Send the whole use message */
256 +- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
257 + SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
258 + SCTP_CMD_SET_ASOC, /* Restore association context */
259 + SCTP_CMD_LAST
260 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
261 +index 9c4e72e9c60a..73894ed12a70 100644
262 +--- a/net/ipv4/ip_output.c
263 ++++ b/net/ipv4/ip_output.c
264 +@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
265 + to->pkt_type = from->pkt_type;
266 + to->priority = from->priority;
267 + to->protocol = from->protocol;
268 ++ to->skb_iif = from->skb_iif;
269 + skb_dst_drop(to);
270 + skb_dst_copy(to, from);
271 + to->dev = from->dev;
272 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
273 +index cbe46175bb59..3e2a9bd3459c 100644
274 +--- a/net/ipv6/ip6_fib.c
275 ++++ b/net/ipv6/ip6_fib.c
276 +@@ -889,9 +889,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
277 + if (pcpu_rt) {
278 + struct fib6_info *from;
279 +
280 +- from = rcu_dereference_protected(pcpu_rt->from,
281 +- lockdep_is_held(&table->tb6_lock));
282 +- rcu_assign_pointer(pcpu_rt->from, NULL);
283 ++ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
284 + fib6_info_release(from);
285 + }
286 + }
287 +diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
288 +index cb54a8a3c273..be5f3d7ceb96 100644
289 +--- a/net/ipv6/ip6_flowlabel.c
290 ++++ b/net/ipv6/ip6_flowlabel.c
291 +@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
292 + return fl;
293 + }
294 +
295 ++static void fl_free_rcu(struct rcu_head *head)
296 ++{
297 ++ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
298 ++
299 ++ if (fl->share == IPV6_FL_S_PROCESS)
300 ++ put_pid(fl->owner.pid);
301 ++ kfree(fl->opt);
302 ++ kfree(fl);
303 ++}
304 ++
305 +
306 + static void fl_free(struct ip6_flowlabel *fl)
307 + {
308 +- if (fl) {
309 +- if (fl->share == IPV6_FL_S_PROCESS)
310 +- put_pid(fl->owner.pid);
311 +- kfree(fl->opt);
312 +- kfree_rcu(fl, rcu);
313 +- }
314 ++ if (fl)
315 ++ call_rcu(&fl->rcu, fl_free_rcu);
316 + }
317 +
318 + static void fl_release(struct ip6_flowlabel *fl)
319 +@@ -633,9 +639,9 @@ recheck:
320 + if (fl1->share == IPV6_FL_S_EXCL ||
321 + fl1->share != fl->share ||
322 + ((fl1->share == IPV6_FL_S_PROCESS) &&
323 +- (fl1->owner.pid == fl->owner.pid)) ||
324 ++ (fl1->owner.pid != fl->owner.pid)) ||
325 + ((fl1->share == IPV6_FL_S_USER) &&
326 +- uid_eq(fl1->owner.uid, fl->owner.uid)))
327 ++ !uid_eq(fl1->owner.uid, fl->owner.uid)))
328 + goto release;
329 +
330 + err = -ENOMEM;
331 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
332 +index 06fa8425d82c..2e519f7b983c 100644
333 +--- a/net/ipv6/route.c
334 ++++ b/net/ipv6/route.c
335 +@@ -382,11 +382,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
336 + in6_dev_put(idev);
337 + }
338 +
339 +- rcu_read_lock();
340 +- from = rcu_dereference(rt->from);
341 +- rcu_assign_pointer(rt->from, NULL);
342 ++ from = xchg((__force struct fib6_info **)&rt->from, NULL);
343 + fib6_info_release(from);
344 +- rcu_read_unlock();
345 + }
346 +
347 + static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
348 +@@ -1296,9 +1293,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
349 + /* purge completely the exception to allow releasing the held resources:
350 + * some [sk] cache may keep the dst around for unlimited time
351 + */
352 +- from = rcu_dereference_protected(rt6_ex->rt6i->from,
353 +- lockdep_is_held(&rt6_exception_lock));
354 +- rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
355 ++ from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
356 + fib6_info_release(from);
357 + dst_dev_put(&rt6_ex->rt6i->dst);
358 +
359 +@@ -3454,11 +3449,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
360 +
361 + rcu_read_lock();
362 + from = rcu_dereference(rt->from);
363 +- /* This fib6_info_hold() is safe here because we hold reference to rt
364 +- * and rt already holds reference to fib6_info.
365 +- */
366 +- fib6_info_hold(from);
367 +- rcu_read_unlock();
368 ++ if (!from)
369 ++ goto out;
370 +
371 + nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
372 + if (!nrt)
373 +@@ -3470,10 +3462,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
374 +
375 + nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
376 +
377 +- /* No need to remove rt from the exception table if rt is
378 +- * a cached route because rt6_insert_exception() will
379 +- * takes care of it
380 +- */
381 ++ /* rt6_insert_exception() will take care of duplicated exceptions */
382 + if (rt6_insert_exception(nrt, from)) {
383 + dst_release_immediate(&nrt->dst);
384 + goto out;
385 +@@ -3486,7 +3475,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
386 + call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
387 +
388 + out:
389 +- fib6_info_release(from);
390 ++ rcu_read_unlock();
391 + neigh_release(neigh);
392 + }
393 +
394 +@@ -4991,16 +4980,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
395 +
396 + rcu_read_lock();
397 + from = rcu_dereference(rt->from);
398 +-
399 +- if (fibmatch)
400 +- err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
401 +- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
402 +- nlh->nlmsg_seq, 0);
403 +- else
404 +- err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
405 +- &fl6.saddr, iif, RTM_NEWROUTE,
406 +- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
407 +- 0);
408 ++ if (from) {
409 ++ if (fibmatch)
410 ++ err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
411 ++ iif, RTM_NEWROUTE,
412 ++ NETLINK_CB(in_skb).portid,
413 ++ nlh->nlmsg_seq, 0);
414 ++ else
415 ++ err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
416 ++ &fl6.saddr, iif, RTM_NEWROUTE,
417 ++ NETLINK_CB(in_skb).portid,
418 ++ nlh->nlmsg_seq, 0);
419 ++ } else {
420 ++ err = -ENETUNREACH;
421 ++ }
422 + rcu_read_unlock();
423 +
424 + if (err < 0) {
425 +diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
426 +index fed6becc5daf..52b5a2797c0c 100644
427 +--- a/net/l2tp/l2tp_core.c
428 ++++ b/net/l2tp/l2tp_core.c
429 +@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
430 +
431 + rcu_read_lock_bh();
432 + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
433 +- if (tunnel->tunnel_id == tunnel_id) {
434 +- l2tp_tunnel_inc_refcount(tunnel);
435 ++ if (tunnel->tunnel_id == tunnel_id &&
436 ++ refcount_inc_not_zero(&tunnel->ref_count)) {
437 + rcu_read_unlock_bh();
438 +
439 + return tunnel;
440 +@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
441 +
442 + rcu_read_lock_bh();
443 + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
444 +- if (++count > nth) {
445 +- l2tp_tunnel_inc_refcount(tunnel);
446 ++ if (++count > nth &&
447 ++ refcount_inc_not_zero(&tunnel->ref_count)) {
448 + rcu_read_unlock_bh();
449 + return tunnel;
450 + }
451 +@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
452 + {
453 + struct l2tp_tunnel *tunnel;
454 +
455 +- tunnel = l2tp_tunnel(sk);
456 ++ tunnel = rcu_dereference_sk_user_data(sk);
457 + if (tunnel == NULL)
458 + goto pass_up;
459 +
460 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
461 +index a0d295478e69..ebbb30064251 100644
462 +--- a/net/packet/af_packet.c
463 ++++ b/net/packet/af_packet.c
464 +@@ -2603,8 +2603,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
465 + void *ph;
466 + DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
467 + bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
468 ++ unsigned char *addr = NULL;
469 + int tp_len, size_max;
470 +- unsigned char *addr;
471 + void *data;
472 + int len_sum = 0;
473 + int status = TP_STATUS_AVAILABLE;
474 +@@ -2615,7 +2615,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
475 + if (likely(saddr == NULL)) {
476 + dev = packet_cached_dev_get(po);
477 + proto = po->num;
478 +- addr = NULL;
479 + } else {
480 + err = -EINVAL;
481 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
482 +@@ -2625,10 +2624,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
483 + sll_addr)))
484 + goto out;
485 + proto = saddr->sll_protocol;
486 +- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
487 + dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
488 +- if (addr && dev && saddr->sll_halen < dev->addr_len)
489 +- goto out_put;
490 ++ if (po->sk.sk_socket->type == SOCK_DGRAM) {
491 ++ if (dev && msg->msg_namelen < dev->addr_len +
492 ++ offsetof(struct sockaddr_ll, sll_addr))
493 ++ goto out_put;
494 ++ addr = saddr->sll_addr;
495 ++ }
496 + }
497 +
498 + err = -ENXIO;
499 +@@ -2800,7 +2802,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
500 + struct sk_buff *skb;
501 + struct net_device *dev;
502 + __be16 proto;
503 +- unsigned char *addr;
504 ++ unsigned char *addr = NULL;
505 + int err, reserve = 0;
506 + struct sockcm_cookie sockc;
507 + struct virtio_net_hdr vnet_hdr = { 0 };
508 +@@ -2817,7 +2819,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
509 + if (likely(saddr == NULL)) {
510 + dev = packet_cached_dev_get(po);
511 + proto = po->num;
512 +- addr = NULL;
513 + } else {
514 + err = -EINVAL;
515 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
516 +@@ -2825,10 +2826,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
517 + if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
518 + goto out;
519 + proto = saddr->sll_protocol;
520 +- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
521 + dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
522 +- if (addr && dev && saddr->sll_halen < dev->addr_len)
523 +- goto out_unlock;
524 ++ if (sock->type == SOCK_DGRAM) {
525 ++ if (dev && msg->msg_namelen < dev->addr_len +
526 ++ offsetof(struct sockaddr_ll, sll_addr))
527 ++ goto out_unlock;
528 ++ addr = saddr->sll_addr;
529 ++ }
530 + }
531 +
532 + err = -ENXIO;
533 +diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
534 +index 8f1a8f85b1f9..215f4d98baa0 100644
535 +--- a/net/rxrpc/call_object.c
536 ++++ b/net/rxrpc/call_object.c
537 +@@ -701,30 +701,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
538 +
539 + _enter("");
540 +
541 +- if (list_empty(&rxnet->calls))
542 +- return;
543 ++ if (!list_empty(&rxnet->calls)) {
544 ++ write_lock(&rxnet->call_lock);
545 +
546 +- write_lock(&rxnet->call_lock);
547 ++ while (!list_empty(&rxnet->calls)) {
548 ++ call = list_entry(rxnet->calls.next,
549 ++ struct rxrpc_call, link);
550 ++ _debug("Zapping call %p", call);
551 +
552 +- while (!list_empty(&rxnet->calls)) {
553 +- call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
554 +- _debug("Zapping call %p", call);
555 ++ rxrpc_see_call(call);
556 ++ list_del_init(&call->link);
557 +
558 +- rxrpc_see_call(call);
559 +- list_del_init(&call->link);
560 ++ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
561 ++ call, atomic_read(&call->usage),
562 ++ rxrpc_call_states[call->state],
563 ++ call->flags, call->events);
564 +
565 +- pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
566 +- call, atomic_read(&call->usage),
567 +- rxrpc_call_states[call->state],
568 +- call->flags, call->events);
569 ++ write_unlock(&rxnet->call_lock);
570 ++ cond_resched();
571 ++ write_lock(&rxnet->call_lock);
572 ++ }
573 +
574 + write_unlock(&rxnet->call_lock);
575 +- cond_resched();
576 +- write_lock(&rxnet->call_lock);
577 + }
578 +
579 +- write_unlock(&rxnet->call_lock);
580 +-
581 + atomic_dec(&rxnet->nr_calls);
582 + wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
583 + }
584 +diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
585 +index 85d393090238..48fe8f01265f 100644
586 +--- a/net/sctp/sm_sideeffect.c
587 ++++ b/net/sctp/sm_sideeffect.c
588 +@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
589 + }
590 +
591 +
592 +-/* Sent the next ASCONF packet currently stored in the association.
593 +- * This happens after the ASCONF_ACK was succeffully processed.
594 +- */
595 +-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
596 +-{
597 +- struct net *net = sock_net(asoc->base.sk);
598 +-
599 +- /* Send the next asconf chunk from the addip chunk
600 +- * queue.
601 +- */
602 +- if (!list_empty(&asoc->addip_chunk_list)) {
603 +- struct list_head *entry = asoc->addip_chunk_list.next;
604 +- struct sctp_chunk *asconf = list_entry(entry,
605 +- struct sctp_chunk, list);
606 +- list_del_init(entry);
607 +-
608 +- /* Hold the chunk until an ASCONF_ACK is received. */
609 +- sctp_chunk_hold(asconf);
610 +- if (sctp_primitive_ASCONF(net, asoc, asconf))
611 +- sctp_chunk_free(asconf);
612 +- else
613 +- asoc->addip_last_asconf = asconf;
614 +- }
615 +-}
616 +-
617 +-
618 + /* These three macros allow us to pull the debugging code out of the
619 + * main flow of sctp_do_sm() to keep attention focused on the real
620 + * functionality there.
621 +@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
622 + }
623 + sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
624 + break;
625 +- case SCTP_CMD_SEND_NEXT_ASCONF:
626 +- sctp_cmd_send_asconf(asoc);
627 +- break;
628 + case SCTP_CMD_PURGE_ASCONF_QUEUE:
629 + sctp_asconf_queue_teardown(asoc);
630 + break;
631 +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
632 +index c9ae3404b1bb..713a669d2058 100644
633 +--- a/net/sctp/sm_statefuns.c
634 ++++ b/net/sctp/sm_statefuns.c
635 +@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
636 + return SCTP_DISPOSITION_CONSUME;
637 + }
638 +
639 ++static enum sctp_disposition sctp_send_next_asconf(
640 ++ struct net *net,
641 ++ const struct sctp_endpoint *ep,
642 ++ struct sctp_association *asoc,
643 ++ const union sctp_subtype type,
644 ++ struct sctp_cmd_seq *commands)
645 ++{
646 ++ struct sctp_chunk *asconf;
647 ++ struct list_head *entry;
648 ++
649 ++ if (list_empty(&asoc->addip_chunk_list))
650 ++ return SCTP_DISPOSITION_CONSUME;
651 ++
652 ++ entry = asoc->addip_chunk_list.next;
653 ++ asconf = list_entry(entry, struct sctp_chunk, list);
654 ++
655 ++ list_del_init(entry);
656 ++ sctp_chunk_hold(asconf);
657 ++ asoc->addip_last_asconf = asconf;
658 ++
659 ++ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
660 ++}
661 ++
662 + /*
663 + * ADDIP Section 4.3 General rules for address manipulation
664 + * When building TLV parameters for the ASCONF Chunk that will add or
665 +@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
666 + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
667 +
668 + if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
669 +- asconf_ack)) {
670 +- /* Successfully processed ASCONF_ACK. We can
671 +- * release the next asconf if we have one.
672 +- */
673 +- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
674 +- SCTP_NULL());
675 +- return SCTP_DISPOSITION_CONSUME;
676 +- }
677 ++ asconf_ack))
678 ++ return sctp_send_next_asconf(net, ep,
679 ++ (struct sctp_association *)asoc,
680 ++ type, commands);
681 +
682 + abort = sctp_make_abort(asoc, asconf_ack,
683 + sizeof(struct sctp_errhdr));
684 +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
685 +index c9588b682db4..f4a19eac975d 100644
686 +--- a/net/tls/tls_device.c
687 ++++ b/net/tls/tls_device.c
688 +@@ -569,7 +569,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
689 + static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
690 + {
691 + struct strp_msg *rxm = strp_msg(skb);
692 +- int err = 0, offset = rxm->offset, copy, nsg;
693 ++ int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
694 + struct sk_buff *skb_iter, *unused;
695 + struct scatterlist sg[1];
696 + char *orig_buf, *buf;
697 +@@ -600,25 +600,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
698 + else
699 + err = 0;
700 +
701 +- copy = min_t(int, skb_pagelen(skb) - offset,
702 +- rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
703 ++ data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
704 +
705 +- if (skb->decrypted)
706 +- skb_store_bits(skb, offset, buf, copy);
707 ++ if (skb_pagelen(skb) > offset) {
708 ++ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
709 +
710 +- offset += copy;
711 +- buf += copy;
712 ++ if (skb->decrypted)
713 ++ skb_store_bits(skb, offset, buf, copy);
714 +
715 ++ offset += copy;
716 ++ buf += copy;
717 ++ }
718 ++
719 ++ pos = skb_pagelen(skb);
720 + skb_walk_frags(skb, skb_iter) {
721 +- copy = min_t(int, skb_iter->len,
722 +- rxm->full_len - offset + rxm->offset -
723 +- TLS_CIPHER_AES_GCM_128_TAG_SIZE);
724 ++ int frag_pos;
725 ++
726 ++ /* Practically all frags must belong to msg if reencrypt
727 ++ * is needed with current strparser and coalescing logic,
728 ++ * but strparser may "get optimized", so let's be safe.
729 ++ */
730 ++ if (pos + skb_iter->len <= offset)
731 ++ goto done_with_frag;
732 ++ if (pos >= data_len + rxm->offset)
733 ++ break;
734 ++
735 ++ frag_pos = offset - pos;
736 ++ copy = min_t(int, skb_iter->len - frag_pos,
737 ++ data_len + rxm->offset - offset);
738 +
739 + if (skb_iter->decrypted)
740 +- skb_store_bits(skb_iter, offset, buf, copy);
741 ++ skb_store_bits(skb_iter, frag_pos, buf, copy);
742 +
743 + offset += copy;
744 + buf += copy;
745 ++done_with_frag:
746 ++ pos += skb_iter->len;
747 + }
748 +
749 + free_buf:
750 +diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
751 +index ef8934fd8698..426dd97725e4 100644
752 +--- a/net/tls/tls_device_fallback.c
753 ++++ b/net/tls/tls_device_fallback.c
754 +@@ -200,13 +200,14 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
755 +
756 + skb_put(nskb, skb->len);
757 + memcpy(nskb->data, skb->data, headln);
758 +- update_chksum(nskb, headln);
759 +
760 + nskb->destructor = skb->destructor;
761 + nskb->sk = sk;
762 + skb->destructor = NULL;
763 + skb->sk = NULL;
764 +
765 ++ update_chksum(nskb, headln);
766 ++
767 + delta = nskb->truesize - skb->truesize;
768 + if (likely(delta < 0))
769 + WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
770 +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
771 +index c1376bfdc90b..aa28510d23ad 100644
772 +--- a/sound/usb/line6/driver.c
773 ++++ b/sound/usb/line6/driver.c
774 +@@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
775 + {
776 + struct usb_device *usbdev = line6->usbdev;
777 + int ret;
778 +- unsigned char len;
779 ++ unsigned char *len;
780 + unsigned count;
781 +
782 + if (address > 0xffff || datalen > 0xff)
783 + return -EINVAL;
784 +
785 ++ len = kmalloc(sizeof(*len), GFP_KERNEL);
786 ++ if (!len)
787 ++ return -ENOMEM;
788 ++
789 + /* query the serial number: */
790 + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
791 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
792 +@@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
793 +
794 + if (ret < 0) {
795 + dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
796 +- return ret;
797 ++ goto exit;
798 + }
799 +
800 + /* Wait for data length. We'll get 0xff until length arrives. */
801 +@@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
802 + ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
803 + USB_TYPE_VENDOR | USB_RECIP_DEVICE |
804 + USB_DIR_IN,
805 +- 0x0012, 0x0000, &len, 1,
806 ++ 0x0012, 0x0000, len, 1,
807 + LINE6_TIMEOUT * HZ);
808 + if (ret < 0) {
809 + dev_err(line6->ifcdev,
810 + "receive length failed (error %d)\n", ret);
811 +- return ret;
812 ++ goto exit;
813 + }
814 +
815 +- if (len != 0xff)
816 ++ if (*len != 0xff)
817 + break;
818 + }
819 +
820 +- if (len == 0xff) {
821 ++ ret = -EIO;
822 ++ if (*len == 0xff) {
823 + dev_err(line6->ifcdev, "read failed after %d retries\n",
824 + count);
825 +- return -EIO;
826 +- } else if (len != datalen) {
827 ++ goto exit;
828 ++ } else if (*len != datalen) {
829 + /* should be equal or something went wrong */
830 + dev_err(line6->ifcdev,
831 + "length mismatch (expected %d, got %d)\n",
832 +- (int)datalen, (int)len);
833 +- return -EIO;
834 ++ (int)datalen, (int)*len);
835 ++ goto exit;
836 + }
837 +
838 + /* receive the result: */
839 +@@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
840 + 0x0013, 0x0000, data, datalen,
841 + LINE6_TIMEOUT * HZ);
842 +
843 +- if (ret < 0) {
844 ++ if (ret < 0)
845 + dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
846 +- return ret;
847 +- }
848 +
849 +- return 0;
850 ++exit:
851 ++ kfree(len);
852 ++ return ret;
853 + }
854 + EXPORT_SYMBOL_GPL(line6_read_data);
855 +
856 +@@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
857 + {
858 + struct usb_device *usbdev = line6->usbdev;
859 + int ret;
860 +- unsigned char status;
861 ++ unsigned char *status;
862 + int count;
863 +
864 + if (address > 0xffff || datalen > 0xffff)
865 + return -EINVAL;
866 +
867 ++ status = kmalloc(sizeof(*status), GFP_KERNEL);
868 ++ if (!status)
869 ++ return -ENOMEM;
870 ++
871 + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
872 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
873 + 0x0022, address, data, datalen,
874 +@@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
875 + if (ret < 0) {
876 + dev_err(line6->ifcdev,
877 + "write request failed (error %d)\n", ret);
878 +- return ret;
879 ++ goto exit;
880 + }
881 +
882 + for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
883 +@@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
884 + USB_TYPE_VENDOR | USB_RECIP_DEVICE |
885 + USB_DIR_IN,
886 + 0x0012, 0x0000,
887 +- &status, 1, LINE6_TIMEOUT * HZ);
888 ++ status, 1, LINE6_TIMEOUT * HZ);
889 +
890 + if (ret < 0) {
891 + dev_err(line6->ifcdev,
892 + "receiving status failed (error %d)\n", ret);
893 +- return ret;
894 ++ goto exit;
895 + }
896 +
897 +- if (status != 0xff)
898 ++ if (*status != 0xff)
899 + break;
900 + }
901 +
902 +- if (status == 0xff) {
903 ++ if (*status == 0xff) {
904 + dev_err(line6->ifcdev, "write failed after %d retries\n",
905 + count);
906 +- return -EIO;
907 +- } else if (status != 0) {
908 ++ ret = -EIO;
909 ++ } else if (*status != 0) {
910 + dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
911 +- return -EIO;
912 ++ ret = -EIO;
913 + }
914 +-
915 +- return 0;
916 ++exit:
917 ++ kfree(status);
918 ++ return ret;
919 + }
920 + EXPORT_SYMBOL_GPL(line6_write_data);
921 +
922 +diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
923 +index 36ed9c85c0eb..5f3c87264e66 100644
924 +--- a/sound/usb/line6/podhd.c
925 ++++ b/sound/usb/line6/podhd.c
926 +@@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t)
927 + static int podhd_dev_start(struct usb_line6_podhd *pod)
928 + {
929 + int ret;
930 +- u8 init_bytes[8];
931 ++ u8 *init_bytes;
932 + int i;
933 + struct usb_device *usbdev = pod->line6.usbdev;
934 +
935 ++ init_bytes = kmalloc(8, GFP_KERNEL);
936 ++ if (!init_bytes)
937 ++ return -ENOMEM;
938 ++
939 + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
940 + 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
941 + 0x11, 0,
942 + NULL, 0, LINE6_TIMEOUT * HZ);
943 + if (ret < 0) {
944 + dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
945 +- return ret;
946 ++ goto exit;
947 + }
948 +
949 + /* NOTE: looks like some kind of ping message */
950 + ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
951 + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
952 + 0x11, 0x0,
953 +- &init_bytes, 3, LINE6_TIMEOUT * HZ);
954 ++ init_bytes, 3, LINE6_TIMEOUT * HZ);
955 + if (ret < 0) {
956 + dev_err(pod->line6.ifcdev,
957 + "receive length failed (error %d)\n", ret);
958 +- return ret;
959 ++ goto exit;
960 + }
961 +
962 + pod->firmware_version =
963 +@@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
964 + for (i = 0; i <= 16; i++) {
965 + ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
966 + if (ret < 0)
967 +- return ret;
968 ++ goto exit;
969 + }
970 +
971 + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
972 +@@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
973 + USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
974 + 1, 0,
975 + NULL, 0, LINE6_TIMEOUT * HZ);
976 +- if (ret < 0)
977 +- return ret;
978 +-
979 +- return 0;
980 ++exit:
981 ++ kfree(init_bytes);
982 ++ return ret;
983 + }
984 +
985 + static void podhd_startup_workqueue(struct work_struct *work)
986 +diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
987 +index f47ba94e6f4a..19bee725de00 100644
988 +--- a/sound/usb/line6/toneport.c
989 ++++ b/sound/usb/line6/toneport.c
990 +@@ -365,16 +365,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
991 + /*
992 + Setup Toneport device.
993 + */
994 +-static void toneport_setup(struct usb_line6_toneport *toneport)
995 ++static int toneport_setup(struct usb_line6_toneport *toneport)
996 + {
997 +- u32 ticks;
998 ++ u32 *ticks;
999 + struct usb_line6 *line6 = &toneport->line6;
1000 + struct usb_device *usbdev = line6->usbdev;
1001 +
1002 ++ ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
1003 ++ if (!ticks)
1004 ++ return -ENOMEM;
1005 ++
1006 + /* sync time on device with host: */
1007 + /* note: 32-bit timestamps overflow in year 2106 */
1008 +- ticks = (u32)ktime_get_real_seconds();
1009 +- line6_write_data(line6, 0x80c6, &ticks, 4);
1010 ++ *ticks = (u32)ktime_get_real_seconds();
1011 ++ line6_write_data(line6, 0x80c6, ticks, 4);
1012 ++ kfree(ticks);
1013 +
1014 + /* enable device: */
1015 + toneport_send_cmd(usbdev, 0x0301, 0x0000);
1016 +@@ -389,6 +394,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
1017 + toneport_update_led(toneport);
1018 +
1019 + mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
1020 ++ return 0;
1021 + }
1022 +
1023 + /*
1024 +@@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
1025 + return err;
1026 + }
1027 +
1028 +- toneport_setup(toneport);
1029 ++ err = toneport_setup(toneport);
1030 ++ if (err)
1031 ++ return err;
1032 +
1033 + /* register audio system: */
1034 + return snd_card_register(line6->card);
1035 +@@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
1036 + */
1037 + static int toneport_reset_resume(struct usb_interface *interface)
1038 + {
1039 +- toneport_setup(usb_get_intfdata(interface));
1040 ++ int err;
1041 ++
1042 ++ err = toneport_setup(usb_get_intfdata(interface));
1043 ++ if (err)
1044 ++ return err;
1045 + return line6_resume(interface);
1046 + }
1047 + #endif
1048 +diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
1049 +index d4cfb6a7a086..d84193bdc307 100755
1050 +--- a/tools/testing/selftests/net/fib_rule_tests.sh
1051 ++++ b/tools/testing/selftests/net/fib_rule_tests.sh
1052 +@@ -27,6 +27,7 @@ log_test()
1053 + nsuccess=$((nsuccess+1))
1054 + printf "\n TEST: %-50s [ OK ]\n" "${msg}"
1055 + else
1056 ++ ret=1
1057 + nfail=$((nfail+1))
1058 + printf "\n TEST: %-50s [FAIL]\n" "${msg}"
1059 + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
1060 +@@ -245,4 +246,9 @@ setup
1061 + run_fibrule_tests
1062 + cleanup
1063 +
1064 ++if [ "$TESTS" != "none" ]; then
1065 ++ printf "\nTests passed: %3d\n" ${nsuccess}
1066 ++ printf "Tests failed: %3d\n" ${nfail}
1067 ++fi
1068 ++
1069 + exit $ret