Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Sun, 05 May 2019 13:39:20
Message-Id: 1557063537.b721073a475fe58039da5c0daf37b3ec3cdbd942.mpagano@gentoo
commit: b721073a475fe58039da5c0daf37b3ec3cdbd942
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 5 13:38:57 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 5 13:38:57 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b721073a

Linux patch 5.0.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

1012_linux-5.0.13.patch | 1280 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 1280 insertions(+)

diff --git a/1012_linux-5.0.13.patch b/1012_linux-5.0.13.patch
new file mode 100644
index 0000000..b3581f4
--- /dev/null
+++ b/1012_linux-5.0.13.patch
@@ -0,0 +1,1280 @@
+diff --git a/Makefile b/Makefile
+index fd044f594bbf..51a819544505 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+
+diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
+index dabfcf7c3941..7a0e64ccd6ff 100644
+--- a/arch/x86/include/uapi/asm/kvm.h
++++ b/arch/x86/include/uapi/asm/kvm.h
+@@ -381,6 +381,7 @@ struct kvm_sync_regs {
+ #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+ #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+ #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
++#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+
+ #define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+ #define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index f90b3a948291..a4bcac94392c 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -5407,7 +5407,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+ return ret;
+
+ /* Empty 'VMXON' state is permitted */
+- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
++ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
+ return 0;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull) {
+@@ -5451,7 +5451,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+ vmcs12->vmcs_link_pointer != -1ull) {
+ struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+
+- if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
++ if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
+ return -EINVAL;
+
+ if (copy_from_user(shadow_vmcs12,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 8c9fb6453b2f..7e413ea19a9a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6536,6 +6536,12 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ }
+ EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
+
++static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
++{
++ vcpu->arch.pio.count = 0;
++ return 1;
++}
++
+ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.pio.count = 0;
+@@ -6552,12 +6558,23 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
+ unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
+ size, port, &val, 1);
++ if (ret)
++ return ret;
+
+- if (!ret) {
++ /*
++ * Workaround userspace that relies on old KVM behavior of %rip being
++ * incremented prior to exiting to userspace to handle "OUT 0x7e".
++ */
++ if (port == 0x7e &&
++ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
++ vcpu->arch.complete_userspace_io =
++ complete_fast_pio_out_port_0x7e;
++ kvm_skip_emulated_instruction(vcpu);
++ } else {
+ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
+ vcpu->arch.complete_userspace_io = complete_fast_pio_out;
+ }
+- return ret;
++ return 0;
+ }
+
+ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
+diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
+index e14663ab6dbc..8dd74700a2ef 100644
+--- a/drivers/net/dsa/bcm_sf2_cfp.c
++++ b/drivers/net/dsa/bcm_sf2_cfp.c
+@@ -854,6 +854,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
++ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
++ return -EINVAL;
++
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+@@ -942,6 +945,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
+ struct cfp_rule *rule;
+ int ret;
+
++ if (loc >= CFP_NUM_RULES)
++ return -EINVAL;
++
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 40ca339ec3df..c6ddbc0e084e 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1621,7 +1621,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
+- goto next_rx;
++ goto next_rx_no_len;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+@@ -1702,12 +1702,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ rc = 1;
+
+ next_rx:
+- rxr->rx_prod = NEXT_RX(prod);
+- rxr->rx_next_cons = NEXT_RX(cons);
+-
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
++next_rx_no_len:
++ rxr->rx_prod = NEXT_RX(prod);
++ rxr->rx_next_cons = NEXT_RX(cons);
++
+ next_rx_no_prod_no_len:
+ *raw_cons = tmp_raw_cons;
+
+@@ -5131,10 +5132,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
++
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+@@ -5147,10 +5148,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
++
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+@@ -5169,10 +5170,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
++
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+@@ -5311,17 +5312,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
+ req->num_tx_rings = cpu_to_le16(tx_rings);
+ if (BNXT_NEW_RM(bp)) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
++ enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= tx_rings + ring_grps ?
+- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= rx_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+@@ -5361,14 +5361,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
++ enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= tx_rings + ring_grps ?
+- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ } else {
+ enables |= cp_rings ?
+- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+@@ -6745,6 +6744,7 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+ struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
+ struct hwrm_port_qstats_ext_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
++ u32 tx_stat_size;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+@@ -6754,13 +6754,16 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+ req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+- req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
++ tx_stat_size = bp->hw_tx_port_stats_ext ?
++ sizeof(*bp->hw_tx_port_stats_ext) : 0;
++ req.tx_stat_size = cpu_to_le16(tx_stat_size);
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+- bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
++ bp->fw_tx_stats_ext_size = tx_stat_size ?
++ le16_to_cpu(resp->tx_stat_size) / 8 : 0;
+ } else {
+ bp->fw_rx_stats_ext_size = 0;
+ bp->fw_tx_stats_ext_size = 0;
+@@ -8889,8 +8892,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
+
+ skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
++ if (rc && vnic->mc_list_count) {
++ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
++ rc);
++ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
++ vnic->mc_list_count = 0;
++ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
++ }
+ if (rc)
+- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
++ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+@@ -10625,6 +10635,7 @@ init_err_cleanup_tc:
+ bnxt_clear_int_mode(bp);
+
+ init_err_pci_clean:
++ bnxt_free_hwrm_short_cmd_req(bp);
+ bnxt_free_hwrm_resources(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index abb7876a8776..66573a218df5 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -1494,9 +1494,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
+
+ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
+ {
++ int count = marvell_get_sset_count(phydev);
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
++ for (i = 0; i < count; i++) {
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ marvell_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+@@ -1524,9 +1525,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
+ static void marvell_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+ {
++ int count = marvell_get_sset_count(phydev);
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
++ for (i = 0; i < count; i++)
+ data[i] = marvell_get_stat(phydev, i);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 49758490eaba..9560acc5f7da 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5705,7 +5705,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+- !WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) {
++ !ath10k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ rateidx = vif->bss_conf.mcast_rate[band] - 1;
+
+@@ -5743,7 +5743,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+- if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
++ if (ath10k_mac_vif_chan(vif, &def)) {
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+index 33b0af24a537..d61ab3e80759 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+@@ -1482,6 +1482,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ return;
+
+ mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
++ if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
++ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
++ dbgfs_dir);
++ return;
++ }
+
+ if (!mvmvif->dbgfs_dir) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
+index 6640f84fe536..6d5beac29bc1 100644
+--- a/include/net/sctp/command.h
++++ b/include/net/sctp/command.h
+@@ -105,7 +105,6 @@ enum sctp_verb {
+ SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
+ SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
+ SCTP_CMD_SEND_MSG, /* Send the whole use message */
+- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
+ SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
+ SCTP_CMD_SET_ASOC, /* Restore association context */
+ SCTP_CMD_LAST
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c80188875f39..e8bb2e85c5a4 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
+ to->pkt_type = from->pkt_type;
+ to->priority = from->priority;
+ to->protocol = from->protocol;
++ to->skb_iif = from->skb_iif;
+ skb_dst_drop(to);
+ skb_dst_copy(to, from);
+ to->dev = from->dev;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 00852f47a73d..9a2ff79a93ad 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
+ TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
+ ((TCP_SKB_CB(tail)->tcp_flags |
+- TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
++ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
++ !((TCP_SKB_CB(tail)->tcp_flags &
++ TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
+ ((TCP_SKB_CB(tail)->tcp_flags ^
+ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
+ #ifdef CONFIG_TLS_DEVICE
+@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+ if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
+ TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+
++ /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
++ * thtail->fin, so that the fast path in tcp_rcv_established()
++ * is not entered if we append a packet with a FIN.
++ * SYN, RST, URG are not present.
++ * ACK is set on both packets.
++ * PSH : we do not really care in TCP stack,
++ * at least for 'GRO' packets.
++ */
++ thtail->fin |= th->fin;
+ TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+
+ if (TCP_SKB_CB(skb)->has_rxtstamp) {
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 64f9715173ac..065334b41d57 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ struct sk_buff *pp = NULL;
+ struct udphdr *uh2;
+ struct sk_buff *p;
++ unsigned int ulen;
+
+ /* requires non zero csum, for symmetry with GSO */
+ if (!uh->check) {
+@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ return NULL;
+ }
+
++ /* Do not deal with padded or malicious packets, sorry ! */
++ ulen = ntohs(uh->len);
++ if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
++ NAPI_GRO_CB(skb)->flush = 1;
++ return NULL;
++ }
+ /* pull encapsulating udp header */
+ skb_gro_pull(skb, sizeof(struct udphdr));
+ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+
+ /* Terminate the flow on len mismatch or if it grow "too much".
+ * Under small packet flood GRO count could elsewhere grow a lot
+- * leading to execessive truesize values
++ * leading to excessive truesize values.
++ * On len mismatch merge the first packet shorter than gso_size,
++ * otherwise complete the GRO packet.
+ */
+- if (!skb_gro_receive(p, skb) &&
++ if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
++ ulen != ntohs(uh2->len) ||
+ NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
+ pp = p;
+- else if (uh->len != uh2->len)
+- pp = p;
+
+ return pp;
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 6613d8dbb0e5..91247a6fc67f 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -921,9 +921,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
+ if (pcpu_rt) {
+ struct fib6_info *from;
+
+- from = rcu_dereference_protected(pcpu_rt->from,
+- lockdep_is_held(&table->tb6_lock));
+- rcu_assign_pointer(pcpu_rt->from, NULL);
++ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
+ fib6_info_release(from);
+ }
+ }
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index cb54a8a3c273..be5f3d7ceb96 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
+ return fl;
+ }
+
++static void fl_free_rcu(struct rcu_head *head)
++{
++ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
++
++ if (fl->share == IPV6_FL_S_PROCESS)
++ put_pid(fl->owner.pid);
++ kfree(fl->opt);
++ kfree(fl);
++}
++
+
+ static void fl_free(struct ip6_flowlabel *fl)
+ {
+- if (fl) {
+- if (fl->share == IPV6_FL_S_PROCESS)
+- put_pid(fl->owner.pid);
+- kfree(fl->opt);
+- kfree_rcu(fl, rcu);
+- }
++ if (fl)
++ call_rcu(&fl->rcu, fl_free_rcu);
+ }
+
+ static void fl_release(struct ip6_flowlabel *fl)
+@@ -633,9 +639,9 @@ recheck:
+ if (fl1->share == IPV6_FL_S_EXCL ||
+ fl1->share != fl->share ||
+ ((fl1->share == IPV6_FL_S_PROCESS) &&
+- (fl1->owner.pid == fl->owner.pid)) ||
++ (fl1->owner.pid != fl->owner.pid)) ||
+ ((fl1->share == IPV6_FL_S_USER) &&
+- uid_eq(fl1->owner.uid, fl->owner.uid)))
++ !uid_eq(fl1->owner.uid, fl->owner.uid)))
+ goto release;
+
+ err = -ENOMEM;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b6a97115a906..59c90bba048c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -379,11 +379,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
+ in6_dev_put(idev);
+ }
+
+- rcu_read_lock();
+- from = rcu_dereference(rt->from);
+- rcu_assign_pointer(rt->from, NULL);
++ from = xchg((__force struct fib6_info **)&rt->from, NULL);
+ fib6_info_release(from);
+- rcu_read_unlock();
+ }
+
+ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+@@ -1288,9 +1285,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
+ /* purge completely the exception to allow releasing the held resources:
+ * some [sk] cache may keep the dst around for unlimited time
+ */
+- from = rcu_dereference_protected(rt6_ex->rt6i->from,
+- lockdep_is_held(&rt6_exception_lock));
+- rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
++ from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
+ fib6_info_release(from);
+ dst_dev_put(&rt6_ex->rt6i->dst);
+
+@@ -3403,11 +3398,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
+
+ rcu_read_lock();
+ from = rcu_dereference(rt->from);
+- /* This fib6_info_hold() is safe here because we hold reference to rt
+- * and rt already holds reference to fib6_info.
+- */
+- fib6_info_hold(from);
+- rcu_read_unlock();
++ if (!from)
++ goto out;
+
+ nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
+ if (!nrt)
+@@ -3419,10 +3411,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
+
+ nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
+
+- /* No need to remove rt from the exception table if rt is
+- * a cached route because rt6_insert_exception() will
+- * takes care of it
+- */
++ /* rt6_insert_exception() will take care of duplicated exceptions */
+ if (rt6_insert_exception(nrt, from)) {
+ dst_release_immediate(&nrt->dst);
+ goto out;
+@@ -3435,7 +3424,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
+ call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
+
+ out:
+- fib6_info_release(from);
++ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+@@ -4957,16 +4946,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+
+ rcu_read_lock();
+ from = rcu_dereference(rt->from);
+-
+- if (fibmatch)
+- err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
+- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
+- nlh->nlmsg_seq, 0);
+- else
+- err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+- &fl6.saddr, iif, RTM_NEWROUTE,
+- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
+- 0);
++ if (from) {
++ if (fibmatch)
++ err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
++ iif, RTM_NEWROUTE,
++ NETLINK_CB(in_skb).portid,
++ nlh->nlmsg_seq, 0);
++ else
++ err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
++ &fl6.saddr, iif, RTM_NEWROUTE,
++ NETLINK_CB(in_skb).portid,
++ nlh->nlmsg_seq, 0);
++ } else {
++ err = -ENETUNREACH;
++ }
+ rcu_read_unlock();
+
+ if (err < 0) {
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index fed6becc5daf..52b5a2797c0c 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+- if (tunnel->tunnel_id == tunnel_id) {
+- l2tp_tunnel_inc_refcount(tunnel);
++ if (tunnel->tunnel_id == tunnel_id &&
++ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+
+ return tunnel;
+@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+- if (++count > nth) {
+- l2tp_tunnel_inc_refcount(tunnel);
++ if (++count > nth &&
++ refcount_inc_not_zero(&tunnel->ref_count)) {
+ rcu_read_unlock_bh();
+ return tunnel;
+ }
+@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ struct l2tp_tunnel *tunnel;
+
+- tunnel = l2tp_tunnel(sk);
++ tunnel = rcu_dereference_sk_user_data(sk);
+ if (tunnel == NULL)
+ goto pass_up;
+
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 8406bf11eef4..faa2bc50cfa0 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2603,8 +2603,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ void *ph;
+ DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
+ bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
++ unsigned char *addr = NULL;
+ int tp_len, size_max;
+- unsigned char *addr;
+ void *data;
+ int len_sum = 0;
+ int status = TP_STATUS_AVAILABLE;
+@@ -2615,7 +2615,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ if (likely(saddr == NULL)) {
+ dev = packet_cached_dev_get(po);
+ proto = po->num;
+- addr = NULL;
+ } else {
+ err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll))
+@@ -2625,10 +2624,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ sll_addr)))
+ goto out;
+ proto = saddr->sll_protocol;
+- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+- if (addr && dev && saddr->sll_halen < dev->addr_len)
+- goto out_put;
++ if (po->sk.sk_socket->type == SOCK_DGRAM) {
++ if (dev && msg->msg_namelen < dev->addr_len +
++ offsetof(struct sockaddr_ll, sll_addr))
++ goto out_put;
++ addr = saddr->sll_addr;
++ }
+ }
+
+ err = -ENXIO;
+@@ -2800,7 +2802,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- unsigned char *addr;
++ unsigned char *addr = NULL;
+ int err, reserve = 0;
+ struct sockcm_cookie sockc;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+@@ -2817,7 +2819,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (likely(saddr == NULL)) {
+ dev = packet_cached_dev_get(po);
+ proto = po->num;
+- addr = NULL;
+ } else {
+ err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll))
+@@ -2825,10 +2826,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
+ goto out;
+ proto = saddr->sll_protocol;
+- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+- if (addr && dev && saddr->sll_halen < dev->addr_len)
+- goto out_unlock;
++ if (sock->type == SOCK_DGRAM) {
++ if (dev && msg->msg_namelen < dev->addr_len +
++ offsetof(struct sockaddr_ll, sll_addr))
++ goto out_unlock;
++ addr = saddr->sll_addr;
++ }
+ }
+
+ err = -ENXIO;
+@@ -3345,20 +3349,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+ if (msg->msg_name) {
++ int copy_len;
++
+ /* If the address length field is there to be filled
+ * in, we fill it in now.
+ */
+ if (sock->type == SOCK_PACKET) {
+ __sockaddr_check_size(sizeof(struct sockaddr_pkt));
+ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ copy_len = msg->msg_namelen;
+ } else {
+ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
+ msg->msg_namelen = sll->sll_halen +
+ offsetof(struct sockaddr_ll, sll_addr);
++ copy_len = msg->msg_namelen;
++ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
++ memset(msg->msg_name +
++ offsetof(struct sockaddr_ll, sll_addr),
++ 0, sizeof(sll->sll_addr));
++ msg->msg_namelen = sizeof(struct sockaddr_ll);
++ }
+ }
+- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+- msg->msg_namelen);
++ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
+ }
+
+ if (pkt_sk(sk)->auxdata) {
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index 8aa2937b069f..fe96881a334d 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
+
+ _enter("");
+
+- if (list_empty(&rxnet->calls))
+- return;
++ if (!list_empty(&rxnet->calls)) {
++ write_lock(&rxnet->call_lock);
+
+- write_lock(&rxnet->call_lock);
++ while (!list_empty(&rxnet->calls)) {
++ call = list_entry(rxnet->calls.next,
++ struct rxrpc_call, link);
++ _debug("Zapping call %p", call);
+
+- while (!list_empty(&rxnet->calls)) {
+- call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
+- _debug("Zapping call %p", call);
++ rxrpc_see_call(call);
++ list_del_init(&call->link);
+
+- rxrpc_see_call(call);
+- list_del_init(&call->link);
++ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
++ call, atomic_read(&call->usage),
++ rxrpc_call_states[call->state],
++ call->flags, call->events);
+
+- pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+- call, atomic_read(&call->usage),
+- rxrpc_call_states[call->state],
+- call->flags, call->events);
++ write_unlock(&rxnet->call_lock);
++ cond_resched();
++ write_lock(&rxnet->call_lock);
++ }
+
+ write_unlock(&rxnet->call_lock);
+- cond_resched();
+- write_lock(&rxnet->call_lock);
+ }
+
+- write_unlock(&rxnet->call_lock);
+-
+ atomic_dec(&rxnet->nr_calls);
+ wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
+ }
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 1d143bc3f73d..4aa03588f87b 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ }
+
+
+-/* Sent the next ASCONF packet currently stored in the association.
+- * This happens after the ASCONF_ACK was succeffully processed.
+- */
+-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+-{
+- struct net *net = sock_net(asoc->base.sk);
+-
+- /* Send the next asconf chunk from the addip chunk
+- * queue.
+- */
+- if (!list_empty(&asoc->addip_chunk_list)) {
+- struct list_head *entry = asoc->addip_chunk_list.next;
+- struct sctp_chunk *asconf = list_entry(entry,
+- struct sctp_chunk, list);
+- list_del_init(entry);
+-
+- /* Hold the chunk until an ASCONF_ACK is received. */
+- sctp_chunk_hold(asconf);
+- if (sctp_primitive_ASCONF(net, asoc, asconf))
+- sctp_chunk_free(asconf);
+- else
+- asoc->addip_last_asconf = asconf;
+- }
+-}
+-
+-
+ /* These three macros allow us to pull the debugging code out of the
+ * main flow of sctp_do_sm() to keep attention focused on the real
+ * functionality there.
+@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
+ }
+ sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
+ break;
+- case SCTP_CMD_SEND_NEXT_ASCONF:
+- sctp_cmd_send_asconf(asoc);
+- break;
+ case SCTP_CMD_PURGE_ASCONF_QUEUE:
+ sctp_asconf_queue_teardown(asoc);
+ break;
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index c9ae3404b1bb..713a669d2058 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
++static enum sctp_disposition sctp_send_next_asconf(
++ struct net *net,
++ const struct sctp_endpoint *ep,
++ struct sctp_association *asoc,
++ const union sctp_subtype type,
++ struct sctp_cmd_seq *commands)
++{
++ struct sctp_chunk *asconf;
++ struct list_head *entry;
++
++ if (list_empty(&asoc->addip_chunk_list))
++ return SCTP_DISPOSITION_CONSUME;
++
++ entry = asoc->addip_chunk_list.next;
++ asconf = list_entry(entry, struct sctp_chunk, list);
++
++ list_del_init(entry);
++ sctp_chunk_hold(asconf);
++ asoc->addip_last_asconf = asconf;
++
++ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
++}
++
+ /*
+ * ADDIP Section 4.3 General rules for address manipulation
+ * When building TLV parameters for the ASCONF Chunk that will add or
+@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+
+ if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
+- asconf_ack)) {
+- /* Successfully processed ASCONF_ACK. We can
+- * release the next asconf if we have one.
+- */
+- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+- SCTP_NULL());
+- return SCTP_DISPOSITION_CONSUME;
+- }
++ asconf_ack))
++ return sctp_send_next_asconf(net, ep,
++ (struct sctp_association *)asoc,
++ type, commands);
+
+ abort = sctp_make_abort(asoc, asconf_ack,
+ sizeof(struct sctp_errhdr));
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 5f1d937c4be9..7d5136ecee78 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -579,7 +579,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
+ {
+ struct strp_msg *rxm = strp_msg(skb);
+- int err = 0, offset = rxm->offset, copy, nsg;
++ int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
+ struct sk_buff *skb_iter, *unused;
+ struct scatterlist sg[1];
+ char *orig_buf, *buf;
+@@ -610,25 +610,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
+ else
+ err = 0;
+
+- copy = min_t(int, skb_pagelen(skb) - offset,
+- rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
++ data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+- if (skb->decrypted)
+- skb_store_bits(skb, offset, buf, copy);
++ if (skb_pagelen(skb) > offset) {
++ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
+
+- offset += copy;
+- buf += copy;
++ if (skb->decrypted)
++ skb_store_bits(skb, offset, buf, copy);
+
++ offset += copy;
++ buf += copy;
++ }
++
++ pos = skb_pagelen(skb);
+ skb_walk_frags(skb, skb_iter) {
+- copy = min_t(int, skb_iter->len,
+- rxm->full_len - offset + rxm->offset -
+- TLS_CIPHER_AES_GCM_128_TAG_SIZE);
++ int frag_pos;
++
++ /* Practically all frags must belong to msg if reencrypt
++ * is needed with current strparser and coalescing logic,
++ * but strparser may "get optimized", so let's be safe.
++ */
++ if (pos + skb_iter->len <= offset)
++ goto done_with_frag;
++ if (pos >= data_len + rxm->offset)
++ break;
++
++ frag_pos = offset - pos;
++ copy = min_t(int, skb_iter->len - frag_pos,
++ data_len + rxm->offset - offset);
+
+ if (skb_iter->decrypted)
+- skb_store_bits(skb_iter, offset, buf, copy);
++ skb_store_bits(skb_iter, frag_pos, buf, copy);
+
+ offset += copy;
+ buf += copy;
++done_with_frag:
++ pos += skb_iter->len;
+ }
+
+ free_buf:
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
+index ef8934fd8698..426dd97725e4 100644
+--- a/net/tls/tls_device_fallback.c
++++ b/net/tls/tls_device_fallback.c
+@@ -200,13 +200,14 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
+
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, headln);
+- update_chksum(nskb, headln);
+
+ nskb->destructor = skb->destructor;
+ nskb->sk = sk;
+ skb->destructor = NULL;
+ skb->sk = NULL;
+
++ update_chksum(nskb, headln);
++
+ delta = nskb->truesize - skb->truesize;
+ if (likely(delta < 0))
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index c1376bfdc90b..aa28510d23ad 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+ {
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+- unsigned char len;
++ unsigned char *len;
+ unsigned count;
+
+ if (address > 0xffff || datalen > 0xff)
+ return -EINVAL;
+
++ len = kmalloc(sizeof(*len), GFP_KERNEL);
++ if (!len)
++ return -ENOMEM;
++
+ /* query the serial number: */
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+@@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+ /* Wait for data length. We'll get 0xff until length arrives. */
+@@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+- 0x0012, 0x0000, &len, 1,
++ 0x0012, 0x0000, len, 1,
+ LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receive length failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+- if (len != 0xff)
++ if (*len != 0xff)
+ break;
+ }
+
+- if (len == 0xff) {
++ ret = -EIO;
++ if (*len == 0xff) {
+ dev_err(line6->ifcdev, "read failed after %d retries\n",
+ count);
+- return -EIO;
+- } else if (len != datalen) {
++ goto exit;
++ } else if (*len != datalen) {
+ /* should be equal or something went wrong */
+ dev_err(line6->ifcdev,
+ "length mismatch (expected %d, got %d)\n",
+- (int)datalen, (int)len);
+- return -EIO;
++ (int)datalen, (int)*len);
++ goto exit;
+ }
+
+ /* receive the result: */
+@@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+ 0x0013, 0x0000, data, datalen,
+ LINE6_TIMEOUT * HZ);
+
+- if (ret < 0) {
++ if (ret < 0)
+ dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
+- return ret;
+- }
+
+- return 0;
++exit:
++ kfree(len);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(line6_read_data);
+
+@@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+ {
+ struct usb_device *usbdev = line6->usbdev;
+ int ret;
+- unsigned char status;
++ unsigned char *status;
+ int count;
+
+ if (address > 0xffff || datalen > 0xffff)
+ return -EINVAL;
+
++ status = kmalloc(sizeof(*status), GFP_KERNEL);
++ if (!status)
++ return -ENOMEM;
++
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x0022, address, data, datalen,
+@@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "write request failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+ for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
+@@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE |
+ USB_DIR_IN,
+ 0x0012, 0x0000,
+- &status, 1, LINE6_TIMEOUT * HZ);
++ status, 1, LINE6_TIMEOUT * HZ);
+
+ if (ret < 0) {
+ dev_err(line6->ifcdev,
+ "receiving status failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+- if (status != 0xff)
++ if (*status != 0xff)
+ break;
+ }
+
+- if (status == 0xff) {
++ if (*status == 0xff) {
+ dev_err(line6->ifcdev, "write failed after %d retries\n",
+ count);
+- return -EIO;
+- } else if (status != 0) {
++ ret = -EIO;
++ } else if (*status != 0) {
+ dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
+- return -EIO;
++ ret = -EIO;
+ }
+-
+- return 0;
++exit:
++ kfree(status);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(line6_write_data);
+
+diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
+index 36ed9c85c0eb..5f3c87264e66 100644
+--- a/sound/usb/line6/podhd.c
++++ b/sound/usb/line6/podhd.c
+@@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t)
+ static int podhd_dev_start(struct usb_line6_podhd *pod)
+ {
+ int ret;
+- u8 init_bytes[8];
++ u8 *init_bytes;
+ int i;
+ struct usb_device *usbdev = pod->line6.usbdev;
+
++ init_bytes = kmalloc(8, GFP_KERNEL);
++ if (!init_bytes)
++ return -ENOMEM;
++
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+ 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x11, 0,
+ NULL, 0, LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+ /* NOTE: looks like some kind of ping message */
+ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x11, 0x0,
+- &init_bytes, 3, LINE6_TIMEOUT * HZ);
++ init_bytes, 3, LINE6_TIMEOUT * HZ);
+ if (ret < 0) {
+ dev_err(pod->line6.ifcdev,
+ "receive length failed (error %d)\n", ret);
+- return ret;
++ goto exit;
+ }
+
+ pod->firmware_version =
+@@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
+ for (i = 0; i <= 16; i++) {
+ ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
+ if (ret < 0)
+- return ret;
++ goto exit;
+ }
+
+ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+@@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
+ USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 1, 0,
+ NULL, 0, LINE6_TIMEOUT * HZ);
+- if (ret < 0)
+- return ret;
+-
+- return 0;
++exit:
++ kfree(init_bytes);
++ return ret;
+ }
+
+ static void podhd_startup_workqueue(struct work_struct *work)
+diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
+index f47ba94e6f4a..19bee725de00 100644
+--- a/sound/usb/line6/toneport.c
++++ b/sound/usb/line6/toneport.c
+@@ -365,16 +365,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
+ /*
+ Setup Toneport device.
+ */
+-static void toneport_setup(struct usb_line6_toneport *toneport)
++static int toneport_setup(struct usb_line6_toneport *toneport)
+ {
+- u32 ticks;
++ u32 *ticks;
+ struct usb_line6 *line6 = &toneport->line6;
+ struct usb_device *usbdev = line6->usbdev;
+
++ ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
++ if (!ticks)
++ return -ENOMEM;
++
+ /* sync time on device with host: */
+ /* note: 32-bit timestamps overflow in year 2106 */
+- ticks = (u32)ktime_get_real_seconds();
+- line6_write_data(line6, 0x80c6, &ticks, 4);
++ *ticks = (u32)ktime_get_real_seconds();
++ line6_write_data(line6, 0x80c6, ticks, 4);
++ kfree(ticks);
+
+ /* enable device: */
+ toneport_send_cmd(usbdev, 0x0301, 0x0000);
+@@ -389,6 +394,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
+ toneport_update_led(toneport);
+
+ mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
++ return 0;
+ }
+
+ /*
+@@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
+ return err;
+ }
+
+- toneport_setup(toneport);
++ err = toneport_setup(toneport);
++ if (err)
++ return err;
+
+ /* register audio system: */
+ return snd_card_register(line6->card);
+@@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
+ */
+ static int toneport_reset_resume(struct usb_interface *interface)
+ {
+- toneport_setup(usb_get_intfdata(interface));
++ int err;
++
++ err = toneport_setup(usb_get_intfdata(interface));
++ if (err)
++ return err;
+ return line6_resume(interface);
+ }
+ #endif
+diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
+index d4cfb6a7a086..4b7e107865bf 100755
+--- a/tools/testing/selftests/net/fib_rule_tests.sh
++++ b/tools/testing/selftests/net/fib_rule_tests.sh
+@@ -27,6 +27,7 @@ log_test()
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-50s [ OK ]\n" "${msg}"
+ else
++ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-50s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+@@ -147,8 +148,8 @@ fib_rule6_test()
+
+ fib_check_iproute_support "ipproto" "ipproto"
+ if [ $? -eq 0 ]; then
+- match="ipproto icmp"
+- fib_rule6_test_match_n_redirect "$match" "$match" "ipproto icmp match"
++ match="ipproto ipv6-icmp"
++ fib_rule6_test_match_n_redirect "$match" "$match" "ipproto ipv6-icmp match"
+ fi
+ }
+
+@@ -245,4 +246,9 @@ setup
+ run_fibrule_tests
+ cleanup
+
++if [ "$TESTS" != "none" ]; then
++ printf "\nTests passed: %3d\n" ${nsuccess}
++ printf "Tests failed: %3d\n" ${nfail}
++fi
++
+ exit $ret