
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Thu, 03 Feb 2022 11:46:40
Message-Id: 1643888784.044d7e58892acf12dc60d42da17cec146792c238.mpagano@gentoo
commit:     044d7e58892acf12dc60d42da17cec146792c238
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Feb 3 11:46:24 2022 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Feb 3 11:46:24 2022 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=044d7e58

Linux patch 4.4.302

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |   4 +
 1301_linux-4.4.302.patch | 884 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 888 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index c28b7561..00214606 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1247,6 +1247,10 @@ Patch: 1300_linux-4.4.301.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.301
23
24 +Patch: 1301_linux-4.4.302.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.302
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1301_linux-4.4.302.patch b/1301_linux-4.4.302.patch
33 new file mode 100644
34 index 00000000..a83ed321
35 --- /dev/null
36 +++ b/1301_linux-4.4.302.patch
37 @@ -0,0 +1,884 @@
38 +diff --git a/Makefile b/Makefile
39 +index 3bf23154499e6..2a03be868d10b 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 4
45 +-SUBLEVEL = 301
46 ++SUBLEVEL = 302
47 + EXTRAVERSION =
48 + NAME = Blurry Fish Butt
49 +
50 +diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
51 +index 44feac38ccfc2..7f53e40597f96 100644
52 +--- a/arch/s390/hypfs/hypfs_vm.c
53 ++++ b/arch/s390/hypfs/hypfs_vm.c
54 +@@ -19,6 +19,7 @@
55 +
56 + static char local_guest[] = " ";
57 + static char all_guests[] = "* ";
58 ++static char *all_groups = all_guests;
59 + static char *guest_query;
60 +
61 + struct diag2fc_data {
62 +@@ -61,10 +62,11 @@ static int diag2fc(int size, char* query, void *addr)
63 +
64 + memcpy(parm_list.userid, query, NAME_LEN);
65 + ASCEBC(parm_list.userid, NAME_LEN);
66 +- parm_list.addr = (unsigned long) addr ;
67 ++ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
68 ++ ASCEBC(parm_list.aci_grp, NAME_LEN);
69 ++ parm_list.addr = (unsigned long)addr;
70 + parm_list.size = size;
71 + parm_list.fmt = 0x02;
72 +- memset(parm_list.aci_grp, 0x40, NAME_LEN);
73 + rc = -1;
74 +
75 + diag_stat_inc(DIAG_STAT_X2FC);
76 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
77 +index 8dce61ca934b2..910100257df93 100644
78 +--- a/arch/x86/kvm/x86.c
79 ++++ b/arch/x86/kvm/x86.c
80 +@@ -4417,13 +4417,6 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
81 + if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
82 + access |= PFERR_USER_MASK;
83 +
84 +- /*
85 +- * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
86 +- * is returned, but our callers are not ready for that and they blindly
87 +- * call kvm_inject_page_fault. Ensure that they at least do not leak
88 +- * uninitialized kernel stack memory into cr2 and error code.
89 +- */
90 +- memset(exception, 0, sizeof(*exception));
91 + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
92 + access, exception);
93 + }
94 +@@ -4431,6 +4424,13 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
95 + int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
96 + unsigned int bytes, struct x86_exception *exception)
97 + {
98 ++ /*
99 ++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
100 ++ * is returned, but our callers are not ready for that and they blindly
101 ++ * call kvm_inject_page_fault. Ensure that they at least do not leak
102 ++ * uninitialized kernel stack memory into cr2 and error code.
103 ++ */
104 ++ memset(exception, 0, sizeof(*exception));
105 + return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
106 + PFERR_WRITE_MASK, exception);
107 + }
108 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
109 +index 24d45fc7716c5..c9853626cec4e 100644
110 +--- a/drivers/gpu/drm/msm/msm_drv.c
111 ++++ b/drivers/gpu/drm/msm/msm_drv.c
112 +@@ -286,7 +286,7 @@ static int msm_init_vram(struct drm_device *dev)
113 + ret = of_address_to_resource(node, 0, &r);
114 + if (ret)
115 + return ret;
116 +- size = r.end - r.start;
117 ++ size = r.end - r.start + 1;
118 + DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
119 + } else
120 + #endif
121 +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
122 +index 8e1bf9ed8efff..c8baa06773df8 100644
123 +--- a/drivers/gpu/drm/radeon/ci_dpm.c
124 ++++ b/drivers/gpu/drm/radeon/ci_dpm.c
125 +@@ -776,12 +776,6 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
126 + u32 vblank_time = r600_dpm_get_vblank_time(rdev);
127 + u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
128 +
129 +- /* disable mclk switching if the refresh is >120Hz, even if the
130 +- * blanking period would allow it
131 +- */
132 +- if (r600_dpm_get_vrefresh(rdev) > 120)
133 +- return true;
134 +-
135 + /* disable mclk switching if the refresh is >120Hz, even if the
136 + * blanking period would allow it
137 + */
138 +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
139 +index 420f341272621..6f6f173aca6f2 100644
140 +--- a/drivers/hwmon/lm90.c
141 ++++ b/drivers/hwmon/lm90.c
142 +@@ -265,7 +265,7 @@ static const struct lm90_params lm90_params[] = {
143 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
144 + | LM90_HAVE_BROKEN_ALERT,
145 + .alert_alarms = 0x7c,
146 +- .max_convrate = 8,
147 ++ .max_convrate = 7,
148 + },
149 + [lm86] = {
150 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
151 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
152 +index 323b86b38b3a3..6cd2ae95e21ed 100644
153 +--- a/drivers/input/serio/i8042-x86ia64io.h
154 ++++ b/drivers/input/serio/i8042-x86ia64io.h
155 +@@ -586,11 +586,6 @@ static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
156 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
157 + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
158 + },
159 +- }, {
160 +- .matches = {
161 +- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
162 +- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
163 +- },
164 + },
165 + { }
166 + };
167 +@@ -677,6 +672,12 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
168 + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
169 + },
170 + },
171 ++ {
172 ++ .matches = {
173 ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
174 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
175 ++ },
176 ++ },
177 + { }
178 + };
179 + static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
180 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
181 +index 546cd995ab294..c3befb3f5dcda 100644
182 +--- a/drivers/media/i2c/tc358743.c
183 ++++ b/drivers/media/i2c/tc358743.c
184 +@@ -241,7 +241,7 @@ static void i2c_wr16(struct v4l2_subdev *sd, u16 reg, u16 val)
185 +
186 + static void i2c_wr16_and_or(struct v4l2_subdev *sd, u16 reg, u16 mask, u16 val)
187 + {
188 +- i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1);
189 ++ i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2);
190 + }
191 +
192 + static u32 i2c_rd32(struct v4l2_subdev *sd, u16 reg)
193 +diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
194 +index f7630cf581cd9..fd622021748f8 100644
195 +--- a/drivers/s390/scsi/zfcp_fc.c
196 ++++ b/drivers/s390/scsi/zfcp_fc.c
197 +@@ -518,6 +518,8 @@ static void zfcp_fc_adisc_handler(void *data)
198 + goto out;
199 + }
200 +
201 ++ /* re-init to undo drop from zfcp_fc_adisc() */
202 ++ port->d_id = ntoh24(adisc_resp->adisc_port_id);
203 + /* port is good, unblock rport without going through erp */
204 + zfcp_scsi_schedule_rport_register(port);
205 + out:
206 +@@ -531,6 +533,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
207 + struct zfcp_fc_req *fc_req;
208 + struct zfcp_adapter *adapter = port->adapter;
209 + struct Scsi_Host *shost = adapter->scsi_host;
210 ++ u32 d_id;
211 + int ret;
212 +
213 + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
214 +@@ -555,7 +558,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
215 + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
216 + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
217 +
218 +- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
219 ++ d_id = port->d_id; /* remember as destination for send els below */
220 ++ /*
221 ++ * Force fresh GID_PN lookup on next port recovery.
222 ++ * Must happen after request setup and before sending request,
223 ++ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
224 ++ */
225 ++ port->d_id = 0;
226 ++
227 ++ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
228 + ZFCP_FC_CTELS_TMO);
229 + if (ret)
230 + kmem_cache_free(zfcp_fc_req_cache, fc_req);
231 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
232 +index 573aeec7a02b6..66f7f89aa0ee4 100644
233 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
234 ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
235 +@@ -79,7 +79,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
236 + static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
237 + static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
238 + struct device *parent, int npiv);
239 +-static void bnx2fc_destroy_work(struct work_struct *work);
240 ++static void bnx2fc_port_destroy(struct fcoe_port *port);
241 +
242 + static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
243 + static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
244 +@@ -855,9 +855,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
245 + __bnx2fc_destroy(interface);
246 + }
247 + mutex_unlock(&bnx2fc_dev_lock);
248 +-
249 +- /* Ensure ALL destroy work has been completed before return */
250 +- flush_workqueue(bnx2fc_wq);
251 + return;
252 +
253 + default:
254 +@@ -1148,8 +1145,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
255 + mutex_unlock(&n_port->lp_mutex);
256 + bnx2fc_free_vport(interface->hba, port->lport);
257 + bnx2fc_port_shutdown(port->lport);
258 ++ bnx2fc_port_destroy(port);
259 + bnx2fc_interface_put(interface);
260 +- queue_work(bnx2fc_wq, &port->destroy_work);
261 + return 0;
262 + }
263 +
264 +@@ -1457,7 +1454,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
265 + port->lport = lport;
266 + port->priv = interface;
267 + port->get_netdev = bnx2fc_netdev;
268 +- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
269 +
270 + /* Configure fcoe_port */
271 + rc = bnx2fc_lport_config(lport);
272 +@@ -1582,8 +1578,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
273 + bnx2fc_interface_cleanup(interface);
274 + bnx2fc_stop(interface);
275 + list_del(&interface->list);
276 ++ bnx2fc_port_destroy(port);
277 + bnx2fc_interface_put(interface);
278 +- queue_work(bnx2fc_wq, &port->destroy_work);
279 + }
280 +
281 + /**
282 +@@ -1624,15 +1620,12 @@ netdev_err:
283 + return rc;
284 + }
285 +
286 +-static void bnx2fc_destroy_work(struct work_struct *work)
287 ++static void bnx2fc_port_destroy(struct fcoe_port *port)
288 + {
289 +- struct fcoe_port *port;
290 + struct fc_lport *lport;
291 +
292 +- port = container_of(work, struct fcoe_port, destroy_work);
293 + lport = port->lport;
294 +-
295 +- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
296 ++ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
297 +
298 + bnx2fc_if_destroy(lport);
299 + }
300 +@@ -2469,9 +2462,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
301 + __bnx2fc_destroy(interface);
302 + mutex_unlock(&bnx2fc_dev_lock);
303 +
304 +- /* Ensure ALL destroy work has been completed before return */
305 +- flush_workqueue(bnx2fc_wq);
306 +-
307 + bnx2fc_ulp_stop(hba);
308 + /* unregister cnic device */
309 + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
310 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
311 +index 9b2beada2ff3b..d2ff11d524c02 100644
312 +--- a/drivers/tty/n_gsm.c
313 ++++ b/drivers/tty/n_gsm.c
314 +@@ -329,6 +329,7 @@ static struct tty_driver *gsm_tty_driver;
315 + #define GSM1_ESCAPE_BITS 0x20
316 + #define XON 0x11
317 + #define XOFF 0x13
318 ++#define ISO_IEC_646_MASK 0x7F
319 +
320 + static const struct tty_port_operations gsm_port_ops;
321 +
322 +@@ -547,7 +548,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
323 + int olen = 0;
324 + while (len--) {
325 + if (*input == GSM1_SOF || *input == GSM1_ESCAPE
326 +- || *input == XON || *input == XOFF) {
327 ++ || (*input & ISO_IEC_646_MASK) == XON
328 ++ || (*input & ISO_IEC_646_MASK) == XOFF) {
329 + *output++ = GSM1_ESCAPE;
330 + *output++ = *input++ ^ GSM1_ESCAPE_BITS;
331 + olen++;
332 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
333 +index db66e533319ea..43f15ac54f0d1 100644
334 +--- a/drivers/tty/serial/8250/8250_pci.c
335 ++++ b/drivers/tty/serial/8250/8250_pci.c
336 +@@ -5404,8 +5404,30 @@ static struct pci_device_id serial_pci_tbl[] = {
337 + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
338 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
339 + pbn_b2_4_115200 },
340 ++ /* Brainboxes Devices */
341 + /*
342 +- * BrainBoxes UC-260
343 ++ * Brainboxes UC-101
344 ++ */
345 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
346 ++ PCI_ANY_ID, PCI_ANY_ID,
347 ++ 0, 0,
348 ++ pbn_b2_2_115200 },
349 ++ /*
350 ++ * Brainboxes UC-235/246
351 ++ */
352 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
353 ++ PCI_ANY_ID, PCI_ANY_ID,
354 ++ 0, 0,
355 ++ pbn_b2_1_115200 },
356 ++ /*
357 ++ * Brainboxes UC-257
358 ++ */
359 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
360 ++ PCI_ANY_ID, PCI_ANY_ID,
361 ++ 0, 0,
362 ++ pbn_b2_2_115200 },
363 ++ /*
364 ++ * Brainboxes UC-260/271/701/756
365 + */
366 + { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
367 + PCI_ANY_ID, PCI_ANY_ID,
368 +@@ -5413,7 +5435,81 @@ static struct pci_device_id serial_pci_tbl[] = {
369 + pbn_b2_4_115200 },
370 + { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
371 + PCI_ANY_ID, PCI_ANY_ID,
372 +- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
373 ++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
374 ++ pbn_b2_4_115200 },
375 ++ /*
376 ++ * Brainboxes UC-268
377 ++ */
378 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
379 ++ PCI_ANY_ID, PCI_ANY_ID,
380 ++ 0, 0,
381 ++ pbn_b2_4_115200 },
382 ++ /*
383 ++ * Brainboxes UC-275/279
384 ++ */
385 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
386 ++ PCI_ANY_ID, PCI_ANY_ID,
387 ++ 0, 0,
388 ++ pbn_b2_8_115200 },
389 ++ /*
390 ++ * Brainboxes UC-302
391 ++ */
392 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
393 ++ PCI_ANY_ID, PCI_ANY_ID,
394 ++ 0, 0,
395 ++ pbn_b2_2_115200 },
396 ++ /*
397 ++ * Brainboxes UC-310
398 ++ */
399 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
400 ++ PCI_ANY_ID, PCI_ANY_ID,
401 ++ 0, 0,
402 ++ pbn_b2_2_115200 },
403 ++ /*
404 ++ * Brainboxes UC-313
405 ++ */
406 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
407 ++ PCI_ANY_ID, PCI_ANY_ID,
408 ++ 0, 0,
409 ++ pbn_b2_2_115200 },
410 ++ /*
411 ++ * Brainboxes UC-320/324
412 ++ */
413 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
414 ++ PCI_ANY_ID, PCI_ANY_ID,
415 ++ 0, 0,
416 ++ pbn_b2_1_115200 },
417 ++ /*
418 ++ * Brainboxes UC-346
419 ++ */
420 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
421 ++ PCI_ANY_ID, PCI_ANY_ID,
422 ++ 0, 0,
423 ++ pbn_b2_4_115200 },
424 ++ /*
425 ++ * Brainboxes UC-357
426 ++ */
427 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
428 ++ PCI_ANY_ID, PCI_ANY_ID,
429 ++ 0, 0,
430 ++ pbn_b2_2_115200 },
431 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
432 ++ PCI_ANY_ID, PCI_ANY_ID,
433 ++ 0, 0,
434 ++ pbn_b2_2_115200 },
435 ++ /*
436 ++ * Brainboxes UC-368
437 ++ */
438 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
439 ++ PCI_ANY_ID, PCI_ANY_ID,
440 ++ 0, 0,
441 ++ pbn_b2_4_115200 },
442 ++ /*
443 ++ * Brainboxes UC-420/431
444 ++ */
445 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
446 ++ PCI_ANY_ID, PCI_ANY_ID,
447 ++ 0, 0,
448 + pbn_b2_4_115200 },
449 + /*
450 + * Perle PCI-RAS cards
451 +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
452 +index f89d1f79be18f..83d3645ac2ef1 100644
453 +--- a/drivers/tty/serial/stm32-usart.c
454 ++++ b/drivers/tty/serial/stm32-usart.c
455 +@@ -279,7 +279,7 @@ static void stm32_start_tx(struct uart_port *port)
456 + {
457 + struct circ_buf *xmit = &port->state->xmit;
458 +
459 +- if (uart_circ_empty(xmit))
460 ++ if (uart_circ_empty(xmit) && !port->x_char)
461 + return;
462 +
463 + stm32_set_bits(port, USART_CR1, USART_CR1_TXEIE | USART_CR1_TE);
464 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
465 +index 3e0c1ff3a688e..a6f1eda264dad 100644
466 +--- a/drivers/usb/core/hcd.c
467 ++++ b/drivers/usb/core/hcd.c
468 +@@ -1642,6 +1642,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
469 + urb->hcpriv = NULL;
470 + INIT_LIST_HEAD(&urb->urb_list);
471 + atomic_dec(&urb->use_count);
472 ++ /*
473 ++ * Order the write of urb->use_count above before the read
474 ++ * of urb->reject below. Pairs with the memory barriers in
475 ++ * usb_kill_urb() and usb_poison_urb().
476 ++ */
477 ++ smp_mb__after_atomic();
478 ++
479 + atomic_dec(&urb->dev->urbnum);
480 + if (atomic_read(&urb->reject))
481 + wake_up(&usb_kill_urb_queue);
482 +@@ -1751,6 +1758,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
483 +
484 + usb_anchor_resume_wakeups(anchor);
485 + atomic_dec(&urb->use_count);
486 ++ /*
487 ++ * Order the write of urb->use_count above before the read
488 ++ * of urb->reject below. Pairs with the memory barriers in
489 ++ * usb_kill_urb() and usb_poison_urb().
490 ++ */
491 ++ smp_mb__after_atomic();
492 ++
493 + if (unlikely(atomic_read(&urb->reject)))
494 + wake_up(&usb_kill_urb_queue);
495 + usb_put_urb(urb);
496 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
497 +index 8c4bfd42f785d..693f352b8e074 100644
498 +--- a/drivers/usb/core/urb.c
499 ++++ b/drivers/usb/core/urb.c
500 +@@ -686,6 +686,12 @@ void usb_kill_urb(struct urb *urb)
501 + if (!(urb && urb->dev && urb->ep))
502 + return;
503 + atomic_inc(&urb->reject);
504 ++ /*
505 ++ * Order the write of urb->reject above before the read
506 ++ * of urb->use_count below. Pairs with the barriers in
507 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
508 ++ */
509 ++ smp_mb__after_atomic();
510 +
511 + usb_hcd_unlink_urb(urb, -ENOENT);
512 + wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
513 +@@ -727,6 +733,12 @@ void usb_poison_urb(struct urb *urb)
514 + if (!urb)
515 + return;
516 + atomic_inc(&urb->reject);
517 ++ /*
518 ++ * Order the write of urb->reject above before the read
519 ++ * of urb->use_count below. Pairs with the barriers in
520 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
521 ++ */
522 ++ smp_mb__after_atomic();
523 +
524 + if (!urb->dev || !urb->ep)
525 + return;
526 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
527 +index 52895c462d7d9..3357ceef0b013 100644
528 +--- a/drivers/usb/storage/unusual_devs.h
529 ++++ b/drivers/usb/storage/unusual_devs.h
530 +@@ -2155,6 +2155,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
531 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
532 + US_FL_SCM_MULT_TARG ),
533 +
534 ++/*
535 ++ * Reported by DocMAX <mail@××××××××××.de>
536 ++ * and Thomas Weißschuh <linux@××××××××××.net>
537 ++ */
538 ++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
539 ++ "VIA Labs, Inc.",
540 ++ "VL817 SATA Bridge",
541 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
542 ++ US_FL_IGNORE_UAS),
543 ++
544 + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
545 + "ST",
546 + "2A",
547 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
548 +index 2c39c1c81196c..b709c51c9f9ec 100644
549 +--- a/fs/udf/inode.c
550 ++++ b/fs/udf/inode.c
551 +@@ -260,10 +260,6 @@ int udf_expand_file_adinicb(struct inode *inode)
552 + char *kaddr;
553 + struct udf_inode_info *iinfo = UDF_I(inode);
554 + int err;
555 +- struct writeback_control udf_wbc = {
556 +- .sync_mode = WB_SYNC_NONE,
557 +- .nr_to_write = 1,
558 +- };
559 +
560 + WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
561 + if (!iinfo->i_lenAlloc) {
562 +@@ -307,8 +303,10 @@ int udf_expand_file_adinicb(struct inode *inode)
563 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
564 + /* from now on we have normal address_space methods */
565 + inode->i_data.a_ops = &udf_aops;
566 ++ set_page_dirty(page);
567 ++ unlock_page(page);
568 + up_write(&iinfo->i_data_sem);
569 +- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
570 ++ err = filemap_fdatawrite(inode->i_mapping);
571 + if (err) {
572 + /* Restore everything back so that we don't lose data... */
573 + lock_page(page);
574 +@@ -320,6 +318,7 @@ int udf_expand_file_adinicb(struct inode *inode)
575 + unlock_page(page);
576 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
577 + inode->i_data.a_ops = &udf_adinicb_aops;
578 ++ iinfo->i_lenAlloc = inode->i_size;
579 + up_write(&iinfo->i_data_sem);
580 + }
581 + page_cache_release(page);
582 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
583 +index 401a404b64b93..78864ffaf0d71 100644
584 +--- a/include/linux/netdevice.h
585 ++++ b/include/linux/netdevice.h
586 +@@ -2055,6 +2055,7 @@ struct packet_type {
587 + struct net_device *);
588 + bool (*id_match)(struct packet_type *ptype,
589 + struct sock *sk);
590 ++ struct net *af_packet_net;
591 + void *af_packet_priv;
592 + struct list_head list;
593 + };
594 +diff --git a/include/net/ip.h b/include/net/ip.h
595 +index 5c9de851a9191..869fd7a136121 100644
596 +--- a/include/net/ip.h
597 ++++ b/include/net/ip.h
598 +@@ -353,19 +353,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
599 + {
600 + struct iphdr *iph = ip_hdr(skb);
601 +
602 ++ /* We had many attacks based on IPID, use the private
603 ++ * generator as much as we can.
604 ++ */
605 ++ if (sk && inet_sk(sk)->inet_daddr) {
606 ++ iph->id = htons(inet_sk(sk)->inet_id);
607 ++ inet_sk(sk)->inet_id += segs;
608 ++ return;
609 ++ }
610 + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
611 +- /* This is only to work around buggy Windows95/2000
612 +- * VJ compression implementations. If the ID field
613 +- * does not change, they drop every other packet in
614 +- * a TCP stream using header compression.
615 +- */
616 +- if (sk && inet_sk(sk)->inet_daddr) {
617 +- iph->id = htons(inet_sk(sk)->inet_id);
618 +- inet_sk(sk)->inet_id += segs;
619 +- } else {
620 +- iph->id = 0;
621 +- }
622 ++ iph->id = 0;
623 + } else {
624 ++ /* Unfortunately we need the big hammer to get a suitable IPID */
625 + __ip_select_ident(net, iph, segs);
626 + }
627 + }
628 +diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
629 +index 1896386e16bbe..78e354b1c593b 100644
630 +--- a/kernel/power/wakelock.c
631 ++++ b/kernel/power/wakelock.c
632 +@@ -38,23 +38,19 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
633 + {
634 + struct rb_node *node;
635 + struct wakelock *wl;
636 +- char *str = buf;
637 +- char *end = buf + PAGE_SIZE;
638 ++ int len = 0;
639 +
640 + mutex_lock(&wakelocks_lock);
641 +
642 + for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
643 + wl = rb_entry(node, struct wakelock, node);
644 + if (wl->ws.active == show_active)
645 +- str += scnprintf(str, end - str, "%s ", wl->name);
646 ++ len += sysfs_emit_at(buf, len, "%s ", wl->name);
647 + }
648 +- if (str > buf)
649 +- str--;
650 +-
651 +- str += scnprintf(str, end - str, "\n");
652 ++ len += sysfs_emit_at(buf, len, "\n");
653 +
654 + mutex_unlock(&wakelocks_lock);
655 +- return (str - buf);
656 ++ return len;
657 + }
658 +
659 + #if CONFIG_PM_WAKELOCKS_LIMIT > 0
660 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
661 +index 05ccd2bcd9e46..a557543ad29f3 100644
662 +--- a/net/bluetooth/hci_event.c
663 ++++ b/net/bluetooth/hci_event.c
664 +@@ -4940,6 +4940,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
665 + struct hci_ev_le_advertising_info *ev = ptr;
666 + s8 rssi;
667 +
668 ++ if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
669 ++ bt_dev_err(hdev, "Malicious advertising data.");
670 ++ break;
671 ++ }
672 ++
673 + if (ev->length <= HCI_MAX_AD_LENGTH &&
674 + ev->data + ev->length <= skb_tail_pointer(skb)) {
675 + rssi = ev->data[ev->length];
676 +@@ -4951,11 +4956,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
677 + }
678 +
679 + ptr += sizeof(*ev) + ev->length + 1;
680 +-
681 +- if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
682 +- bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
683 +- break;
684 +- }
685 + }
686 +
687 + hci_dev_unlock(hdev);
688 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
689 +index 4a95c89d85065..621329cb668aa 100644
690 +--- a/net/bluetooth/mgmt.c
691 ++++ b/net/bluetooth/mgmt.c
692 +@@ -2285,10 +2285,6 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
693 +
694 + BT_DBG("request for %s", hdev->name);
695 +
696 +- if (!IS_ENABLED(CONFIG_BT_HS))
697 +- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
698 +- MGMT_STATUS_NOT_SUPPORTED);
699 +-
700 + status = mgmt_bredr_support(hdev);
701 + if (status)
702 + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
703 +@@ -2438,6 +2434,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
704 +
705 + BT_DBG("request for %s", hdev->name);
706 +
707 ++ if (!IS_ENABLED(CONFIG_BT_HS))
708 ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
709 ++ MGMT_STATUS_NOT_SUPPORTED);
710 ++
711 + status = mgmt_bredr_support(hdev);
712 + if (status)
713 + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
714 +diff --git a/net/can/bcm.c b/net/can/bcm.c
715 +index 3e131dc5f0e52..549ee0de456f0 100644
716 +--- a/net/can/bcm.c
717 ++++ b/net/can/bcm.c
718 +@@ -737,21 +737,21 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
719 + static void bcm_remove_op(struct bcm_op *op)
720 + {
721 + if (op->tsklet.func) {
722 +- while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
723 +- test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
724 +- hrtimer_active(&op->timer)) {
725 +- hrtimer_cancel(&op->timer);
726 ++ do {
727 + tasklet_kill(&op->tsklet);
728 +- }
729 ++ hrtimer_cancel(&op->timer);
730 ++ } while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
731 ++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
732 ++ hrtimer_active(&op->timer));
733 + }
734 +
735 + if (op->thrtsklet.func) {
736 +- while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
737 +- test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
738 +- hrtimer_active(&op->thrtimer)) {
739 +- hrtimer_cancel(&op->thrtimer);
740 ++ do {
741 + tasklet_kill(&op->thrtsklet);
742 +- }
743 ++ hrtimer_cancel(&op->thrtimer);
744 ++ } while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
745 ++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
746 ++ hrtimer_active(&op->thrtimer));
747 + }
748 +
749 + if ((op->frames) && (op->frames != &op->sframe))
750 +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
751 +index 2bf83299600a4..ef7170adee846 100644
752 +--- a/net/core/net-procfs.c
753 ++++ b/net/core/net-procfs.c
754 +@@ -207,12 +207,23 @@ static const struct file_operations softnet_seq_fops = {
755 + .release = seq_release,
756 + };
757 +
758 +-static void *ptype_get_idx(loff_t pos)
759 ++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
760 + {
761 ++ struct list_head *ptype_list = NULL;
762 + struct packet_type *pt = NULL;
763 ++ struct net_device *dev;
764 + loff_t i = 0;
765 + int t;
766 +
767 ++ for_each_netdev_rcu(seq_file_net(seq), dev) {
768 ++ ptype_list = &dev->ptype_all;
769 ++ list_for_each_entry_rcu(pt, ptype_list, list) {
770 ++ if (i == pos)
771 ++ return pt;
772 ++ ++i;
773 ++ }
774 ++ }
775 ++
776 + list_for_each_entry_rcu(pt, &ptype_all, list) {
777 + if (i == pos)
778 + return pt;
779 +@@ -233,22 +244,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
780 + __acquires(RCU)
781 + {
782 + rcu_read_lock();
783 +- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
784 ++ return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
785 + }
786 +
787 + static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
788 + {
789 ++ struct net_device *dev;
790 + struct packet_type *pt;
791 + struct list_head *nxt;
792 + int hash;
793 +
794 + ++*pos;
795 + if (v == SEQ_START_TOKEN)
796 +- return ptype_get_idx(0);
797 ++ return ptype_get_idx(seq, 0);
798 +
799 + pt = v;
800 + nxt = pt->list.next;
801 ++ if (pt->dev) {
802 ++ if (nxt != &pt->dev->ptype_all)
803 ++ goto found;
804 ++
805 ++ dev = pt->dev;
806 ++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
807 ++ if (!list_empty(&dev->ptype_all)) {
808 ++ nxt = dev->ptype_all.next;
809 ++ goto found;
810 ++ }
811 ++ }
812 ++
813 ++ nxt = ptype_all.next;
814 ++ goto ptype_all;
815 ++ }
816 ++
817 + if (pt->type == htons(ETH_P_ALL)) {
818 ++ptype_all:
819 + if (nxt != &ptype_all)
820 + goto found;
821 + hash = 0;
822 +@@ -277,7 +306,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
823 +
824 + if (v == SEQ_START_TOKEN)
825 + seq_puts(seq, "Type Device Function\n");
826 +- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
827 ++ else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
828 ++ (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
829 + if (pt->type == htons(ETH_P_ALL))
830 + seq_puts(seq, "ALL ");
831 + else
832 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
833 +index 477540b3d3207..efd4410a73587 100644
834 +--- a/net/ipv4/ip_output.c
835 ++++ b/net/ipv4/ip_output.c
836 +@@ -155,12 +155,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
837 + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
838 + iph->saddr = saddr;
839 + iph->protocol = sk->sk_protocol;
840 +- if (ip_dont_fragment(sk, &rt->dst)) {
841 ++ /* Do not bother generating IPID for small packets (eg SYNACK) */
842 ++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
843 + iph->frag_off = htons(IP_DF);
844 + iph->id = 0;
845 + } else {
846 + iph->frag_off = 0;
847 +- __ip_select_ident(net, iph, 1);
848 ++ /* TCP packets here are SYNACK with fat IPv4/TCP options.
849 ++ * Avoid using the hashed IP ident generator.
850 ++ */
851 ++ if (sk->sk_protocol == IPPROTO_TCP)
852 ++ iph->id = (__force __be16)prandom_u32();
853 ++ else
854 ++ __ip_select_ident(net, iph, 1);
855 + }
856 +
857 + if (opt && opt->opt.optlen) {
858 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
859 +index 24ce13a796654..65dbe1784d021 100644
860 +--- a/net/ipv4/raw.c
861 ++++ b/net/ipv4/raw.c
862 +@@ -709,6 +709,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
863 + int ret = -EINVAL;
864 + int chk_addr_ret;
865 +
866 ++ lock_sock(sk);
867 + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
868 + goto out;
869 + chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
870 +@@ -721,7 +722,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
871 + inet->inet_saddr = 0; /* Use device */
872 + sk_dst_reset(sk);
873 + ret = 0;
874 +-out: return ret;
875 ++out:
876 ++ release_sock(sk);
877 ++ return ret;
878 + }
879 +
880 + /*
881 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
882 +index 80f88df280d7a..d3811c7adaed9 100644
883 +--- a/net/ipv6/ip6_tunnel.c
884 ++++ b/net/ipv6/ip6_tunnel.c
885 +@@ -917,12 +917,12 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
886 + ldev = dev_get_by_index_rcu(net, p->link);
887 +
888 + if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
889 +- pr_warn("%s xmit: Local address not yet configured!\n",
890 +- p->name);
891 ++ pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
892 ++ p->name);
893 + else if (!ipv6_addr_is_multicast(raddr) &&
894 + unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
895 +- pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
896 +- p->name);
897 ++ pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
898 ++ p->name);
899 + else
900 + ret = 1;
901 + rcu_read_unlock();
902 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
903 +index eac6f7eea7b51..9208bc1793028 100644
904 +--- a/net/packet/af_packet.c
905 ++++ b/net/packet/af_packet.c
906 +@@ -1709,6 +1709,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
907 + match->prot_hook.dev = po->prot_hook.dev;
908 + match->prot_hook.func = packet_rcv_fanout;
909 + match->prot_hook.af_packet_priv = match;
910 ++ match->prot_hook.af_packet_net = read_pnet(&match->net);
911 + match->prot_hook.id_match = match_fanout_group;
912 + list_add(&match->list, &fanout_list);
913 + }
914 +@@ -3167,6 +3168,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
915 + po->prot_hook.func = packet_rcv_spkt;
916 +
917 + po->prot_hook.af_packet_priv = sk;
918 ++ po->prot_hook.af_packet_net = sock_net(sk);
919 +
920 + if (proto) {
921 + po->prot_hook.type = proto;