From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Mon, 06 Jun 2022 11:05:59
Message-Id: 1654513543.2f8052d8b0cab6963d9adda65943c18822146475.mpagano@gentoo
commit: 2f8052d8b0cab6963d9adda65943c18822146475
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun 6 11:05:43 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun 6 11:05:43 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2f8052d8

Linux patch 4.19.246

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1245_linux-4.19.246.patch | 930 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 934 insertions(+)

diff --git a/0000_README b/0000_README
index d527b79c..4389c65a 100644
--- a/0000_README
+++ b/0000_README
@@ -1019,6 +1019,10 @@ Patch: 1244_linux-4.19.245.patch
From: https://www.kernel.org
Desc: Linux 4.19.245

+Patch: 1245_linux-4.19.246.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.246
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1245_linux-4.19.246.patch b/1245_linux-4.19.246.patch
new file mode 100644
index 00000000..111b704a
--- /dev/null
+++ b/1245_linux-4.19.246.patch
@@ -0,0 +1,930 @@
+diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
+index c0917107b90ab..3739c1ce686d4 100644
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -133,7 +133,7 @@ as you intend it to.
+
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
+
+ Solve only one problem per patch. If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
+diff --git a/Makefile b/Makefile
+index 64a64f6ba90d8..e1cbe6e8bcaef 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 245
++SUBLEVEL = 246
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 22da9bfd8a458..bacf8d988f65f 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -441,6 +441,11 @@ void __init xen_msi_init(void)
+
+ x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
++ /*
++ * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
++ * controlled by the hypervisor.
++ */
++ pci_msi_ignore_mask = 1;
+ }
+ #endif
+
+diff --git a/block/bio.c b/block/bio.c
+index fe749404ef93b..7858b2d239161 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1528,7 +1528,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ if (bytes > len)
+ bytes = len;
+
+- page = alloc_page(q->bounce_gfp | gfp_mask);
++ page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask);
+ if (!page)
+ goto cleanup;
+
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 39ee0ca636aae..0b6489fd5d0d1 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ {
+ struct acpi_data_attr *data_attr;
+ void __iomem *base;
+- ssize_t rc;
++ ssize_t size;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
++ size = data_attr->attr.size;
++
++ if (offset < 0)
++ return -EINVAL;
++
++ if (offset >= size)
++ return 0;
+
+- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
++ if (count > size - offset)
++ count = size - offset;
++
++ base = acpi_os_map_iomem(data_attr->addr, size);
+ if (!base)
+ return -ENOMEM;
+- rc = memory_read_from_buffer(buf, count, &offset, base,
+- data_attr->attr.size);
+- acpi_os_unmap_memory(base, data_attr->attr.size);
+
+- return rc;
++ memcpy_fromio(buf, base + offset, count);
++
++ acpi_os_unmap_iomem(base, size);
++
++ return count;
+ }
+
+ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index e71c6b24aed1c..ce497b0691d71 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -717,7 +717,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+- *value = be32_to_cpu(out->value);
++ /*
++ * To prevent failing boot up of some systems, Infineon TPM2.0
++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
++ * the TPM2_Getcapability command returns a zero length list
++ * in field upgrade mode.
++ */
++ if (be32_to_cpu(out->property_cnt) > 0)
++ *value = be32_to_cpu(out->value);
++ else
++ rc = -ENODATA;
+ }
+ tpm_buf_destroy(&buf);
+ return rc;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 3ba67bc6baba0..80647eb071fd6 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -692,6 +692,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
++ rc = -ENODEV;
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 1847faa45d370..af8865281b478 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2814,7 +2814,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
+ }
+
+ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+- uint16_t wm[8])
++ uint16_t wm[])
+ {
+ if (INTEL_GEN(dev_priv) >= 9) {
+ uint32_t val;
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index c20945ed1dc19..e99286258f62f 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2111,6 +2111,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
++ { .driver_data = MT_CLS_GOOGLE,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
+
+ /* Generic MT device */
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index 0d1c3ec8cb407..80796061102f1 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -80,6 +80,7 @@
+
+ #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
+ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
++#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
+
+ /* Hardware Descriptor Constants - Control Field */
+ #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
+@@ -173,6 +174,8 @@ struct ismt_priv {
+ u8 head; /* ring buffer head pointer */
+ struct completion cmp; /* interrupt completion */
+ u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
++ dma_addr_t log_dma;
++ u32 *log;
+ };
+
+ /**
+@@ -406,6 +409,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ memset(desc, 0, sizeof(struct ismt_desc));
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+
++ /* Always clear the log entries */
++ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
++
+ /* Initialize common control bits */
+ if (likely(pci_dev_msi_enabled(priv->pci_dev)))
+ desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
+@@ -695,6 +701,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
+ /* initialize the Master Descriptor Base Address (MDBA) */
+ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+
++ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
++
+ /* initialize the Master Control Register (MCTRL) */
+ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
+
+@@ -784,6 +792,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
+ priv->head = 0;
+ init_completion(&priv->cmp);
+
++ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
++ ISMT_LOG_ENTRIES * sizeof(u32),
++ &priv->log_dma, GFP_KERNEL);
++ if (!priv->log)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+index 19f8eec387172..107aeb8b54da4 100644
+--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
++++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+@@ -208,6 +208,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
++ i2c->adap.dev.fwnode = dev->fwnode;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "Cavium ThunderX i2c adapter at %s", dev_name(dev));
+ i2c_set_adapdata(&i2c->adap, i2c);
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 3441ad140b583..e38c713e882e3 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2932,6 +2932,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_SUBMITTED;
+ }
+
++static char hex2asc(unsigned char c)
++{
++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++}
++
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+ {
+@@ -2950,9 +2955,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+- else
+- for (i = 0; i < cc->key_size; i++)
+- DMEMIT("%02x", cc->key[i]);
++ else {
++ for (i = 0; i < cc->key_size; i++) {
++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
++ hex2asc(cc->key[i] & 0xf));
++ }
++ }
+ } else
+ DMEMIT("-");
+
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index b0105d53918ab..8456e82409e20 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -3565,8 +3565,6 @@ try_smaller_buffer:
+ }
+
+ if (should_write_sb) {
+- int r;
+-
+ init_journal(ic, 0, ic->journal_sections, 0);
+ r = dm_integrity_failed(ic);
+ if (unlikely(r)) {
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 21de30b4e2a16..3d59f3e208c50 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
++ cond_resched();
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
++ cond_resched();
+ }
+
+ if (s->n_histogram_entries) {
+@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ s->stat_shared[ni].tmp.histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+
+@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ p[ni].histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+ }
+@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
+ }
+ DMEMIT("\n");
+ }
++ cond_resched();
+ }
+ mutex_unlock(&stats->mutex);
+
+@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ local_irq_enable();
+ }
+ }
++ cond_resched();
+ }
+ }
+
+@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
++
++ cond_resched();
+ }
+
+ if (clear)
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index fa8c201fca778..36945030520a9 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1176,6 +1176,7 @@ bad:
+
+ static struct target_type verity_target = {
+ .name = "verity",
++ .features = DM_TARGET_IMMUTABLE,
+ .version = {1, 4, 0},
+ .module = THIS_MODULE,
+ .ctr = verity_ctr,
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 964407deca350..23c019d1278cd 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1869,6 +1869,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ /* AST2400 doesn't have working HW checksum generation */
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
++
++ /* AST2600 tx checksum with NCSI is broken */
++ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
++ netdev->hw_features &= ~NETIF_F_HW_CSUM;
++
+ if (np && of_get_property(np, "no-hw-checksum", NULL))
+ netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+ netdev->features |= netdev->hw_features;
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index 466d25ccc4bbc..40d7130a4909b 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -1359,9 +1359,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+
+ sec_len = *(pos++); len-= 1;
+
+- if (sec_len>0 && sec_len<=len) {
++ if (sec_len > 0 &&
++ sec_len <= len &&
++ sec_len <= 32) {
+ ssid[ssid_index].SsidLength = sec_len;
+- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
++ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
+ /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
+ /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
+ ssid_index++;
+diff --git a/fs/exec.c b/fs/exec.c
+index e87e3c020c61e..28e3b5eb2f4a1 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1805,6 +1805,9 @@ static int __do_execve_file(int fd, struct filename *filename,
+ goto out_unmark;
+
+ bprm->argc = count(argv, MAX_ARG_STRINGS);
++ if (bprm->argc == 0)
++ pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
++ current->comm, bprm->filename);
+ if ((retval = bprm->argc) < 0)
+ goto out;
+
+@@ -1829,6 +1832,20 @@ static int __do_execve_file(int fd, struct filename *filename,
+ if (retval < 0)
+ goto out;
+
++ /*
++ * When argv is empty, add an empty string ("") as argv[0] to
++ * ensure confused userspace programs that start processing
++ * from argv[1] won't end up walking envp. See also
++ * bprm_stack_limits().
++ */
++ if (bprm->argc == 0) {
++ const char *argv[] = { "", NULL };
++ retval = copy_strings_kernel(1, argv, bprm);
++ if (retval < 0)
++ goto out;
++ bprm->argc = 1;
++ }
++
+ retval = exec_binprm(bprm);
+ if (retval < 0)
+ goto out;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index ed5429d18595c..78191320f8e21 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -6401,16 +6401,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+ continue;
+
+- /* see if there are still any locks associated with it */
+- lo = lockowner(sop);
+- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+- if (check_for_locks(stp->st_stid.sc_file, lo)) {
+- status = nfserr_locks_held;
+- spin_unlock(&clp->cl_lock);
+- return status;
+- }
++ if (atomic_read(&sop->so_count) != 1) {
++ spin_unlock(&clp->cl_lock);
++ return nfserr_locks_held;
+ }
+
++ lo = lockowner(sop);
+ nfs4_get_stateowner(sop);
+ break;
+ }
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index fa5fe23ca6aaa..2d04f3e06de11 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -407,7 +407,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+ }
+
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+- struct sock *sk, u32 port_offset,
++ struct sock *sk, u64 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **));
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index 2a3e0974a6af4..4e3fff9f929b1 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+- if (!nf_ct_is_confirmed(ct))
++ if (!nf_ct_is_confirmed(ct)) {
+ ret = __nf_conntrack_confirm(skb);
++
++ if (ret == NF_ACCEPT)
++ ct = (struct nf_conn *)skb_nfct(skb);
++ }
++
+ if (likely(ret == NF_ACCEPT))
+ nf_ct_deliver_cached_events(ct);
+ }
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index d7d2495f83c27..dac91aa38c5af 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -4,8 +4,8 @@
+
+ #include <linux/types.h>
+
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 59875eb278ea5..3b1ff063ceca3 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -1465,6 +1465,7 @@ int assoc_array_gc(struct assoc_array *array,
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
++ bool retained;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+@@ -1541,6 +1542,7 @@ continue_node:
+ goto descend;
+ }
+
++retry_compress:
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+@@ -1558,6 +1560,7 @@ continue_node:
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
++ retained = false;
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+@@ -1607,9 +1610,14 @@ continue_node:
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
++ retained = true;
+ }
+ }
+
++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
++ pr_devel("internal nodes remain despite enough space, retrying\n");
++ goto retry_compress;
++ }
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 11e81b3ff0cf3..4d71356ea66a3 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1812,11 +1812,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
+ */
+ static void lock_zspage(struct zspage *zspage)
+ {
+- struct page *page = get_first_page(zspage);
++ struct page *curr_page, *page;
+
+- do {
+- lock_page(page);
+- } while ((page = get_next_page(page)) != NULL);
++ /*
++ * Pages we haven't locked yet can be migrated off the list while we're
++ * trying to lock them, so we need to be careful and only attempt to
++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
++ * may no longer belong to the zspage. This means that we may wait for
++ * the wrong page to unlock, so we must take a reference to the page
++ * prior to waiting for it to unlock outside migrate_read_lock().
++ */
++ while (1) {
++ migrate_read_lock(zspage);
++ page = get_first_page(zspage);
++ if (trylock_page(page))
++ break;
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ }
++
++ curr_page = page;
++ while ((page = get_next_page(curr_page))) {
++ if (trylock_page(page)) {
++ curr_page = page;
++ } else {
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ migrate_read_lock(zspage);
++ }
++ }
++ migrate_read_unlock(zspage);
+ }
+
+ static struct dentry *zs_mount(struct file_system_type *fs_type,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 7d68c98a00aa8..c1310c9d1b903 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1666,7 +1666,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ return -EINVAL;
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ return -EFAULT;
+ if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ return -EFAULT;
+@@ -1701,7 +1701,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+ {
+ void *ptr;
+
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ goto err_clear;
+
+ ptr = skb_header_pointer(skb, offset, len, to);
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 3a8128341e6ac..6fd25279bee95 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -96,7 +96,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ }
+ EXPORT_SYMBOL(secure_tcpv6_seq);
+
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+ {
+ const struct {
+@@ -146,7 +146,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ }
+ EXPORT_SYMBOL_GPL(secure_tcp_seq);
+
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
+ net_secret_init();
+ return siphash_4u32((__force u32)saddr, (__force u32)daddr,
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index c96a5871b49da..0a8aec3f37cc6 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -507,7 +507,7 @@ not_unique:
+ return -EADDRNOTAVAIL;
+ }
+
+-static u32 inet_sk_port_offset(const struct sock *sk)
++static u64 inet_sk_port_offset(const struct sock *sk)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+
+@@ -714,8 +714,19 @@ unlock:
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+
++/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
++ * Note that we use 32bit integers (vs RFC 'short integers')
++ * because 2^16 is not a multiple of num_ephemeral and this
++ * property might be used by clever attacker.
++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
++ * we use 256 instead to really give more isolation and
++ * privacy, this only consumes 1 KB of kernel memory.
++ */
++#define INET_TABLE_PERTURB_SHIFT 8
++static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
++
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+- struct sock *sk, u32 port_offset,
++ struct sock *sk, u64 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16, struct inet_timewait_sock **))
+ {
+@@ -727,7 +738,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct inet_bind_bucket *tb;
+ u32 remaining, offset;
+ int ret, i, low, high;
+- static u32 hint;
++ u32 index;
+
+ if (port) {
+ head = &hinfo->bhash[inet_bhashfn(net, port,
+@@ -752,7 +763,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ if (likely(remaining > 1))
+ remaining &= ~1U;
+
+- offset = (hint + port_offset) % remaining;
++ net_get_random_once(table_perturb, sizeof(table_perturb));
++ index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
++
++ offset = READ_ONCE(table_perturb[index]) + port_offset;
++ offset %= remaining;
++
+ /* In first pass we try ports of @low parity.
+ * inet_csk_get_port() does the opposite choice.
+ */
+@@ -805,7 +821,7 @@ next_port:
+ return -EADDRNOTAVAIL;
+
+ ok:
+- hint += i + 2;
++ WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+
+ /* Head lock still held and bh's disabled */
+ inet_bind_hash(sk, tb, port);
+@@ -828,7 +844,7 @@ ok:
+ int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk)
+ {
+- u32 port_offset = 0;
++ u64 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet_sk_port_offset(sk);
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index d9e2575dad948..d8391921363ff 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -311,7 +311,7 @@ not_unique:
+ return -EADDRNOTAVAIL;
+ }
+
+-static u32 inet6_sk_port_offset(const struct sock *sk)
++static u64 inet6_sk_port_offset(const struct sock *sk)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+
+@@ -323,7 +323,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
+ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk)
+ {
+- u32 port_offset = 0;
++ u64 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet6_sk_port_offset(sk);
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 170960ef7e360..1bbb6ec89ff3d 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2910,7 +2910,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
+ break;
+ if (!aalg->pfkey_supported)
+ continue;
+- if (aalg_tmpl_set(t, aalg))
++ if (aalg_tmpl_set(t, aalg) && aalg->available)
+ sz += sizeof(struct sadb_comb);
+ }
+ return sz + sizeof(struct sadb_prop);
+@@ -2928,7 +2928,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!ealg->pfkey_supported)
+ continue;
+
+- if (!(ealg_tmpl_set(t, ealg)))
++ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ continue;
+
+ for (k = 1; ; k++) {
+@@ -2939,7 +2939,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!aalg->pfkey_supported)
+ continue;
+
+- if (aalg_tmpl_set(t, aalg))
++ if (aalg_tmpl_set(t, aalg) && aalg->available)
+ sz += sizeof(struct sadb_comb);
+ }
+ }
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 68660781aa51f..7c66f99046ac8 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -4,6 +4,7 @@
+ * Copyright 2006-2010 Johannes Berg <johannes@××××××××××××.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
++ * Copyright (C) 2018-2021 Intel Corporation
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -835,9 +836,6 @@ int wiphy_register(struct wiphy *wiphy)
+ return res;
+ }
+
+- /* set up regulatory info */
+- wiphy_regulatory_register(wiphy);
+-
+ list_add_rcu(&rdev->list, &cfg80211_rdev_list);
+ cfg80211_rdev_list_generation++;
+
+@@ -851,6 +849,9 @@ int wiphy_register(struct wiphy *wiphy)
+ cfg80211_debugfs_rdev_add(rdev);
+ nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+
++ /* set up regulatory info */
++ wiphy_regulatory_register(wiphy);
++
+ if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
+ struct regulatory_request request;
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index c7825b951f725..dd8503a3ef1e1 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3756,6 +3756,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
+
+ wiphy_update_regulatory(wiphy, lr->initiator);
+ wiphy_all_share_dfs_chan_state(wiphy);
++ reg_process_self_managed_hints();
+ }
+
+ void wiphy_regulatory_deregister(struct wiphy *wiphy)
+diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
+index 05f8a0f27121a..b7f7e4e541d7b 100644
+--- a/tools/lib/traceevent/Makefile
++++ b/tools/lib/traceevent/Makefile
+@@ -263,7 +263,7 @@ define do_generate_dynamic_list_file
+ xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+ if [ "$$symbol_type" = "U W" ];then \
+ (echo '{'; \
+- $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
++ $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
+ echo '};'; \
+ ) > $2; \
+ else \
+diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
+index 6c9fcd757f310..b3e418afc21a2 100644
+--- a/tools/perf/bench/bench.h
++++ b/tools/perf/bench/bench.h
+@@ -2,6 +2,10 @@
+ #ifndef BENCH_H
+ #define BENCH_H
+
++#include <sys/time.h>
++
++extern struct timeval bench__start, bench__end, bench__runtime;
++
+ /*
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
+index 9aa3a674829b3..ee9b280651093 100644
+--- a/tools/perf/bench/futex-hash.c
++++ b/tools/perf/bench/futex-hash.c
+@@ -35,7 +35,7 @@ static unsigned int nfutexes = 1024;
+ static bool fshared = false, done = false, silent = false;
+ static int futex_flag = 0;
+
+-struct timeval start, end, runtime;
++struct timeval bench__start, bench__end, bench__runtime;
+ static pthread_mutex_t thread_lock;
+ static unsigned int threads_starting;
+ static struct stats throughput_stats;
+@@ -101,8 +101,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+ /* inform all threads that we're done for the day */
+ done = true;
+- gettimeofday(&end, NULL);
+- timersub(&end, &start, &runtime);
++ gettimeofday(&bench__end, NULL);
++ timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+
+ static void print_summary(void)
+@@ -112,7 +112,7 @@ static void print_summary(void)
+
+ printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+ !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+- (int) runtime.tv_sec);
++ (int)bench__runtime.tv_sec);
+ }
+
+ int bench_futex_hash(int argc, const char **argv)
+@@ -159,7 +159,7 @@ int bench_futex_hash(int argc, const char **argv)
+
+ threads_starting = nthreads;
+ pthread_attr_init(&thread_attr);
+- gettimeofday(&start, NULL);
++ gettimeofday(&bench__start, NULL);
+ for (i = 0; i < nthreads; i++) {
+ worker[i].tid = i;
+ worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
+@@ -202,7 +202,7 @@ int bench_futex_hash(int argc, const char **argv)
+ pthread_mutex_destroy(&thread_lock);
+
+ for (i = 0; i < nthreads; i++) {
+- unsigned long t = worker[i].ops/runtime.tv_sec;
++ unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ update_stats(&throughput_stats, t);
+ if (!silent) {
+ if (nfutexes == 1)
+diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
+index 8e9c4753e3040..017609ae35906 100644
+--- a/tools/perf/bench/futex-lock-pi.c
++++ b/tools/perf/bench/futex-lock-pi.c
+@@ -35,7 +35,6 @@ static bool silent = false, multi = false;
+ static bool done = false, fshared = false;
+ static unsigned int nthreads = 0;
+ static int futex_flag = 0;
+-struct timeval start, end, runtime;
+ static pthread_mutex_t thread_lock;
+ static unsigned int threads_starting;
+ static struct stats throughput_stats;
+@@ -62,7 +61,7 @@ static void print_summary(void)
+
+ printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+ !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+- (int) runtime.tv_sec);
++ (int)bench__runtime.tv_sec);
+ }
+
+ static void toggle_done(int sig __maybe_unused,
+@@ -71,8 +70,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+ /* inform all threads that we're done for the day */
+ done = true;
+- gettimeofday(&end, NULL);
+- timersub(&end, &start, &runtime);
++ gettimeofday(&bench__end, NULL);
++ timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+
+ static void *workerfn(void *arg)
+@@ -183,7 +182,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
+
+ threads_starting = nthreads;
+ pthread_attr_init(&thread_attr);
+- gettimeofday(&start, NULL);
++ gettimeofday(&bench__start, NULL);
+
+ create_threads(worker, thread_attr, cpu);
+ pthread_attr_destroy(&thread_attr);
+@@ -209,7 +208,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
+ pthread_mutex_destroy(&thread_lock);
+
+ for (i = 0; i < nthreads; i++) {
+- unsigned long t = worker[i].ops/runtime.tv_sec;
++ unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+
+ update_stats(&throughput_stats, t);
+ if (!silent)
+diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
+index a20cbc4454269..624e4ef73d1c0 100644
+--- a/tools/perf/tests/bp_account.c
++++ b/tools/perf/tests/bp_account.c
+@@ -22,7 +22,7 @@
+ #include "perf.h"
+ #include "cloexec.h"
+
+-volatile long the_var;
++static volatile long the_var;
+
+ static noinline int test_function(void)
967 + {