From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Mon, 06 Jun 2022 11:04:46 +0000
Message-Id: 1654513470.66aa243550ca0fe12b3d0aae436558ed42c72972.mpagano@gentoo
commit: 66aa243550ca0fe12b3d0aae436558ed42c72972
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun 6 11:04:30 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun 6 11:04:30 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=66aa2435

Linux patch 5.4.197

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1196_linux-5.4.197.patch | 1161 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1165 insertions(+)

diff --git a/0000_README b/0000_README
index 23f7d279..b35c40bb 100644
--- a/0000_README
+++ b/0000_README
@@ -827,6 +827,10 @@ Patch: 1195_linux-5.4.196.patch
From: http://www.kernel.org
Desc: Linux 5.4.196

+Patch: 1196_linux-5.4.197.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.197
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1196_linux-5.4.197.patch b/1196_linux-5.4.197.patch
new file mode 100644
index 00000000..91dc56f8
--- /dev/null
+++ b/1196_linux-5.4.197.patch
@@ -0,0 +1,1161 @@
+diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
+index fb56297f70dc8..857be0d44e809 100644
+--- a/Documentation/process/submitting-patches.rst
++++ b/Documentation/process/submitting-patches.rst
+@@ -133,7 +133,7 @@ as you intend it to.
+
+ The maintainer will thank you if you write your patch description in a
+ form which can be easily pulled into Linux's source code management
+-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
++system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
+
+ Solve only one problem per patch. If your description starts to get
+ long, that's a sign that you probably need to split up your patch.
+diff --git a/Makefile b/Makefile
+index c064ed925552d..57e27af9fc0c0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 196
++SUBLEVEL = 197
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 5c11ae66b5d8e..9cf8f5417e7f4 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -442,6 +442,11 @@ void __init xen_msi_init(void)
+
+ x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
+ x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
++ /*
++ * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
++ * controlled by the hypervisor.
++ */
++ pci_msi_ignore_mask = 1;
+ }
+ #endif
+
+diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
+index 887ec21aee494..1ffcea7b03558 100644
+--- a/crypto/ecrdsa.c
++++ b/crypto/ecrdsa.c
+@@ -112,15 +112,15 @@ static int ecrdsa_verify(struct akcipher_request *req)
+
+ /* Step 1: verify that 0 < r < q, 0 < s < q */
+ if (vli_is_zero(r, ndigits) ||
+- vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
++ vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
+ vli_is_zero(s, ndigits) ||
+- vli_cmp(s, ctx->curve->n, ndigits) == 1)
++ vli_cmp(s, ctx->curve->n, ndigits) >= 0)
+ return -EKEYREJECTED;
+
+ /* Step 2: calculate hash (h) of the message (passed as input) */
+ /* Step 3: calculate e = h \mod q */
+ vli_from_le64(e, digest, ndigits);
+- if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
++ if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
+ vli_sub(e, e, ctx->curve->n, ndigits);
+ if (vli_is_zero(e, ndigits))
+ e[0] = 1;
+@@ -136,7 +136,7 @@ static int ecrdsa_verify(struct akcipher_request *req)
+ /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+ ctx->curve);
+- if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
++ if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
+ vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+
+ /* Step 7: if R == r signature is valid */
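
For reference, a standalone sketch of why the ecrdsa hunk above matters (not part of the patch): vli_cmp() returns -1, 0 or 1 like memcmp, so the old "== 1" test let a value exactly equal to the curve order q slip through (comparison result 0), while ">= 0" enforces the full "0 < r < q" range check. The simplified vli_cmp() below is a stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's vli_cmp(): -1/0/1 like memcmp */
static int vli_cmp(const uint64_t *a, const uint64_t *b, unsigned int ndigits)
{
	for (int i = (int)ndigits - 1; i >= 0; i--) {
		if (a[i] != b[i])
			return a[i] > b[i] ? 1 : -1;
	}
	return 0;
}

int main(void)
{
	uint64_t n[1] = { 0x1000 };	/* toy curve order q */
	uint64_t r[1] = { 0x1000 };	/* forged r exactly equal to q */

	printf("old check rejects r == q: %d\n", vli_cmp(r, n, 1) == 1);	/* 0: accepted! */
	printf("new check rejects r == q: %d\n", vli_cmp(r, n, 1) >= 0);	/* 1: rejected */
	return 0;
}
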
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 76c668c05fa03..cc0b98affd64d 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -439,18 +439,29 @@ static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ {
+ struct acpi_data_attr *data_attr;
+ void __iomem *base;
+- ssize_t rc;
++ ssize_t size;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
++ size = data_attr->attr.size;
++
++ if (offset < 0)
++ return -EINVAL;
++
++ if (offset >= size)
++ return 0;
+
+- base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
++ if (count > size - offset)
++ count = size - offset;
++
++ base = acpi_os_map_iomem(data_attr->addr, size);
+ if (!base)
+ return -ENOMEM;
+- rc = memory_read_from_buffer(buf, count, &offset, base,
+- data_attr->attr.size);
+- acpi_os_unmap_memory(base, data_attr->attr.size);
+
+- return rc;
++ memcpy_fromio(buf, base + offset, count);
++
++ acpi_os_unmap_iomem(base, size);
++
++ return count;
+ }
+
+ static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
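
The rewritten acpi_data_show() above follows the standard pattern for bounded reads from a fixed-size binary attribute: reject negative offsets, report EOF at or past the end, and clamp count to the bytes that remain before copying. A minimal userspace sketch of the same pattern (bounded_read() is a hypothetical helper, not kernel code):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t bounded_read(char *dst, size_t count, off_t offset,
			    const char *src, size_t size)
{
	if (offset < 0)
		return -1;			/* -EINVAL in the kernel version */
	if ((size_t)offset >= size)
		return 0;			/* EOF */
	if (count > size - (size_t)offset)
		count = size - (size_t)offset;	/* clamp to what remains */
	memcpy(dst, src + offset, count);
	return (ssize_t)count;
}

int main(void)
{
	char dst[16];
	const char src[] = "0123456789";	/* 11 bytes incl. NUL */
	ssize_t n = bounded_read(dst, sizeof(dst), 6, src, sizeof(src));

	printf("read %zd bytes\n", n);		/* 5, not 16: clamped at the end */
	return 0;
}
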
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 1385c2c0acbe1..89635bb117d28 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -706,7 +706,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
+ if (!rc) {
+ out = (struct tpm2_get_cap_out *)
+ &buf.data[TPM_HEADER_SIZE];
+- *value = be32_to_cpu(out->value);
++ /*
++ * To prevent failing boot up of some systems, Infineon TPM2.0
++ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
++ * the TPM2_Getcapability command returns a zero length list
++ * in field upgrade mode.
++ */
++ if (be32_to_cpu(out->property_cnt) > 0)
++ *value = be32_to_cpu(out->value);
++ else
++ rc = -ENODATA;
+ }
+ tpm_buf_destroy(&buf);
+ return rc;
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 64428dbed9928..4236607f69e43 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -685,6 +685,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+ ibmvtpm->rtce_buf != NULL,
+ HZ)) {
++ rc = -ENODEV;
+ dev_err(dev, "CRQ response timed out\n");
+ goto init_irq_cleanup;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index e43a76b027a2a..d59455b2d401f 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -2822,7 +2822,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
+ }
+
+ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
+- u16 wm[8])
++ u16 wm[])
+ {
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 37270b8f4e58b..653f436aa4593 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -2158,6 +2158,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+ USB_DEVICE_ID_GOOGLE_TOUCH_ROSE) },
++ { .driver_data = MT_CLS_GOOGLE,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_GOOGLE,
++ USB_DEVICE_ID_GOOGLE_WHISKERS) },
+
+ /* Generic MT device */
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index 2f95e25a10f7c..53325419ec13d 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -81,6 +81,7 @@
+
+ #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
+ #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
++#define ISMT_LOG_ENTRIES 3 /* number of interrupt cause log entries */
+
+ /* Hardware Descriptor Constants - Control Field */
+ #define ISMT_DESC_CWRL 0x01 /* Command/Write Length */
+@@ -174,6 +175,8 @@ struct ismt_priv {
+ u8 head; /* ring buffer head pointer */
+ struct completion cmp; /* interrupt completion */
+ u8 buffer[I2C_SMBUS_BLOCK_MAX + 16]; /* temp R/W data buffer */
++ dma_addr_t log_dma;
++ u32 *log;
+ };
+
+ /**
+@@ -408,6 +411,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
+ memset(desc, 0, sizeof(struct ismt_desc));
+ desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
+
++ /* Always clear the log entries */
++ memset(priv->log, 0, ISMT_LOG_ENTRIES * sizeof(u32));
++
+ /* Initialize common control bits */
+ if (likely(pci_dev_msi_enabled(priv->pci_dev)))
+ desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
+@@ -697,6 +703,8 @@ static void ismt_hw_init(struct ismt_priv *priv)
+ /* initialize the Master Descriptor Base Address (MDBA) */
+ writeq(priv->io_rng_dma, priv->smba + ISMT_MSTR_MDBA);
+
++ writeq(priv->log_dma, priv->smba + ISMT_GR_SMTICL);
++
+ /* initialize the Master Control Register (MCTRL) */
+ writel(ISMT_MCTRL_MEIE, priv->smba + ISMT_MSTR_MCTRL);
+
+@@ -784,6 +792,12 @@ static int ismt_dev_init(struct ismt_priv *priv)
+ priv->head = 0;
+ init_completion(&priv->cmp);
+
++ priv->log = dmam_alloc_coherent(&priv->pci_dev->dev,
++ ISMT_LOG_ENTRIES * sizeof(u32),
++ &priv->log_dma, GFP_KERNEL);
++ if (!priv->log)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+index 19f8eec387172..107aeb8b54da4 100644
+--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
++++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+@@ -208,6 +208,7 @@ static int thunder_i2c_probe_pci(struct pci_dev *pdev,
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
++ i2c->adap.dev.fwnode = dev->fwnode;
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "Cavium ThunderX i2c adapter at %s", dev_name(dev));
+ i2c_set_adapdata(&i2c->adap, i2c);
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index bfb945fc33a17..3c9cdb87770f2 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -335,7 +335,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
+ * The Goodix panel will send spurious interrupts after a
+ * 'finger up' event, which will always cause a timeout.
+ */
+- return 0;
++ return -ENOMSG;
+ }
+
+ static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 3ed8ca47bc6e6..fa674e9b6f23d 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2817,6 +2817,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ return DM_MAPIO_SUBMITTED;
+ }
+
++static char hex2asc(unsigned char c)
++{
++ return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++}
++
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+ {
+@@ -2835,9 +2840,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
+ if (cc->key_size > 0) {
+ if (cc->key_string)
+ DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+- else
+- for (i = 0; i < cc->key_size; i++)
+- DMEMIT("%02x", cc->key[i]);
++ else {
++ for (i = 0; i < cc->key_size; i++) {
++ DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
++ hex2asc(cc->key[i] & 0xf));
++ }
++ }
+ } else
+ DMEMIT("-");
+
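
The hex2asc() helper added above is a branch-free nibble-to-hex conversion, so emitting the key does not branch on secret data the way format-string machinery might. For c in 0..9 the expression (unsigned)(9 - c) >> 4 is 0, giving c + '0'; for c in 10..15 the subtraction wraps, the shift leaves the low bits all set, and & 0x27 keeps 0x27 (39), giving 'a' + (c - 10). A quick standalone check:

#include <stdio.h>

static char hex2asc(unsigned char c)
{
	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
}

int main(void)
{
	for (unsigned char c = 0; c < 16; c++)
		putchar(hex2asc(c));	/* prints: 0123456789abcdef */
	putchar('\n');
	return 0;
}
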
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 28a9eeae83b66..acbda91e7643c 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4149,8 +4149,6 @@ try_smaller_buffer:
+ }
+
+ if (should_write_sb) {
+- int r;
+-
+ init_journal(ic, 0, ic->journal_sections, 0);
+ r = dm_integrity_failed(ic);
+ if (unlikely(r)) {
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index 71417048256af..ce6d3bce1b7b0 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -224,6 +224,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
+ atomic_read(&shared->in_flight[READ]),
+ atomic_read(&shared->in_flight[WRITE]));
+ }
++ cond_resched();
+ }
+ dm_stat_free(&s->rcu_head);
+ }
+@@ -313,6 +314,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
+ atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
++ cond_resched();
+ }
+
+ if (s->n_histogram_entries) {
+@@ -325,6 +327,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ s->stat_shared[ni].tmp.histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+
+@@ -345,6 +348,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+ for (ni = 0; ni < n_entries; ni++) {
+ p[ni].histogram = hi;
+ hi += s->n_histogram_entries + 1;
++ cond_resched();
+ }
+ }
+ }
+@@ -474,6 +478,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
+ }
+ DMEMIT("\n");
+ }
++ cond_resched();
+ }
+ mutex_unlock(&stats->mutex);
+
+@@ -750,6 +755,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ local_irq_enable();
+ }
+ }
++ cond_resched();
+ }
+ }
+
+@@ -865,6 +871,8 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+
+ if (unlikely(sz + 1 >= maxlen))
+ goto buffer_overflow;
++
++ cond_resched();
+ }
+
+ if (clear)
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 711f101447e3e..9dcdf34b7e32d 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -1217,6 +1217,7 @@ bad:
+
+ static struct target_type verity_target = {
+ .name = "verity",
++ .features = DM_TARGET_IMMUTABLE,
+ .version = {1, 5, 0},
+ .module = THIS_MODULE,
+ .ctr = verity_ctr,
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 08a7f97750f7a..c40327ad6ad53 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -609,17 +609,17 @@ int raid5_calc_degraded(struct r5conf *conf)
+ return degraded;
+ }
+
+-static int has_failed(struct r5conf *conf)
++static bool has_failed(struct r5conf *conf)
+ {
+- int degraded;
++ int degraded = conf->mddev->degraded;
+
+- if (conf->mddev->reshape_position == MaxSector)
+- return conf->mddev->degraded > conf->max_degraded;
++ if (test_bit(MD_BROKEN, &conf->mddev->flags))
++ return true;
+
+- degraded = raid5_calc_degraded(conf);
+- if (degraded > conf->max_degraded)
+- return 1;
+- return 0;
++ if (conf->mddev->reshape_position != MaxSector)
++ degraded = raid5_calc_degraded(conf);
++
++ return degraded > conf->max_degraded;
+ }
+
+ struct stripe_head *
+@@ -2679,34 +2679,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
+ unsigned long flags;
+ pr_debug("raid456: error called\n");
+
++ pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n",
++ mdname(mddev), bdevname(rdev->bdev, b));
++
+ spin_lock_irqsave(&conf->device_lock, flags);
++ set_bit(Faulty, &rdev->flags);
++ clear_bit(In_sync, &rdev->flags);
++ mddev->degraded = raid5_calc_degraded(conf);
+
+- if (test_bit(In_sync, &rdev->flags) &&
+- mddev->degraded == conf->max_degraded) {
+- /*
+- * Don't allow to achieve failed state
+- * Don't try to recover this device
+- */
++ if (has_failed(conf)) {
++ set_bit(MD_BROKEN, &conf->mddev->flags);
+ conf->recovery_disabled = mddev->recovery_disabled;
+- spin_unlock_irqrestore(&conf->device_lock, flags);
+- return;
++
++ pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n",
++ mdname(mddev), mddev->degraded, conf->raid_disks);
++ } else {
++ pr_crit("md/raid:%s: Operation continuing on %d devices.\n",
++ mdname(mddev), conf->raid_disks - mddev->degraded);
+ }
+
+- set_bit(Faulty, &rdev->flags);
+- clear_bit(In_sync, &rdev->flags);
+- mddev->degraded = raid5_calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+ set_bit(Blocked, &rdev->flags);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+- pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+- "md/raid:%s: Operation continuing on %d devices.\n",
+- mdname(mddev),
+- bdevname(rdev->bdev, b),
+- mdname(mddev),
+- conf->raid_disks - mddev->degraded);
+ r5c_update_on_rdev_error(mddev, rdev);
+ }
+
+diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
+index 8d6b09623d884..6fba00e03c67b 100644
+--- a/drivers/media/platform/vim2m.c
++++ b/drivers/media/platform/vim2m.c
+@@ -1333,12 +1333,6 @@ static int vim2m_probe(struct platform_device *pdev)
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+- if (ret) {
+- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+- goto error_v4l2;
+- }
+-
+ video_set_drvdata(vfd, dev);
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+@@ -1361,12 +1355,20 @@ static int vim2m_probe(struct platform_device *pdev)
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &m2m_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev;
++#endif
++
++ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
++ if (ret) {
++ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
++ goto error_m2m;
++ }
+
++#ifdef CONFIG_MEDIA_CONTROLLER
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+- goto error_dev;
++ goto error_v4l2;
+ }
+
+ ret = media_device_register(&dev->mdev);
+@@ -1381,11 +1383,13 @@ static int vim2m_probe(struct platform_device *pdev)
+ error_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ #endif
+-error_dev:
++error_v4l2:
+ video_unregister_device(&dev->vfd);
+ /* vim2m_device_release called by video_unregister_device to release various objects */
+ return ret;
+-error_v4l2:
++error_m2m:
++ v4l2_m2m_release(dev->m2m_dev);
++error_dev:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ error_free:
+ kfree(dev);
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 2c06cdcd3e751..d7478d332820e 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -1880,6 +1880,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ /* AST2400 doesn't have working HW checksum generation */
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
++
++ /* AST2600 tx checksum with NCSI is broken */
++ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
++ netdev->hw_features &= ~NETIF_F_HW_CSUM;
++
+ if (np && of_get_property(np, "no-hw-checksum", NULL))
+ netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+ netdev->features |= netdev->hw_features;
+diff --git a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+index 2801ca7062732..68a5b627fb9b2 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
++++ b/drivers/pinctrl/sunxi/pinctrl-suniv-f1c100s.c
+@@ -204,7 +204,7 @@ static const struct sunxi_desc_pin suniv_f1c100s_pins[] = {
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd"), /* D20 */
+- SUNXI_FUNCTION(0x3, "lvds1"), /* RX */
++ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index d8d44fd9a92f4..ea2fd3a73c3a8 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -1351,9 +1351,11 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+
+ sec_len = *(pos++); len-= 1;
+
+- if (sec_len>0 && sec_len<=len) {
++ if (sec_len > 0 &&
++ sec_len <= len &&
++ sec_len <= 32) {
+ ssid[ssid_index].SsidLength = sec_len;
+- memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
++ memcpy(ssid[ssid_index].Ssid, pos, sec_len);
+ /* DBG_871X("%s COMBO_SCAN with specific ssid:%s, %d\n", __func__ */
+ /* , ssid[ssid_index].Ssid, ssid[ssid_index].SsidLength); */
+ ssid_index++;
+diff --git a/fs/exec.c b/fs/exec.c
+index 098de820abcc9..a7d78241082a2 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -454,6 +454,9 @@ static int prepare_arg_pages(struct linux_binprm *bprm,
+ unsigned long limit, ptr_size;
+
+ bprm->argc = count(argv, MAX_ARG_STRINGS);
++ if (bprm->argc == 0)
++ pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n",
++ current->comm, bprm->filename);
+ if (bprm->argc < 0)
+ return bprm->argc;
+
+@@ -482,8 +485,14 @@ static int prepare_arg_pages(struct linux_binprm *bprm,
+ * the stack. They aren't stored until much later when we can't
+ * signal to the parent that the child has run out of stack space.
+ * Instead, calculate it here so it's possible to fail gracefully.
++ *
++ * In the case of argc = 0, make sure there is space for adding a
++ * empty string (which will bump argc to 1), to ensure confused
++ * userspace programs don't start processing from argv[1], thinking
++ * argc can never be 0, to keep them from walking envp by accident.
++ * See do_execveat_common().
+ */
+- ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
++ ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *);
+ if (limit <= ptr_size)
+ return -E2BIG;
+ limit -= ptr_size;
+@@ -1848,6 +1857,20 @@ static int __do_execve_file(int fd, struct filename *filename,
+ if (retval < 0)
+ goto out;
+
++ /*
++ * When argv is empty, add an empty string ("") as argv[0] to
++ * ensure confused userspace programs that start processing
++ * from argv[1] won't end up walking envp. See also
++ * bprm_stack_limits().
++ */
++ if (bprm->argc == 0) {
++ const char *argv[] = { "", NULL };
++ retval = copy_strings_kernel(1, argv, bprm);
++ if (retval < 0)
++ goto out;
++ bprm->argc = 1;
++ }
++
+ retval = exec_binprm(bprm);
+ if (retval < 0)
+ goto out;
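
The two fs/exec.c hunks above close the argc == 0 hole: userspace may legally call execve() with an empty argv, and programs that assume argv[0] always exists can then read envp through argv[1] (the pattern behind CVE-2021-4034 in pkexec). With this change the kernel warns once, reserves stack space for the substitute, and hands the program { "", NULL } instead. The triggering call looks like this (the target path is illustrative only):

#include <unistd.h>

int main(void)
{
	char *const argv[] = { NULL };	/* argc == 0, argv[0] == NULL */
	char *const envp[] = { NULL };

	/* after this patch the target sees argc == 1 and argv[0] == "" */
	execve("/usr/bin/some-helper", argv, envp);
	return 1;	/* reached only if execve() failed */
}
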
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 9e717796e57b7..a4dc182e8989b 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -775,6 +775,7 @@ static inline bool nfs_error_is_fatal_on_server(int err)
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
++ case -ENOMEM:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 62eb78ac7437f..228c2b0753dcf 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -6894,16 +6894,12 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
+ if (sop->so_is_open_owner || !same_owner_str(sop, owner))
+ continue;
+
+- /* see if there are still any locks associated with it */
+- lo = lockowner(sop);
+- list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
+- if (check_for_locks(stp->st_stid.sc_file, lo)) {
+- status = nfserr_locks_held;
+- spin_unlock(&clp->cl_lock);
+- return status;
+- }
++ if (atomic_read(&sop->so_count) != 1) {
++ spin_unlock(&clp->cl_lock);
++ return nfserr_locks_held;
+ }
+
++ lo = lockowner(sop);
+ nfs4_get_stateowner(sop);
+ break;
+ }
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 3f6b8195ae9eb..aa5c7141c8d17 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -118,10 +118,12 @@ enum lockdown_reason {
+ LOCKDOWN_MMIOTRACE,
+ LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
++ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_KCORE,
+ LOCKDOWN_KPROBES,
+ LOCKDOWN_BPF_READ,
++ LOCKDOWN_DBG_READ_KERNEL,
+ LOCKDOWN_PERF,
+ LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index a1869a6789448..a186c245a6f41 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -420,7 +420,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
+ }
+
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+- struct sock *sk, u32 port_offset,
++ struct sock *sk, u64 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16,
+ struct inet_timewait_sock **));
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index 09f2efea0b970..5805fe4947f3c 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -59,8 +59,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+- if (!nf_ct_is_confirmed(ct))
++ if (!nf_ct_is_confirmed(ct)) {
+ ret = __nf_conntrack_confirm(skb);
++
++ if (ret == NF_ACCEPT)
++ ct = (struct nf_conn *)skb_nfct(skb);
++ }
++
+ if (likely(ret == NF_ACCEPT))
+ nf_ct_deliver_cached_events(ct);
+ }
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index d7d2495f83c27..dac91aa38c5af 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -4,8 +4,8 @@
+
+ #include <linux/types.h>
+
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 097ab02989f92..565987557ad89 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -56,6 +56,7 @@
+ #include <linux/vmacache.h>
+ #include <linux/rcupdate.h>
+ #include <linux/irq.h>
++#include <linux/security.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/byteorder.h>
+@@ -685,6 +686,29 @@ cpu_master_loop:
+ continue;
+ kgdb_connected = 0;
+ } else {
++ /*
++ * This is a brutal way to interfere with the debugger
++ * and prevent gdb being used to poke at kernel memory.
++ * This could cause trouble if lockdown is applied when
++ * there is already an active gdb session. For now the
++ * answer is simply "don't do that". Typically lockdown
++ * *will* be applied before the debug core gets started
++ * so only developers using kgdb for fairly advanced
++ * early kernel debug can be biten by this. Hopefully
++ * they are sophisticated enough to take care of
++ * themselves, especially with help from the lockdown
++ * message printed on the console!
++ */
++ if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
++ if (IS_ENABLED(CONFIG_KGDB_KDB)) {
++ /* Switch back to kdb if possible... */
++ dbg_kdb_mode = 1;
++ continue;
++ } else {
++ /* ... otherwise just bail */
++ break;
++ }
++ }
+ error = gdb_serial_stub(ks);
+ }
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 4567fe998c306..7c96bf9a6c2c2 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -45,6 +45,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
++#include <linux/security.h>
+ #include "kdb_private.h"
+
+ #undef MODULE_PARAM_PREFIX
+@@ -198,10 +199,62 @@ struct task_struct *kdb_curr_task(int cpu)
+ }
+
+ /*
+- * Check whether the flags of the current command and the permissions
+- * of the kdb console has allow a command to be run.
++ * Update the permissions flags (kdb_cmd_enabled) to match the
++ * current lockdown state.
++ *
++ * Within this function the calls to security_locked_down() are "lazy". We
++ * avoid calling them if the current value of kdb_cmd_enabled already excludes
++ * flags that might be subject to lockdown. Additionally we deliberately check
++ * the lockdown flags independently (even though read lockdown implies write
++ * lockdown) since that results in both simpler code and clearer messages to
++ * the user on first-time debugger entry.
++ *
++ * The permission masks during a read+write lockdown permits the following
++ * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE).
++ *
++ * The INSPECT commands are not blocked during lockdown because they are
++ * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes
++ * forcing them to have no arguments) and lsmod. These commands do expose
++ * some kernel state but do not allow the developer seated at the console to
++ * choose what state is reported. SIGNAL and REBOOT should not be controversial,
++ * given these are allowed for root during lockdown already.
++ */
++static void kdb_check_for_lockdown(void)
++{
++ const int write_flags = KDB_ENABLE_MEM_WRITE |
++ KDB_ENABLE_REG_WRITE |
++ KDB_ENABLE_FLOW_CTRL;
++ const int read_flags = KDB_ENABLE_MEM_READ |
++ KDB_ENABLE_REG_READ;
++
++ bool need_to_lockdown_write = false;
++ bool need_to_lockdown_read = false;
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags))
++ need_to_lockdown_write =
++ security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL);
++
++ if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags))
++ need_to_lockdown_read =
++ security_locked_down(LOCKDOWN_DBG_READ_KERNEL);
++
++ /* De-compose KDB_ENABLE_ALL if required */
++ if (need_to_lockdown_write || need_to_lockdown_read)
++ if (kdb_cmd_enabled & KDB_ENABLE_ALL)
++ kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
++
++ if (need_to_lockdown_write)
++ kdb_cmd_enabled &= ~write_flags;
++
++ if (need_to_lockdown_read)
++ kdb_cmd_enabled &= ~read_flags;
++}
++
++/*
++ * Check whether the flags of the current command, the permissions of the kdb
++ * console and the lockdown state allow a command to be run.
+ */
+-static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
++static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+ bool no_args)
+ {
+ /* permissions comes from userspace so needs massaging slightly */
+@@ -1188,6 +1241,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ kdb_curr_task(raw_smp_processor_id());
+
+ KDB_DEBUG_STATE("kdb_local 1", reason);
++
++ kdb_check_for_lockdown();
++
+ kdb_go_count = 0;
+ if (reason == KDB_REASON_DEBUG) {
+ /* special case below */
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 6f4bcf5245547..b537a83678e11 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -1462,6 +1462,7 @@ int assoc_array_gc(struct assoc_array *array,
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
++ bool retained;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+@@ -1538,6 +1539,7 @@ continue_node:
+ goto descend;
+ }
+
++retry_compress:
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+@@ -1555,6 +1557,7 @@ continue_node:
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
++ retained = false;
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+@@ -1604,9 +1607,14 @@ continue_node:
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
++ retained = true;
+ }
+ }
+
++ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
++ pr_devel("internal nodes remain despite enough space, retrying\n");
++ goto retry_compress;
++ }
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 490e5f3ae614a..6b100f02ee431 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1748,11 +1748,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
+ */
+ static void lock_zspage(struct zspage *zspage)
+ {
+- struct page *page = get_first_page(zspage);
++ struct page *curr_page, *page;
+
+- do {
+- lock_page(page);
+- } while ((page = get_next_page(page)) != NULL);
++ /*
++ * Pages we haven't locked yet can be migrated off the list while we're
++ * trying to lock them, so we need to be careful and only attempt to
++ * lock each page under migrate_read_lock(). Otherwise, the page we lock
++ * may no longer belong to the zspage. This means that we may wait for
++ * the wrong page to unlock, so we must take a reference to the page
++ * prior to waiting for it to unlock outside migrate_read_lock().
++ */
++ while (1) {
++ migrate_read_lock(zspage);
++ page = get_first_page(zspage);
++ if (trylock_page(page))
++ break;
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ }
+
++ curr_page = page;
++ while ((page = get_next_page(curr_page))) {
++ if (trylock_page(page)) {
++ curr_page = page;
++ } else {
++ get_page(page);
++ migrate_read_unlock(zspage);
++ wait_on_page_locked(page);
++ put_page(page);
++ migrate_read_lock(zspage);
++ }
++ }
++ migrate_read_unlock(zspage);
+ }
+
+ static int zs_init_fs_context(struct fs_context *fc)
+diff --git a/net/core/filter.c b/net/core/filter.c
+index e16b2b5cda981..b0df4ddbe30c3 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1668,7 +1668,7 @@ BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+
+ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
+ return -EINVAL;
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ return -EFAULT;
+ if (unlikely(bpf_try_make_writable(skb, offset + len)))
+ return -EFAULT;
+@@ -1703,7 +1703,7 @@ BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+ {
+ void *ptr;
+
+- if (unlikely(offset > 0xffff))
++ if (unlikely(offset > INT_MAX))
+ goto err_clear;
+
+ ptr = skb_header_pointer(skb, offset, len, to);
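
About the new INT_MAX bound in the two BPF helpers above: offset is a u32, but it flows into helpers such as skb_header_pointer() that take a signed int, so any value above INT_MAX would turn negative there, while the old 0xffff limit was far stricter than the data skb frags can actually hold. A small userspace sketch of the failure mode being excluded (consume_offset() is a stand-in, not the kernel helper):

#include <limits.h>
#include <stdio.h>

/* stand-in for a helper that, like skb_header_pointer(), takes an int */
static int consume_offset(int offset)
{
	return offset;
}

int main(void)
{
	unsigned int offset = 0x80000000u;	/* > INT_MAX */

	if (offset > INT_MAX)
		puts("rejected: would wrap negative as an int");
	else
		printf("offset seen by helper: %d\n", consume_offset((int)offset));
	return 0;
}
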
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 2f9796a1a63ff..a1867c65ac632 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -97,7 +97,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ }
+ EXPORT_SYMBOL(secure_tcpv6_seq);
+
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+ {
+ const struct {
+@@ -147,7 +147,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ }
+ EXPORT_SYMBOL_GPL(secure_tcp_seq);
+
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
++u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
+ net_secret_init();
+ return siphash_4u32((__force u32)saddr, (__force u32)daddr,
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index cbbeb0eea0c35..959f4f0c85460 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -464,7 +464,7 @@ not_unique:
+ return -EADDRNOTAVAIL;
+ }
+
+-static u32 inet_sk_port_offset(const struct sock *sk)
++static u64 inet_sk_port_offset(const struct sock *sk)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+
+@@ -671,8 +671,19 @@ unlock:
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+
++/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm
++ * Note that we use 32bit integers (vs RFC 'short integers')
++ * because 2^16 is not a multiple of num_ephemeral and this
++ * property might be used by clever attacker.
++ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
++ * we use 256 instead to really give more isolation and
++ * privacy, this only consumes 1 KB of kernel memory.
++ */
++#define INET_TABLE_PERTURB_SHIFT 8
++static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
++
+ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+- struct sock *sk, u32 port_offset,
++ struct sock *sk, u64 port_offset,
+ int (*check_established)(struct inet_timewait_death_row *,
+ struct sock *, __u16, struct inet_timewait_sock **))
+ {
+@@ -684,8 +695,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct inet_bind_bucket *tb;
+ u32 remaining, offset;
+ int ret, i, low, high;
+- static u32 hint;
+ int l3mdev;
++ u32 index;
+
+ if (port) {
+ head = &hinfo->bhash[inet_bhashfn(net, port,
+@@ -712,7 +723,12 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ if (likely(remaining > 1))
+ remaining &= ~1U;
+
+- offset = (hint + port_offset) % remaining;
++ net_get_random_once(table_perturb, sizeof(table_perturb));
++ index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
++
++ offset = READ_ONCE(table_perturb[index]) + port_offset;
++ offset %= remaining;
++
+ /* In first pass we try ports of @low parity.
+ * inet_csk_get_port() does the opposite choice.
+ */
+@@ -766,7 +782,7 @@ next_port:
+ return -EADDRNOTAVAIL;
+
+ ok:
+- hint += i + 2;
++ WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
+
+ /* Head lock still held and bh's disabled */
+ inet_bind_hash(sk, tb, port);
+@@ -789,7 +805,7 @@ ok:
+ int inet_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk)
+ {
+- u32 port_offset = 0;
++ u64 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet_sk_port_offset(sk);
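
The inet_hashtables.c change above replaces the single global hint with RFC 6056 section 3.3.4 ("Algorithm 4: Double-Hash Port Selection"): one keyed hash of the connection tuple picks where in the ephemeral range the search starts, and a second hash of that same value selects a slot in a 256-entry perturbation table, so the starting point advances independently per bucket and observing one destination reveals nothing about the port sequence used for another. A compact sketch of the idea, with toy hashes standing in for the kernel's siphash/hash_32 (names and constants here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 8
static uint32_t table_perturb[1u << TABLE_BITS];	/* zero-initialized */

/* toy stand-in for secure_ipv4_port_ephemeral() (keyed siphash in-kernel) */
static uint64_t port_offset_of(uint32_t daddr, uint16_t dport)
{
	return ((uint64_t)daddr * 0x9E3779B97F4A7C15ull) ^ dport;
}

static unsigned int pick_port(uint32_t daddr, uint16_t dport,
			      uint16_t low, uint32_t remaining)
{
	uint64_t off = port_offset_of(daddr, dport);
	uint32_t index = (uint32_t)(off >> (64 - TABLE_BITS));	/* second hash */
	uint32_t offset = (table_perturb[index] + (uint32_t)off) % remaining;

	table_perturb[index] += 2;	/* mirrors the "i + 2" advance on success */
	return low + offset;
}

int main(void)
{
	/* two destinations land in different perturb slots */
	printf("%u\n", pick_port(0x0a000001, 443, 32768, 28232));
	printf("%u\n", pick_port(0x0a000002, 443, 32768, 28232));
	return 0;
}
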
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index ab12e00f6bfff..528c78bc920e0 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -262,7 +262,7 @@ not_unique:
+ return -EADDRNOTAVAIL;
+ }
+
+-static u32 inet6_sk_port_offset(const struct sock *sk)
++static u64 inet6_sk_port_offset(const struct sock *sk)
+ {
+ const struct inet_sock *inet = inet_sk(sk);
+
+@@ -274,7 +274,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk)
+ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+ struct sock *sk)
+ {
+- u32 port_offset = 0;
++ u64 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet6_sk_port_offset(sk);
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index f67d3ba72c496..dd064d5eff6ed 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2904,7 +2904,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
+ break;
+ if (!aalg->pfkey_supported)
+ continue;
+- if (aalg_tmpl_set(t, aalg))
++ if (aalg_tmpl_set(t, aalg) && aalg->available)
+ sz += sizeof(struct sadb_comb);
+ }
+ return sz + sizeof(struct sadb_prop);
+@@ -2922,7 +2922,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!ealg->pfkey_supported)
+ continue;
+
+- if (!(ealg_tmpl_set(t, ealg)))
++ if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+ continue;
+
+ for (k = 1; ; k++) {
+@@ -2933,7 +2933,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
+ if (!aalg->pfkey_supported)
+ continue;
+
+- if (aalg_tmpl_set(t, aalg))
++ if (aalg_tmpl_set(t, aalg) && aalg->available)
+ sz += sizeof(struct sadb_comb);
+ }
+ }
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 5d151e8f89320..f7228afd81ebd 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -5,7 +5,7 @@
+ * Copyright 2006-2010 Johannes Berg <johannes@××××××××××××.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
+- * Copyright (C) 2018-2019 Intel Corporation
++ * Copyright (C) 2018-2021 Intel Corporation
+ */
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+@@ -891,9 +891,6 @@ int wiphy_register(struct wiphy *wiphy)
+ return res;
+ }
+
+- /* set up regulatory info */
+- wiphy_regulatory_register(wiphy);
+-
+ list_add_rcu(&rdev->list, &cfg80211_rdev_list);
+ cfg80211_rdev_list_generation++;
+
+@@ -904,6 +901,9 @@ int wiphy_register(struct wiphy *wiphy)
+ cfg80211_debugfs_rdev_add(rdev);
+ nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+
++ /* set up regulatory info */
++ wiphy_regulatory_register(wiphy);
++
+ if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
+ struct regulatory_request request;
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 0f3b57a73670b..74caece779633 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3790,6 +3790,7 @@ void wiphy_regulatory_register(struct wiphy *wiphy)
+
+ wiphy_update_regulatory(wiphy, lr->initiator);
+ wiphy_all_share_dfs_chan_state(wiphy);
++ reg_process_self_managed_hints();
+ }
+
+ void wiphy_regulatory_deregister(struct wiphy *wiphy)
+diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
+index 3f38583bed06f..655a6edb5d7f9 100644
+--- a/security/lockdown/lockdown.c
++++ b/security/lockdown/lockdown.c
+@@ -33,10 +33,12 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+ [LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
++ [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM",
+ [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ [LOCKDOWN_KCORE] = "/proc/kcore access",
+ [LOCKDOWN_KPROBES] = "use of kprobes",
+ [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
++ [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM",
+ [LOCKDOWN_PERF] = "unsafe use of perf",
+ [LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",