
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 02 Mar 2022 13:08:44
Message-Id: 1646226509.b8ebdaacbc21d984b8b46909f0fa94bca15392a9.mpagano@gentoo
commit: b8ebdaacbc21d984b8b46909f0fa94bca15392a9
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 2 13:08:29 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 2 13:08:29 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b8ebdaac

Linux patch 4.14.269

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1268_linux-4.14.269.patch | 1019 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1023 insertions(+)

diff --git a/0000_README b/0000_README
index d97a880d..a89f7164 100644
--- a/0000_README
+++ b/0000_README
@@ -1119,6 +1119,10 @@ Patch: 1267_linux-4.14.268.patch
From: https://www.kernel.org
Desc: Linux 4.14.268

+Patch: 1268_linux-4.14.269.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.269
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1268_linux-4.14.269.patch b/1268_linux-4.14.269.patch
new file mode 100644
index 00000000..49c2a68a
--- /dev/null
+++ b/1268_linux-4.14.269.patch
@@ -0,0 +1,1019 @@
+diff --git a/Makefile b/Makefile
+index e3be05e00d9d2..560ecede8070b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 268
++SUBLEVEL = 269
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index e36f7b75ab07b..b3c19ab2485ce 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -354,7 +354,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
+
+- return 0;
++ return ret;
+ }
+ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ {
+@@ -411,7 +411,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ __asm__ __volatile__ (
+ " mtsp %4, %%sr1\n"
+ " zdep %2, 29, 2, %%r19\n"
+-" dep %%r0, 31, 2, %2\n"
++" dep %%r0, 31, 2, %3\n"
+ " mtsar %%r19\n"
+ " zvdepi -2, 32, %%r19\n"
+ "1: ldw 0(%%sr1,%3),%%r20\n"
+@@ -423,7 +423,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+ " andcm %%r21, %%r19, %%r21\n"
+ " or %1, %%r20, %1\n"
+ " or %2, %%r21, %2\n"
+-"3: stw %1,0(%%sr1,%1)\n"
++"3: stw %1,0(%%sr1,%3)\n"
+ "4: stw %%r1,4(%%sr1,%3)\n"
+ "5: stw %2,8(%%sr1,%3)\n"
+ " copy %%r0, %0\n"
+@@ -611,7 +611,6 @@ void handle_unaligned(struct pt_regs *regs)
+ ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
+ break;
+ }
+-#ifdef CONFIG_PA20
+ switch (regs->iir & OPCODE2_MASK)
+ {
+ case OPCODE_FLDD_L:
+@@ -622,22 +621,23 @@ void handle_unaligned(struct pt_regs *regs)
+ flop=1;
+ ret = emulate_std(regs, R2(regs->iir),1);
+ break;
++#ifdef CONFIG_PA20
+ case OPCODE_LDD_L:
+ ret = emulate_ldd(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STD_L:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+- }
+ #endif
++ }
+ switch (regs->iir & OPCODE3_MASK)
+ {
+ case OPCODE_FLDW_L:
+ flop=1;
+- ret = emulate_ldw(regs, R2(regs->iir),0);
++ ret = emulate_ldw(regs, R2(regs->iir), 1);
+ break;
+ case OPCODE_LDW_M:
+- ret = emulate_ldw(regs, R2(regs->iir),1);
++ ret = emulate_ldw(regs, R2(regs->iir), 0);
+ break;
+
+ case OPCODE_FSTW_L:
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
+index 3ba843f5cdc0f..821fc1f2324c8 100644
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -919,6 +919,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ irqmask &= ~0x10;
+ pci_write_config_byte(dev, 0x5a, irqmask);
+
++ /*
++ * HPT371 chips physically have only one channel, the secondary one,
++ * but the primary channel registers do exist! Go figure...
++ * So, we manually disable the non-existing channel here
++ * (if the BIOS hasn't done this already).
++ */
++ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
++ u8 mcr1;
++
++ pci_read_config_byte(dev, 0x50, &mcr1);
++ mcr1 &= ~0x04;
++ pci_write_config_byte(dev, 0x50, mcr1);
++ }
++
+ /*
+ * default to pci clock. make sure MA15/16 are set to output
+ * to prevent drives having problems with 40-pin cables. Needed
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 51276dd0d864c..4824c775dd7d0 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -4418,6 +4418,7 @@ static void drm_add_display_info(struct drm_connector *connector,
+ if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+ return;
+
++ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ drm_parse_cea_ext(connector, edid);
+
+ /*
+@@ -4466,7 +4467,6 @@ static void drm_add_display_info(struct drm_connector *connector,
+ DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+
+- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+ if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+index 8cf3d1b4662de..ce70a193caa7f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+@@ -70,13 +70,20 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
+ return 0;
+ }
+
+-static void
++static int
+ nvkm_pmu_reset(struct nvkm_pmu *pmu)
+ {
+ struct nvkm_device *device = pmu->subdev.device;
+
+ if (!pmu->func->enabled(pmu))
+- return;
++ return 0;
++
++ /* Inhibit interrupts, and wait for idle. */
++ nvkm_wr32(device, 0x10a014, 0x0000ffff);
++ nvkm_msec(device, 2000,
++ if (!nvkm_rd32(device, 0x10a04c))
++ break;
++ );
+
+ /* Reset. */
+ if (pmu->func->reset)
+@@ -87,37 +94,25 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
+ if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
+ break;
+ );
++
++ return 0;
+ }
+
+ static int
+ nvkm_pmu_preinit(struct nvkm_subdev *subdev)
+ {
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+- nvkm_pmu_reset(pmu);
+- return 0;
++ return nvkm_pmu_reset(pmu);
+ }
+
+ static int
+ nvkm_pmu_init(struct nvkm_subdev *subdev)
+ {
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+- struct nvkm_device *device = pmu->subdev.device;
+-
+- if (!pmu->func->init)
+- return 0;
+-
+- if (pmu->func->enabled(pmu)) {
+- /* Inhibit interrupts, and wait for idle. */
+- nvkm_wr32(device, 0x10a014, 0x0000ffff);
+- nvkm_msec(device, 2000,
+- if (!nvkm_rd32(device, 0x10a04c))
+- break;
+- );
+-
+- nvkm_pmu_reset(pmu);
+- }
+-
+- return pmu->func->init(pmu);
++ int ret = nvkm_pmu_reset(pmu);
++ if (ret == 0 && pmu->func->init)
++ ret = pmu->func->init(pmu);
++ return ret;
+ }
+
+ static int
+diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
+index 8f3606de4eafb..47be2cd2c60db 100644
+--- a/drivers/iio/adc/men_z188_adc.c
++++ b/drivers/iio/adc/men_z188_adc.c
+@@ -107,6 +107,7 @@ static int men_z188_probe(struct mcb_device *dev,
+ struct z188_adc *adc;
+ struct iio_dev *indio_dev;
+ struct resource *mem;
++ int ret;
+
+ indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
+ if (!indio_dev)
+@@ -133,8 +134,14 @@ static int men_z188_probe(struct mcb_device *dev,
+ adc->mem = mem;
+ mcb_set_drvdata(dev, indio_dev);
+
+- return iio_device_register(indio_dev);
++ ret = iio_device_register(indio_dev);
++ if (ret)
++ goto err_unmap;
++
++ return 0;
+
++err_unmap:
++ iounmap(adc->base);
+ err:
+ mcb_release_mem(mem);
+ return -ENXIO;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 9f7287f45d06f..63358c4c8e57c 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3683,9 +3683,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
+ spin_unlock(&host->target_lock);
+
+ /*
+- * Wait for tl_err and target port removal tasks.
++ * srp_queue_remove_work() queues a call to
++ * srp_remove_target(). The latter function cancels
++ * target->tl_err_work so waiting for the remove works to
++ * finish is sufficient.
+ */
+- flush_workqueue(system_long_wq);
+ flush_workqueue(srp_remove_wq);
+
+ kfree(host);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 377f91885bdaa..76d5ec11514db 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1662,7 +1662,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+ __func__, size_read);
+- return 0;
++ return size_read;
+ }
+
+ i += size_read;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 8c9eae5f30722..92a7247b62999 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -584,6 +584,11 @@ static const struct usb_device_id products[] = {
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
++#define ZAURUS_FAKE_INTERFACE \
++ .bInterfaceClass = USB_CLASS_COMM, \
++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
++
+ /* SA-1100 based Sharp Zaurus ("collie"), or compatible;
+ * wire-incompatible with true CDC Ethernet implementations.
+ * (And, it seems, needlessly so...)
+@@ -637,6 +642,13 @@ static const struct usb_device_id products[] = {
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = 0,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x9032, /* SL-6000 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = 0,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 2d316c1b851b2..a97dd62b9d54b 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ /* ignore the CRC length */
+ len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+
+- if (len > ETH_FRAME_LEN)
++ if (len > ETH_FRAME_LEN || len > skb->len)
+ return 0;
+
+ /* the last packet of current skb */
+diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
+index 9c2196c3fd113..1f19fc5e6117e 100644
+--- a/drivers/net/usb/zaurus.c
++++ b/drivers/net/usb/zaurus.c
+@@ -268,6 +268,11 @@ static const struct usb_device_id products [] = {
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
++#define ZAURUS_FAKE_INTERFACE \
++ .bInterfaceClass = USB_CLASS_COMM, \
++ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
++ .bInterfaceProtocol = USB_CDC_PROTO_NONE
++
+ /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+@@ -325,6 +330,13 @@ static const struct usb_device_id products [] = {
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = ZAURUS_PXA_INFO,
++}, {
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
++ | USB_DEVICE_ID_MATCH_DEVICE,
++ .idVendor = 0x04DD,
++ .idProduct = 0x9032, /* SL-6000 */
++ ZAURUS_FAKE_INTERFACE,
++ .driver_info = (unsigned long)&bogus_mdlm_info,
+ }, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 52a43922b4fea..28133a8e3169b 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -440,7 +440,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
+ modembits |= MDM_RTR;
+ if (dlci->modem_tx & TIOCM_RI)
+ modembits |= MDM_IC;
+- if (dlci->modem_tx & TIOCM_CD)
++ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
+ modembits |= MDM_DV;
+ return modembits;
+ }
+@@ -1502,7 +1502,7 @@ static void gsm_dlci_t1(unsigned long data)
+ dlci->mode = DLCI_MODE_ADM;
+ gsm_dlci_open(dlci);
+ } else {
+- gsm_dlci_close(dlci);
++ gsm_dlci_begin_close(dlci); /* prevent half open link */
+ }
+
+ break;
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index c51044ba503c3..0e83ce81ca332 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -102,8 +102,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ port->mapsize = resource_size(&resource);
+
+ /* Check for shifted address mapping */
+- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
++ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
++ if (prop >= port->mapsize) {
++ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
++ prop, &port->mapsize);
++ ret = -EINVAL;
++ goto err_unprepare;
++ }
++
+ port->mapbase += prop;
++ port->mapsize -= prop;
++ }
+
+ /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
+ if (of_device_is_compatible(np, "mrvl,mmp-uart"))
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index dcbd3e0ec2d95..0273a1649f236 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3153,9 +3153,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+
++ local_bh_disable();
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = dwc3_process_event_buf(evt);
+ spin_unlock_irqrestore(&dwc->lock, flags);
++ local_bh_enable();
+
+ return ret;
+ }
+diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
+index 743d41c6952b5..55be224b64a48 100644
+--- a/drivers/usb/gadget/function/rndis.c
++++ b/drivers/usb/gadget/function/rndis.c
+@@ -922,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+ params->resp_avail = resp_avail;
+ params->v = v;
+ INIT_LIST_HEAD(&params->resp_queue);
++ spin_lock_init(&params->resp_lock);
+ pr_debug("%s: configNr = %d\n", __func__, i);
+
+ return params;
+@@ -1015,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
+ {
+ rndis_resp_t *r, *n;
+
++ spin_lock(&params->resp_lock);
+ list_for_each_entry_safe(r, n, &params->resp_queue, list) {
+ if (r->buf == buf) {
+ list_del(&r->list);
+ kfree(r);
+ }
+ }
++ spin_unlock(&params->resp_lock);
+ }
+ EXPORT_SYMBOL_GPL(rndis_free_response);
+
+@@ -1030,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
+
+ if (!length) return NULL;
+
++ spin_lock(&params->resp_lock);
+ list_for_each_entry_safe(r, n, &params->resp_queue, list) {
+ if (!r->send) {
+ r->send = 1;
+ *length = r->length;
++ spin_unlock(&params->resp_lock);
+ return r->buf;
+ }
+ }
+
++ spin_unlock(&params->resp_lock);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(rndis_get_next_response);
+@@ -1054,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
+ r->length = length;
+ r->send = 0;
+
++ spin_lock(&params->resp_lock);
+ list_add_tail(&r->list, &params->resp_queue);
++ spin_unlock(&params->resp_lock);
+ return r;
+ }
+
+diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
+index 21e0430ffb986..463ac45ef4cb9 100644
+--- a/drivers/usb/gadget/function/rndis.h
++++ b/drivers/usb/gadget/function/rndis.h
+@@ -177,6 +177,7 @@ typedef struct rndis_params {
+ void (*resp_avail)(void *v);
+ void *v;
+ struct list_head resp_queue;
++ spinlock_t resp_lock;
+ } rndis_params;
+
+ /* RNDIS Message parser and other useless functions */
+diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
+index de207a90571ef..c2e396e004929 100644
+--- a/drivers/usb/gadget/udc/udc-xilinx.c
++++ b/drivers/usb/gadget/udc/udc-xilinx.c
+@@ -1620,6 +1620,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
+ break;
+ case USB_RECIP_ENDPOINT:
+ epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
++ if (epnum >= XUSB_MAX_ENDPOINTS)
++ goto stall;
+ target_ep = &udc->ep[epnum];
+ epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
+ halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
+@@ -1687,6 +1689,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
+ case USB_RECIP_ENDPOINT:
+ if (!udc->setup.wValue) {
+ endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
++ if (endpoint >= XUSB_MAX_ENDPOINTS) {
++ xudc_ep0_stall(udc);
++ return;
++ }
+ target_ep = &udc->ep[endpoint];
+ outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
+ outinbit = outinbit >> 7;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 7e1b5e00e1f4f..98fbf396c10ec 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1022,6 +1022,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ int retval = 0;
+ bool comp_timer_running = false;
+ bool pending_portevent = false;
++ bool reinit_xhc = false;
+
+ if (!hcd->state)
+ return 0;
+@@ -1038,10 +1039,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
+ spin_lock_irq(&xhci->lock);
+- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
+- hibernated = true;
+
+- if (!hibernated) {
++ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
++ reinit_xhc = true;
++
++ if (!reinit_xhc) {
+ /*
+ * Some controllers might lose power during suspend, so wait
+ * for controller not ready bit to clear, just as in xHC init.
+@@ -1074,12 +1076,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+ }
+- temp = readl(&xhci->op_regs->status);
+ }
+
+- /* If restore operation fails, re-initialize the HC during resume */
+- if ((temp & STS_SRE) || hibernated) {
++ temp = readl(&xhci->op_regs->status);
+
++ /* re-initialize the HC on Restore Error, or Host Controller Error */
++ if (temp & (STS_SRE | STS_HCE)) {
++ reinit_xhc = true;
++ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
++ }
++
++ if (reinit_xhc) {
+ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
+ !(xhci_all_ports_seen_u0(xhci))) {
+ del_timer_sync(&xhci->comp_mode_recovery_timer);
+@@ -1390,9 +1397,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
+ struct urb_priv *urb_priv;
+ int num_tds;
+
+- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
+- true, true, __func__) <= 0)
++ if (!urb)
+ return -EINVAL;
++ ret = xhci_check_args(hcd, urb->dev, urb->ep,
++ true, true, __func__);
++ if (ret <= 0)
++ return ret ? ret : -EINVAL;
+
+ slot_id = urb->dev->slot_id;
+ ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+@@ -3019,7 +3029,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
+ return -EINVAL;
+ ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
+ if (ret <= 0)
+- return -EINVAL;
++ return ret ? ret : -EINVAL;
+ if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
+ xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
+ " descriptor for ep 0x%x does not support streams\n",
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index 71da297e148df..1f6761a4daafd 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -83,7 +83,6 @@
+ #define CH341_LCR_CS5 0x00
+
+ static const struct usb_device_id id_table[] = {
+- { USB_DEVICE(0x1a86, 0x5512) },
+ { USB_DEVICE(0x1a86, 0x5523) },
+ { USB_DEVICE(0x1a86, 0x7522) },
+ { USB_DEVICE(0x1a86, 0x7523) },
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a951793e08b8..a45d3502bd95c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -201,6 +201,8 @@ static void option_instat_callback(struct urb *urb);
+
+ #define DELL_PRODUCT_5821E 0x81d7
+ #define DELL_PRODUCT_5821E_ESIM 0x81e0
++#define DELL_PRODUCT_5829E_ESIM 0x81e4
++#define DELL_PRODUCT_5829E 0x81e6
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -1066,6 +1068,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
++ .driver_info = RSVD(0) | RSVD(6) },
++ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
++ .driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -1276,10 +1282,16 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
+ .driver_info = NCTRL(2) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
++ .driver_info = NCTRL(2) },
++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
++ .driver_info = NCTRL(2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
++ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
++ .driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) },
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index b43b4942f1566..c87072217dc06 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -569,16 +569,18 @@ err:
+ return ret;
+ }
+
+-static int vhost_vsock_stop(struct vhost_vsock *vsock)
++static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
+ {
+ size_t i;
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&vsock->dev.mutex);
+
+- ret = vhost_dev_check_owner(&vsock->dev);
+- if (ret)
+- goto err;
++ if (check_owner) {
++ ret = vhost_dev_check_owner(&vsock->dev);
++ if (ret)
++ goto err;
++ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+@@ -692,7 +694,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+- vhost_vsock_stop(vsock);
++ /* Don't check the owner, because we are in the release path, so we
++ * need to stop the vsock device in any case.
++ * vhost_vsock_stop() can not fail in this case, so we don't need to
++ * check the return code.
++ */
++ vhost_vsock_stop(vsock, false);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+@@ -790,7 +797,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+- return vhost_vsock_stop(vsock);
++ return vhost_vsock_stop(vsock, true);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index c875f246cb0e9..ccb49caed502c 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -50,6 +50,14 @@ DECLARE_RWSEM(configfs_rename_sem);
+ */
+ DEFINE_SPINLOCK(configfs_dirent_lock);
+
++/*
++ * All of link_obj/unlink_obj/link_group/unlink_group require that
++ * subsys->su_mutex is held.
++ * But parent configfs_subsystem is NULL when config_item is root.
++ * Use this mutex when config_item is root.
++ */
++static DEFINE_MUTEX(configfs_subsystem_mutex);
++
+ static void configfs_d_iput(struct dentry * dentry,
+ struct inode * inode)
+ {
+@@ -1937,7 +1945,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
+ group->cg_item.ci_name = group->cg_item.ci_namebuf;
+
+ sd = root->d_fsdata;
++ mutex_lock(&configfs_subsystem_mutex);
+ link_group(to_config_group(sd->s_element), group);
++ mutex_unlock(&configfs_subsystem_mutex);
+
+ inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
+
+@@ -1962,7 +1972,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
+ inode_unlock(d_inode(root));
+
+ if (err) {
++ mutex_lock(&configfs_subsystem_mutex);
+ unlink_group(group);
++ mutex_unlock(&configfs_subsystem_mutex);
+ configfs_release_fs();
+ }
+ put_fragment(frag);
+@@ -2008,7 +2020,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
+
+ dput(dentry);
+
++ mutex_lock(&configfs_subsystem_mutex);
+ unlink_group(group);
++ mutex_unlock(&configfs_subsystem_mutex);
+ configfs_release_fs();
+ }
+
+diff --git a/fs/file.c b/fs/file.c
+index f1943da6303b7..5e79aa9f5d73b 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -679,28 +679,69 @@ void do_close_on_exec(struct files_struct *files)
+ spin_unlock(&files->file_lock);
+ }
+
+-static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
++static inline struct file *__fget_files_rcu(struct files_struct *files,
++ unsigned int fd, fmode_t mask, unsigned int refs)
+ {
+- struct files_struct *files = current->files;
+- struct file *file;
++ for (;;) {
++ struct file *file;
++ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
++ struct file __rcu **fdentry;
+
+- rcu_read_lock();
+-loop:
+- file = fcheck_files(files, fd);
+- if (file) {
+- /* File object ref couldn't be taken.
+- * dup2() atomicity guarantee is the reason
+- * we loop to catch the new file (or NULL pointer)
++ if (unlikely(fd >= fdt->max_fds))
++ return NULL;
++
++ fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
++ file = rcu_dereference_raw(*fdentry);
++ if (unlikely(!file))
++ return NULL;
++
++ if (unlikely(file->f_mode & mask))
++ return NULL;
++
++ /*
++ * Ok, we have a file pointer. However, because we do
++ * this all locklessly under RCU, we may be racing with
++ * that file being closed.
++ *
++ * Such a race can take two forms:
++ *
++ * (a) the file ref already went down to zero,
++ * and get_file_rcu_many() fails. Just try
++ * again:
++ */
++ if (unlikely(!get_file_rcu_many(file, refs)))
++ continue;
++
++ /*
++ * (b) the file table entry has changed under us.
++ * Note that we don't need to re-check the 'fdt->fd'
++ * pointer having changed, because it always goes
++ * hand-in-hand with 'fdt'.
++ *
++ * If so, we need to put our refs and try again.
+ */
+- if (file->f_mode & mask)
+- file = NULL;
+- else if (!get_file_rcu_many(file, refs))
+- goto loop;
+- else if (__fcheck_files(files, fd) != file) {
++ if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
++ unlikely(rcu_dereference_raw(*fdentry) != file)) {
+ fput_many(file, refs);
+- goto loop;
++ continue;
+ }
++
++ /*
++ * Ok, we have a ref to the file, and checked that it
++ * still exists.
++ */
++ return file;
+ }
++}
++
++
++static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
++{
++ struct files_struct *files = current->files;
++ struct file *file;
++
++ rcu_read_lock();
++ file = __fget_files_rcu(files, fd, mask, refs);
+ rcu_read_unlock();
+
+ return file;
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index 16dc063edc4cf..6ac0a079c5b7b 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -265,7 +265,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+ if (!gid_valid(gid))
+ return -EINVAL;
+ opts->gid = gid;
+- set_gid(tracefs_mount->mnt_root, gid);
+ break;
+ case Opt_mode:
+ if (match_octal(&args[0], &option))
+@@ -292,7 +291,9 @@ static int tracefs_apply_options(struct super_block *sb)
+ inode->i_mode |= opts->mode;
+
+ inode->i_uid = opts->uid;
+- inode->i_gid = opts->gid;
++
++ /* Set all the group ids to the mount option */
++ set_gid(sb->s_root, opts->gid);
+
+ return 0;
+ }
+diff --git a/include/net/checksum.h b/include/net/checksum.h
+index aef2b2bb6603f..051307cc877f3 100644
+--- a/include/net/checksum.h
++++ b/include/net/checksum.h
+@@ -143,6 +143,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+ *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
+ }
+
++static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
++{
++ *csum = csum_add(csum_sub(*csum, old), new);
++}
++
+ struct sk_buff;
+ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+ __be32 from, __be32 to, bool pseudohdr);
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 0a0e1aa11f5e7..bf858e416b5e4 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1532,6 +1532,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
+
++ cpus_read_lock();
+ mutex_lock(&cpuset_mutex);
+
+ /* prepare for attach */
+@@ -1587,6 +1588,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ wake_up(&cpuset_attach_wq);
+
+ mutex_unlock(&cpuset_mutex);
++ cpus_read_unlock();
+ }
+
+ /* The various types of files and directories in a cpuset file system */
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 5d36b4c549292..91059030fb69e 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -260,14 +260,20 @@ void __init memblock_discard(void)
+ addr = __pa(memblock.reserved.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.reserved.max);
+- __memblock_free_late(addr, size);
++ if (memblock_reserved_in_slab)
++ kfree(memblock.reserved.regions);
++ else
++ __memblock_free_late(addr, size);
+ }
+
+ if (memblock.memory.regions != memblock_memory_init_regions) {
+ addr = __pa(memblock.memory.regions);
+ size = PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.memory.max);
+- __memblock_free_late(addr, size);
++ if (memblock_memory_in_slab)
++ kfree(memblock.memory.regions);
++ else
++ __memblock_free_late(addr, size);
+ }
+ }
+ #endif
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a278258e68cbf..fbb1ab032d2e3 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1974,7 +1974,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
+ /* Free pulled out fragments. */
+ while ((list = skb_shinfo(skb)->frag_list) != insp) {
+ skb_shinfo(skb)->frag_list = list->next;
+- kfree_skb(list);
++ consume_skb(list);
+ }
+ /* And insert new clone at head. */
+ if (clone) {
+@@ -5408,7 +5408,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
+ /* Free pulled out fragments. */
+ while ((list = shinfo->frag_list) != insp) {
+ shinfo->frag_list = list->next;
+- kfree_skb(list);
++ consume_skb(list);
+ }
+ /* And insert new clone at head. */
+ if (clone) {
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index d0ef51d49f8a9..ee42907f48270 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1268,8 +1268,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
+ }
+
+ ops = rcu_dereference(inet_offloads[proto]);
+- if (likely(ops && ops->callbacks.gso_segment))
++ if (likely(ops && ops->callbacks.gso_segment)) {
+ segs = ops->callbacks.gso_segment(skb, features);
++ if (!segs)
++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
++ }
+
+ if (IS_ERR_OR_NULL(segs))
+ goto out;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 1ee488a539363..0dd5ca2004c73 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -192,7 +192,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
+ (int)ident, &ipv6_hdr(skb)->daddr, dif);
+ #endif
+ } else {
+- pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
+ return NULL;
+ }
+
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index e3698b6d82313..cb8a837ab9448 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -96,6 +96,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+ if (likely(ops && ops->callbacks.gso_segment)) {
+ skb_reset_transport_header(skb);
+ segs = ops->callbacks.gso_segment(skb, features);
++ if (!segs)
++ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+ }
+
+ if (IS_ERR_OR_NULL(segs))
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index edd31f2879e77..93eb9631e2aa6 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -460,12 +460,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
+ memcpy(addr, new_addr, sizeof(__be32[4]));
+ }
+
+-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
++static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
+ {
++ u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
++
++ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
++
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
++ (__force __wsum)(ipv6_tclass << 12));
++
++ ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
++}
++
++static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
++{
++ u32 ofl;
++
++ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
++ fl = OVS_MASKED(ofl, fl, mask);
++
+ /* Bits 21-24 are always unmasked, so this retains their values. */
+- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
++ nh->flow_lbl[0] = (u8)(fl >> 16);
++ nh->flow_lbl[1] = (u8)(fl >> 8);
++ nh->flow_lbl[2] = (u8)fl;
++
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
++}
++
++static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
++{
++ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
++
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
++ (__force __wsum)(new_ttl << 8));
++ nh->hop_limit = new_ttl;
+ }
+
+ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+@@ -583,18 +614,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ }
+ }
+ if (mask->ipv6_tclass) {
+- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
++ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
+ flow_key->ip.tos = ipv6_get_dsfield(nh);
+ }
+ if (mask->ipv6_label) {
+- set_ipv6_fl(nh, ntohl(key->ipv6_label),
++ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
+ ntohl(mask->ipv6_label));
+ flow_key->ipv6.label =
+ *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+ }
+ if (mask->ipv6_hlimit) {
+- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+- mask->ipv6_hlimit);
++ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
+ flow_key->ip.ttl = nh->hop_limit;
+ }
+ return 0;