Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Tue, 29 Oct 2019 14:00:27
Message-Id: 1572357542.16a87317d920b434480c0f624ae9ea25e4ee5b5d.mpagano@gentoo
1 commit: 16a87317d920b434480c0f624ae9ea25e4ee5b5d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sun Aug 4 16:06:12 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 13:59:02 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=16a87317
7
8 Linux patch 4.14.136
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1135_linux-4.14.136.patch | 1854 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1858 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 675e99c..45bebaa 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -583,6 +583,10 @@ Patch: 1134_linux-4.14.135.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.14.135
23
24 +Patch: 1135_linux-4.14.136.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.14.136
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1135_linux-4.14.136.patch b/1135_linux-4.14.136.patch
33 new file mode 100644
34 index 0000000..57a211c
35 --- /dev/null
36 +++ b/1135_linux-4.14.136.patch
37 @@ -0,0 +1,1854 @@
38 +diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
39 +index 6087defd9f93..d37fabe17bd1 100644
40 +--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
41 ++++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
42 +@@ -8,6 +8,6 @@ Required properties:
43 + Example:
44 + serial@12000 {
45 + compatible = "marvell,armada-3700-uart";
46 +- reg = <0x12000 0x400>;
47 ++ reg = <0x12000 0x200>;
48 + interrupts = <43>;
49 + };
50 +diff --git a/Makefile b/Makefile
51 +index 57825473c031..a798f4777ae2 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,7 +1,7 @@
55 + # SPDX-License-Identifier: GPL-2.0
56 + VERSION = 4
57 + PATCHLEVEL = 14
58 +-SUBLEVEL = 135
59 ++SUBLEVEL = 136
60 + EXTRAVERSION =
61 + NAME = Petit Gorille
62 +
63 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
64 +index 8c0cf7efac65..b554cdaf5e53 100644
65 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
66 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
67 +@@ -134,7 +134,7 @@
68 +
69 + uart0: serial@12000 {
70 + compatible = "marvell,armada-3700-uart";
71 +- reg = <0x12000 0x400>;
72 ++ reg = <0x12000 0x200>;
73 + interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
74 + status = "disabled";
75 + };
76 +diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
77 +index a3c7f271ad4c..9ed290a9811c 100644
78 +--- a/arch/arm64/include/asm/compat.h
79 ++++ b/arch/arm64/include/asm/compat.h
80 +@@ -234,6 +234,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
81 + }
82 +
83 + #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
84 ++#define COMPAT_MINSIGSTKSZ 2048
85 +
86 + static inline void __user *arch_compat_alloc_user_space(long len)
87 + {
88 +diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
89 +index 6394b4f0a69b..f42feab25dcf 100644
90 +--- a/arch/sh/boards/Kconfig
91 ++++ b/arch/sh/boards/Kconfig
92 +@@ -8,27 +8,19 @@ config SH_ALPHA_BOARD
93 + bool
94 +
95 + config SH_DEVICE_TREE
96 +- bool "Board Described by Device Tree"
97 ++ bool
98 + select OF
99 + select OF_EARLY_FLATTREE
100 + select TIMER_OF
101 + select COMMON_CLK
102 + select GENERIC_CALIBRATE_DELAY
103 +- help
104 +- Select Board Described by Device Tree to build a kernel that
105 +- does not hard-code any board-specific knowledge but instead uses
106 +- a device tree blob provided by the boot-loader. You must enable
107 +- drivers for any hardware you want to use separately. At this
108 +- time, only boards based on the open-hardware J-Core processors
109 +- have sufficient driver coverage to use this option; do not
110 +- select it if you are using original SuperH hardware.
111 +
112 + config SH_JCORE_SOC
113 + bool "J-Core SoC"
114 +- depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
115 ++ select SH_DEVICE_TREE
116 + select CLKSRC_JCORE_PIT
117 + select JCORE_AIC
118 +- default y if CPU_J2
119 ++ depends on CPU_J2
120 + help
121 + Select this option to include drivers core components of the
122 + J-Core SoC, including interrupt controllers and timers.
123 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
124 +index e694fd2c4ed0..05e75d18b4d9 100644
125 +--- a/drivers/android/binder.c
126 ++++ b/drivers/android/binder.c
127 +@@ -1903,8 +1903,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
128 +
129 + static void binder_free_transaction(struct binder_transaction *t)
130 + {
131 +- if (t->buffer)
132 +- t->buffer->transaction = NULL;
133 ++ struct binder_proc *target_proc = t->to_proc;
134 ++
135 ++ if (target_proc) {
136 ++ binder_inner_proc_lock(target_proc);
137 ++ if (t->buffer)
138 ++ t->buffer->transaction = NULL;
139 ++ binder_inner_proc_unlock(target_proc);
140 ++ }
141 ++ /*
142 ++ * If the transaction has no target_proc, then
143 ++ * t->buffer->transaction has already been cleared.
144 ++ */
145 + kfree(t);
146 + binder_stats_deleted(BINDER_STAT_TRANSACTION);
147 + }
148 +@@ -3426,10 +3436,12 @@ static int binder_thread_write(struct binder_proc *proc,
149 + buffer->debug_id,
150 + buffer->transaction ? "active" : "finished");
151 +
152 ++ binder_inner_proc_lock(proc);
153 + if (buffer->transaction) {
154 + buffer->transaction->buffer = NULL;
155 + buffer->transaction = NULL;
156 + }
157 ++ binder_inner_proc_unlock(proc);
158 + if (buffer->async_transaction && buffer->target_node) {
159 + struct binder_node *buf_node;
160 + struct binder_work *w;
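
The two binder hunks above close a race between binder_free_transaction() and the buffer-release path in binder_thread_write(): both directions of the transaction/buffer link are now broken only while holding the target process's inner lock. A minimal userspace analogue of that pattern, using POSIX threads rather than the binder locking primitives (names here are illustrative, not binder API):

/*
 * Sketch only: a transaction and a buffer point at each other, and the link
 * is torn down in either direction only under the owning process's lock, so
 * neither side can observe or follow a half-cleared pointer.
 */
#include <pthread.h>
#include <stdlib.h>

struct buffer;

struct transaction {
    struct buffer *buffer;
};

struct buffer {
    struct transaction *transaction;
};

struct proc {
    pthread_mutex_t inner_lock;
};

/* Analogue of binder_free_transaction() above. */
static void free_transaction(struct proc *target, struct transaction *t)
{
    if (target) {
        pthread_mutex_lock(&target->inner_lock);
        if (t->buffer)
            t->buffer->transaction = NULL;
        pthread_mutex_unlock(&target->inner_lock);
    }
    free(t);
}

/* Analogue of the buffer-release hunk above. */
static void release_buffer(struct proc *proc, struct buffer *b)
{
    pthread_mutex_lock(&proc->inner_lock);
    if (b->transaction) {
        b->transaction->buffer = NULL;
        b->transaction = NULL;
    }
    pthread_mutex_unlock(&proc->inner_lock);
    free(b);
}
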
161 +diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
162 +index 0ccf6bf01ed4..c50b68bbecdc 100644
163 +--- a/drivers/bluetooth/hci_ath.c
164 ++++ b/drivers/bluetooth/hci_ath.c
165 +@@ -101,6 +101,9 @@ static int ath_open(struct hci_uart *hu)
166 +
167 + BT_DBG("hu %p", hu);
168 +
169 ++ if (!hci_uart_has_flow_control(hu))
170 ++ return -EOPNOTSUPP;
171 ++
172 + ath = kzalloc(sizeof(*ath), GFP_KERNEL);
173 + if (!ath)
174 + return -ENOMEM;
175 +diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
176 +index 32527bdf4b50..6d41b2023f09 100644
177 +--- a/drivers/bluetooth/hci_bcm.c
178 ++++ b/drivers/bluetooth/hci_bcm.c
179 +@@ -305,6 +305,9 @@ static int bcm_open(struct hci_uart *hu)
180 +
181 + bt_dev_dbg(hu->hdev, "hu %p", hu);
182 +
183 ++ if (!hci_uart_has_flow_control(hu))
184 ++ return -EOPNOTSUPP;
185 ++
186 + bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
187 + if (!bcm)
188 + return -ENOMEM;
189 +diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
190 +index aad07e40ea4f..c75311d4dd31 100644
191 +--- a/drivers/bluetooth/hci_intel.c
192 ++++ b/drivers/bluetooth/hci_intel.c
193 +@@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
194 +
195 + BT_DBG("hu %p", hu);
196 +
197 ++ if (!hci_uart_has_flow_control(hu))
198 ++ return -EOPNOTSUPP;
199 ++
200 + intel = kzalloc(sizeof(*intel), GFP_KERNEL);
201 + if (!intel)
202 + return -ENOMEM;
203 +diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
204 +index 3b63a781f10f..43221def1d29 100644
205 +--- a/drivers/bluetooth/hci_ldisc.c
206 ++++ b/drivers/bluetooth/hci_ldisc.c
207 +@@ -297,6 +297,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
208 + return 0;
209 + }
210 +
211 ++/* Check the underlying device or tty has flow control support */
212 ++bool hci_uart_has_flow_control(struct hci_uart *hu)
213 ++{
214 ++ /* serdev nodes check if the needed operations are present */
215 ++ if (hu->serdev)
216 ++ return true;
217 ++
218 ++ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
219 ++ return true;
220 ++
221 ++ return false;
222 ++}
223 ++
224 + /* Flow control or un-flow control the device */
225 + void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
226 + {
227 +diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
228 +index ffb00669346f..23791df081ba 100644
229 +--- a/drivers/bluetooth/hci_mrvl.c
230 ++++ b/drivers/bluetooth/hci_mrvl.c
231 +@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
232 +
233 + BT_DBG("hu %p", hu);
234 +
235 ++ if (!hci_uart_has_flow_control(hu))
236 ++ return -EOPNOTSUPP;
237 ++
238 + mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
239 + if (!mrvl)
240 + return -ENOMEM;
241 +diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
242 +index 66e8c68e4607..e5ec2cf1755b 100644
243 +--- a/drivers/bluetooth/hci_uart.h
244 ++++ b/drivers/bluetooth/hci_uart.h
245 +@@ -117,6 +117,7 @@ void hci_uart_unregister_device(struct hci_uart *hu);
246 + int hci_uart_tx_wakeup(struct hci_uart *hu);
247 + int hci_uart_init_ready(struct hci_uart *hu);
248 + void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
249 ++bool hci_uart_has_flow_control(struct hci_uart *hu);
250 + void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
251 + void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
252 + unsigned int oper_speed);
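
The hci_ath/hci_bcm/hci_intel/hci_mrvl hunks all rely on the new hci_uart_has_flow_control() helper: protocols that need hardware flow control now refuse to open on a tty whose driver lacks the tiocmget/tiocmset operations, instead of tripping over the missing operations later. A rough sketch of the same gate in plain C (illustrative names, not the hci_uart API):

/*
 * Sketch only: an open() routine checks the lower layer's capabilities and
 * bails out with "operation not supported" before allocating any per-session
 * state, mirroring the checks added to the four open() functions above.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct lower_ops {
    int (*get_modem_lines)(void *port);          /* tiocmget-like hook */
    int (*set_modem_lines)(void *port, int set); /* tiocmset-like hook */
};

struct lower_dev {
    const struct lower_ops *ops;
    void *port;
};

struct session {
    struct lower_dev *dev;
};

static bool has_flow_control(const struct lower_dev *dev)
{
    return dev->ops->get_modem_lines && dev->ops->set_modem_lines;
}

static int proto_open(struct lower_dev *dev, struct session **out)
{
    struct session *s;

    if (!has_flow_control(dev))
        return -EOPNOTSUPP;        /* refuse early, before allocating anything */

    s = calloc(1, sizeof(*s));
    if (!s)
        return -ENOMEM;
    s->dev = dev;
    *out = s;
    return 0;
}
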
253 +diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
254 +index 08f8e0107642..8f6903ec7aec 100644
255 +--- a/drivers/i2c/busses/i2c-qup.c
256 ++++ b/drivers/i2c/busses/i2c-qup.c
257 +@@ -844,6 +844,8 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
258 + }
259 +
260 + if (ret || qup->bus_err || qup->qup_err) {
261 ++ reinit_completion(&qup->xfer);
262 ++
263 + if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
264 + dev_err(qup->dev, "change to run state timed out");
265 + goto desc_err;
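
The one-line i2c-qup change matters because the error path reuses the same completion object for the recovery steps that follow a failed BAM transfer; reinitialising it first means the next wait is not satisfied by a complete() left over from the failed attempt. A rough userspace model of that reuse hazard, with POSIX threads standing in for the kernel completion API:

/*
 * Sketch only: a "completion" reused across attempts must have its done flag
 * cleared before the next wait, otherwise a stale complete() from the
 * previous attempt makes the wait return immediately.
 */
#include <pthread.h>
#include <stdbool.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  wake;
    bool            done;
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->wake, NULL);
    c->done = false;
}

static void reinit_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = false;                 /* forget any stale complete() */
    pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = true;
    pthread_cond_signal(&c->wake);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->wake, &c->lock);
    pthread_mutex_unlock(&c->lock);
}
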
266 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
267 +index baa4c58e2736..523d0889c2a4 100644
268 +--- a/drivers/iommu/intel-iommu.c
269 ++++ b/drivers/iommu/intel-iommu.c
270 +@@ -3702,7 +3702,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
271 +
272 + freelist = domain_unmap(domain, start_pfn, last_pfn);
273 +
274 +- if (intel_iommu_strict) {
275 ++ if (intel_iommu_strict || !has_iova_flush_queue(&domain->iovad)) {
276 + iommu_flush_iotlb_psi(iommu, domain, start_pfn,
277 + nrpages, !freelist, 0);
278 + /* free iova */
279 +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
280 +index 33edfa794ae9..9f35b9a0d6d8 100644
281 +--- a/drivers/iommu/iova.c
282 ++++ b/drivers/iommu/iova.c
283 +@@ -58,9 +58,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
284 + }
285 + EXPORT_SYMBOL_GPL(init_iova_domain);
286 +
287 ++bool has_iova_flush_queue(struct iova_domain *iovad)
288 ++{
289 ++ return !!iovad->fq;
290 ++}
291 ++
292 + static void free_iova_flush_queue(struct iova_domain *iovad)
293 + {
294 +- if (!iovad->fq)
295 ++ if (!has_iova_flush_queue(iovad))
296 + return;
297 +
298 + if (timer_pending(&iovad->fq_timer))
299 +@@ -78,13 +83,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
300 + int init_iova_flush_queue(struct iova_domain *iovad,
301 + iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
302 + {
303 ++ struct iova_fq __percpu *queue;
304 + int cpu;
305 +
306 + atomic64_set(&iovad->fq_flush_start_cnt, 0);
307 + atomic64_set(&iovad->fq_flush_finish_cnt, 0);
308 +
309 +- iovad->fq = alloc_percpu(struct iova_fq);
310 +- if (!iovad->fq)
311 ++ queue = alloc_percpu(struct iova_fq);
312 ++ if (!queue)
313 + return -ENOMEM;
314 +
315 + iovad->flush_cb = flush_cb;
316 +@@ -93,13 +99,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
317 + for_each_possible_cpu(cpu) {
318 + struct iova_fq *fq;
319 +
320 +- fq = per_cpu_ptr(iovad->fq, cpu);
321 ++ fq = per_cpu_ptr(queue, cpu);
322 + fq->head = 0;
323 + fq->tail = 0;
324 +
325 + spin_lock_init(&fq->lock);
326 + }
327 +
328 ++ smp_wmb();
329 ++
330 ++ iovad->fq = queue;
331 ++
332 + setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
333 + atomic_set(&iovad->fq_timer_on, 0);
334 +
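
The iova.c hunks restructure init_iova_flush_queue() so the per-CPU queue is fully set up through a private pointer and only published to iovad->fq after smp_wmb(); the intel-iommu.c hunk then uses the new has_iova_flush_queue() test to fall back to synchronous flushing while the queue does not exist yet. A minimal C11 analogue of that publish-after-init pattern, with release/acquire atomics standing in for the kernel barriers (not the kernel code itself):

/*
 * Sketch only: the structure is initialised through a local pointer and then
 * published with release semantics, so any thread that sees a non-NULL
 * pointer (acquire load) also sees the initialised contents.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct flush_queue {
    unsigned int head;
    unsigned int tail;
};

struct domain {
    _Atomic(struct flush_queue *) fq;   /* NULL until the queue exists */
};

static int init_flush_queue(struct domain *d)
{
    struct flush_queue *queue = calloc(1, sizeof(*queue));

    if (!queue)
        return -1;

    queue->head = 0;
    queue->tail = 0;

    /* Publish only after initialisation is complete. */
    atomic_store_explicit(&d->fq, queue, memory_order_release);
    return 0;
}

static int has_flush_queue(struct domain *d)
{
    /* Pairs with the release store in init_flush_queue(). */
    return atomic_load_explicit(&d->fq, memory_order_acquire) != NULL;
}
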
335 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
336 +index 17cc879ad2bb..35983c7c3137 100644
337 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
338 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
339 +@@ -1963,6 +1963,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
340 +
341 + /* get endpoint base */
342 + idx = ((ep_addr & 0x7f) - 1) * 2;
343 ++ if (idx > 15)
344 ++ return -EIO;
345 ++
346 + if (ep_addr & 0x80)
347 + idx++;
348 + attr = ep->desc.bmAttributes;
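
The hfcsusb hunk rejects endpoint descriptors whose device-supplied address would produce an out-of-range index before that index is used to pick an endpoint slot. The same idea in isolation (bounds and names are illustrative; this sketch checks the final index, whereas the driver rejects the base index before the IN adjustment):

/*
 * Sketch only: an index derived from untrusted device data is validated
 * against the size of the fixed table it will select from before use.
 */
#include <errno.h>

#define NUM_EP_SLOTS 16

static int ep_addr_to_index(unsigned int ep_addr)
{
    int idx = ((int)(ep_addr & 0x7f) - 1) * 2;   /* endpoint number -> slot pair */

    if (ep_addr & 0x80)                          /* IN endpoints use the odd slot */
        idx++;

    if (idx < 0 || idx >= NUM_EP_SLOTS)          /* reject out-of-range descriptors */
        return -EIO;

    return idx;
}
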
349 +diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
350 +index 3c0a22a54113..932c32e56d73 100644
351 +--- a/drivers/media/radio/radio-raremono.c
352 ++++ b/drivers/media/radio/radio-raremono.c
353 +@@ -283,6 +283,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
354 + return 0;
355 + }
356 +
357 ++static void raremono_device_release(struct v4l2_device *v4l2_dev)
358 ++{
359 ++ struct raremono_device *radio = to_raremono_dev(v4l2_dev);
360 ++
361 ++ kfree(radio->buffer);
362 ++ kfree(radio);
363 ++}
364 ++
365 + /* File system interface */
366 + static const struct v4l2_file_operations usb_raremono_fops = {
367 + .owner = THIS_MODULE,
368 +@@ -307,12 +315,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
369 + struct raremono_device *radio;
370 + int retval = 0;
371 +
372 +- radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
373 +- if (radio)
374 +- radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
375 +-
376 +- if (!radio || !radio->buffer)
377 ++ radio = kzalloc(sizeof(*radio), GFP_KERNEL);
378 ++ if (!radio)
379 ++ return -ENOMEM;
380 ++ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
381 ++ if (!radio->buffer) {
382 ++ kfree(radio);
383 + return -ENOMEM;
384 ++ }
385 +
386 + radio->usbdev = interface_to_usbdev(intf);
387 + radio->intf = intf;
388 +@@ -336,7 +346,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
389 + if (retval != 3 ||
390 + (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
391 + dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
392 +- return -ENODEV;
393 ++ retval = -ENODEV;
394 ++ goto free_mem;
395 + }
396 +
397 + dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
398 +@@ -345,7 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
399 + retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
400 + if (retval < 0) {
401 + dev_err(&intf->dev, "couldn't register v4l2_device\n");
402 +- return retval;
403 ++ goto free_mem;
404 + }
405 +
406 + mutex_init(&radio->lock);
407 +@@ -357,6 +368,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
408 + radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
409 + radio->vdev.lock = &radio->lock;
410 + radio->vdev.release = video_device_release_empty;
411 ++ radio->v4l2_dev.release = raremono_device_release;
412 +
413 + usb_set_intfdata(intf, &radio->v4l2_dev);
414 +
415 +@@ -372,6 +384,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
416 + }
417 + dev_err(&intf->dev, "could not register video device\n");
418 + v4l2_device_unregister(&radio->v4l2_dev);
419 ++
420 ++free_mem:
421 ++ kfree(radio->buffer);
422 ++ kfree(radio);
423 + return retval;
424 + }
425 +
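
The raremono hunks move the device and buffer allocations off devm (whose lifetime ends when the USB interface is unbound) to plain allocations freed from a v4l2_device release callback, so the memory stays valid until the last reference to the device node is dropped. A compact sketch of that refcounted-release pattern with illustrative types (the kernel side uses container_of and v4l2_device refcounting):

/*
 * Sketch only: the owning structure embeds the refcounted core as its first
 * member, and all memory is freed from the release callback that runs when
 * the last reference goes away, not when the bus-level device is unbound.
 */
#include <stdlib.h>

struct core_dev {
    void (*release)(struct core_dev *core);
    unsigned int refcount;
};

struct radio {
    struct core_dev core;    /* first member, so the cast below is valid */
    void *buffer;
};

static void radio_release(struct core_dev *core)
{
    struct radio *radio = (struct radio *)core;  /* container_of-style recovery */

    free(radio->buffer);
    free(radio);
}

static void core_dev_put(struct core_dev *core)
{
    if (--core->refcount == 0)
        core->release(core);
}
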
426 +diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
427 +index 257ae0d8cfe2..e3f63299f85c 100644
428 +--- a/drivers/media/usb/au0828/au0828-core.c
429 ++++ b/drivers/media/usb/au0828/au0828-core.c
430 +@@ -623,6 +623,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
431 + /* Setup */
432 + au0828_card_setup(dev);
433 +
434 ++ /*
435 ++ * Store the pointer to the au0828_dev so it can be accessed in
436 ++ * au0828_usb_disconnect
437 ++ */
438 ++ usb_set_intfdata(interface, dev);
439 ++
440 + /* Analog TV */
441 + retval = au0828_analog_register(dev, interface);
442 + if (retval) {
443 +@@ -641,12 +647,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
444 + /* Remote controller */
445 + au0828_rc_register(dev);
446 +
447 +- /*
448 +- * Store the pointer to the au0828_dev so it can be accessed in
449 +- * au0828_usb_disconnect
450 +- */
451 +- usb_set_intfdata(interface, dev);
452 +-
453 + pr_info("Registered device AU0828 [%s]\n",
454 + dev->board.name == NULL ? "Unset" : dev->board.name);
455 +
456 +diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
457 +index 6089036049d9..0964ff556f4e 100644
458 +--- a/drivers/media/usb/cpia2/cpia2_usb.c
459 ++++ b/drivers/media/usb/cpia2/cpia2_usb.c
460 +@@ -901,7 +901,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
461 + cpia2_unregister_camera(cam);
462 + v4l2_device_disconnect(&cam->v4l2_dev);
463 + mutex_unlock(&cam->v4l2_lock);
464 +- v4l2_device_put(&cam->v4l2_dev);
465 +
466 + if(cam->buffers) {
467 + DBG("Wakeup waiting processes\n");
468 +@@ -913,6 +912,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
469 + DBG("Releasing interface\n");
470 + usb_driver_release_interface(&cpia2_driver, intf);
471 +
472 ++ v4l2_device_put(&cam->v4l2_dev);
473 ++
474 + LOG("CPiA2 camera disconnected.\n");
475 + }
476 +
477 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
478 +index ddededc4ced4..18db7aaafcd6 100644
479 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
480 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
481 +@@ -1680,7 +1680,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
482 + }
483 + if (!hdw->flag_decoder_missed) {
484 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
485 +- "WARNING: No decoder present");
486 ++ "***WARNING*** No decoder present");
487 + hdw->flag_decoder_missed = !0;
488 + trace_stbit("flag_decoder_missed",
489 + hdw->flag_decoder_missed);
490 +@@ -2365,7 +2365,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
491 + if (hdw_desc->flag_is_experimental) {
492 + pvr2_trace(PVR2_TRACE_INFO, "**********");
493 + pvr2_trace(PVR2_TRACE_INFO,
494 +- "WARNING: Support for this device (%s) is experimental.",
495 ++ "***WARNING*** Support for this device (%s) is experimental.",
496 + hdw_desc->description);
497 + pvr2_trace(PVR2_TRACE_INFO,
498 + "Important functionality might not be entirely working.");
499 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
500 +index ff7b4d1d385d..f57ddb382dbf 100644
501 +--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
502 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
503 +@@ -343,11 +343,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
504 +
505 + if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
506 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
507 +- "WARNING: Detected a wedged cx25840 chip; the device will not work.");
508 ++ "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
509 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
510 +- "WARNING: Try power cycling the pvrusb2 device.");
511 ++ "***WARNING*** Try power cycling the pvrusb2 device.");
512 + pvr2_trace(PVR2_TRACE_ERROR_LEGS,
513 +- "WARNING: Disabling further access to the device to prevent other foul-ups.");
514 ++ "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
515 + // This blocks all further communication with the part.
516 + hdw->i2c_func[0x44] = NULL;
517 + pvr2_hdw_render_useless(hdw);
518 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
519 +index 21bb20dba82c..243e2704ce3a 100644
520 +--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
521 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
522 +@@ -353,7 +353,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
523 + bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
524 + pvr2_trace(
525 + PVR2_TRACE_ERROR_LEGS,
526 +- "WARNING: Failed to classify the following standard(s): %.*s",
527 ++ "***WARNING*** Failed to classify the following standard(s): %.*s",
528 + bcnt,buf);
529 + }
530 +
531 +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
532 +index d4803ff5a78a..f09a4ad2e9de 100644
533 +--- a/drivers/net/wireless/ath/ath10k/usb.c
534 ++++ b/drivers/net/wireless/ath/ath10k/usb.c
535 +@@ -1025,7 +1025,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
536 + }
537 +
538 + /* TODO: remove this once USB support is fully implemented */
539 +- ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
540 ++ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
541 +
542 + return 0;
543 +
544 +diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
545 +index 6eb0db37dd88..574b08af0d98 100644
546 +--- a/drivers/pps/pps.c
547 ++++ b/drivers/pps/pps.c
548 +@@ -166,6 +166,14 @@ static long pps_cdev_ioctl(struct file *file,
549 + pps->params.mode |= PPS_CANWAIT;
550 + pps->params.api_version = PPS_API_VERS;
551 +
552 ++ /*
553 ++ * Clear unused fields of pps_kparams to avoid leaking
554 ++ * uninitialized data of the PPS_SETPARAMS caller via
555 ++ * PPS_GETPARAMS
556 ++ */
557 ++ pps->params.assert_off_tu.flags = 0;
558 ++ pps->params.clear_off_tu.flags = 0;
559 ++
560 + spin_unlock_irq(&pps->lock);
561 +
562 + break;
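
The pps hunk plugs an information leak: PPS_SETPARAMS copied the whole pps_kparams structure from the caller, including flags fields the driver never uses, and PPS_GETPARAMS later handed whatever the setting process had left in those fields to any other caller. The general rule, sketched with illustrative types rather than the PPS uapi:

/*
 * Sketch only: when a structure copied in from one caller is later copied
 * back out to another, every field the driver does not consume must be
 * cleared explicitly, so caller-controlled or uninitialised bytes cannot
 * leak out again.
 */
#include <string.h>

struct time_spec_ish { long sec; long nsec; unsigned int flags; };

struct params {
    unsigned int api_version;
    unsigned int mode;
    struct time_spec_ish assert_off;  /* offset used, its flags field is not */
    struct time_spec_ish clear_off;   /* offset used, its flags field is not */
};

static struct params current_params;

static void set_params(const struct params *from_user)
{
    current_params = *from_user;

    /* Only keep what the driver actually consumes. */
    current_params.assert_off.flags = 0;
    current_params.clear_off.flags = 0;
}

static void get_params(struct params *to_user)
{
    /* Everything copied out is now driver-controlled, not caller-controlled. */
    memcpy(to_user, &current_params, sizeof(*to_user));
}
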
563 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
564 +index 92eb9c3052ee..238d24348a98 100644
565 +--- a/fs/ceph/caps.c
566 ++++ b/fs/ceph/caps.c
567 +@@ -1119,20 +1119,23 @@ static int send_cap_msg(struct cap_msg_args *arg)
568 + }
569 +
570 + /*
571 +- * Queue cap releases when an inode is dropped from our cache. Since
572 +- * inode is about to be destroyed, there is no need for i_ceph_lock.
573 ++ * Queue cap releases when an inode is dropped from our cache.
574 + */
575 + void ceph_queue_caps_release(struct inode *inode)
576 + {
577 + struct ceph_inode_info *ci = ceph_inode(inode);
578 + struct rb_node *p;
579 +
580 ++ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
581 ++ * may call __ceph_caps_issued_mask() on a freeing inode. */
582 ++ spin_lock(&ci->i_ceph_lock);
583 + p = rb_first(&ci->i_caps);
584 + while (p) {
585 + struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
586 + p = rb_next(p);
587 + __ceph_remove_cap(cap, true);
588 + }
589 ++ spin_unlock(&ci->i_ceph_lock);
590 + }
591 +
592 + /*
593 +diff --git a/fs/exec.c b/fs/exec.c
594 +index 0936b5a8199a..4623fc3ac86b 100644
595 +--- a/fs/exec.c
596 ++++ b/fs/exec.c
597 +@@ -1808,7 +1808,7 @@ static int do_execveat_common(int fd, struct filename *filename,
598 + current->in_execve = 0;
599 + membarrier_execve(current);
600 + acct_update_integrals(current);
601 +- task_numa_free(current);
602 ++ task_numa_free(current, false);
603 + free_bprm(bprm);
604 + kfree(pathbuf);
605 + putname(filename);
606 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
607 +index 0c7008fb6d5a..9e7d49fac4e3 100644
608 +--- a/fs/nfs/client.c
609 ++++ b/fs/nfs/client.c
610 +@@ -416,10 +416,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
611 + clp = nfs_match_client(cl_init);
612 + if (clp) {
613 + spin_unlock(&nn->nfs_client_lock);
614 +- if (IS_ERR(clp))
615 +- return clp;
616 + if (new)
617 + new->rpc_ops->free_client(new);
618 ++ if (IS_ERR(clp))
619 ++ return clp;
620 + return nfs_found_client(cl_init, clp);
621 + }
622 + if (new) {
623 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
624 +index bf2c43635062..85a6fdd76e20 100644
625 +--- a/fs/nfs/dir.c
626 ++++ b/fs/nfs/dir.c
627 +@@ -1059,6 +1059,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
628 + return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
629 + }
630 +
631 ++static int
632 ++nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
633 ++ struct inode *inode, int error)
634 ++{
635 ++ switch (error) {
636 ++ case 1:
637 ++ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
638 ++ __func__, dentry);
639 ++ return 1;
640 ++ case 0:
641 ++ nfs_mark_for_revalidate(dir);
642 ++ if (inode && S_ISDIR(inode->i_mode)) {
643 ++ /* Purge readdir caches. */
644 ++ nfs_zap_caches(inode);
645 ++ /*
646 ++ * We can't d_drop the root of a disconnected tree:
647 ++ * its d_hash is on the s_anon list and d_drop() would hide
648 ++ * it from shrink_dcache_for_unmount(), leading to busy
649 ++ * inodes on unmount and further oopses.
650 ++ */
651 ++ if (IS_ROOT(dentry))
652 ++ return 1;
653 ++ }
654 ++ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
655 ++ __func__, dentry);
656 ++ return 0;
657 ++ }
658 ++ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
659 ++ __func__, dentry, error);
660 ++ return error;
661 ++}
662 ++
663 ++static int
664 ++nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
665 ++ unsigned int flags)
666 ++{
667 ++ int ret = 1;
668 ++ if (nfs_neg_need_reval(dir, dentry, flags)) {
669 ++ if (flags & LOOKUP_RCU)
670 ++ return -ECHILD;
671 ++ ret = 0;
672 ++ }
673 ++ return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
674 ++}
675 ++
676 ++static int
677 ++nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
678 ++ struct inode *inode)
679 ++{
680 ++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
681 ++ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
682 ++}
683 ++
684 ++static int
685 ++nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
686 ++ struct inode *inode)
687 ++{
688 ++ struct nfs_fh *fhandle;
689 ++ struct nfs_fattr *fattr;
690 ++ struct nfs4_label *label;
691 ++ int ret;
692 ++
693 ++ ret = -ENOMEM;
694 ++ fhandle = nfs_alloc_fhandle();
695 ++ fattr = nfs_alloc_fattr();
696 ++ label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
697 ++ if (fhandle == NULL || fattr == NULL || IS_ERR(label))
698 ++ goto out;
699 ++
700 ++ ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
701 ++ if (ret < 0) {
702 ++ if (ret == -ESTALE || ret == -ENOENT)
703 ++ ret = 0;
704 ++ goto out;
705 ++ }
706 ++ ret = 0;
707 ++ if (nfs_compare_fh(NFS_FH(inode), fhandle))
708 ++ goto out;
709 ++ if (nfs_refresh_inode(inode, fattr) < 0)
710 ++ goto out;
711 ++
712 ++ nfs_setsecurity(inode, fattr, label);
713 ++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
714 ++
715 ++ /* set a readdirplus hint that we had a cache miss */
716 ++ nfs_force_use_readdirplus(dir);
717 ++ ret = 1;
718 ++out:
719 ++ nfs_free_fattr(fattr);
720 ++ nfs_free_fhandle(fhandle);
721 ++ nfs4_label_free(label);
722 ++ return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
723 ++}
724 ++
725 + /*
726 + * This is called every time the dcache has a lookup hit,
727 + * and we should check whether we can really trust that
728 +@@ -1070,58 +1164,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
729 + * If the parent directory is seen to have changed, we throw out the
730 + * cached dentry and do a new lookup.
731 + */
732 +-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
733 ++static int
734 ++nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
735 ++ unsigned int flags)
736 + {
737 +- struct inode *dir;
738 + struct inode *inode;
739 +- struct dentry *parent;
740 +- struct nfs_fh *fhandle = NULL;
741 +- struct nfs_fattr *fattr = NULL;
742 +- struct nfs4_label *label = NULL;
743 + int error;
744 +
745 +- if (flags & LOOKUP_RCU) {
746 +- parent = ACCESS_ONCE(dentry->d_parent);
747 +- dir = d_inode_rcu(parent);
748 +- if (!dir)
749 +- return -ECHILD;
750 +- } else {
751 +- parent = dget_parent(dentry);
752 +- dir = d_inode(parent);
753 +- }
754 + nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
755 + inode = d_inode(dentry);
756 +
757 +- if (!inode) {
758 +- if (nfs_neg_need_reval(dir, dentry, flags)) {
759 +- if (flags & LOOKUP_RCU)
760 +- return -ECHILD;
761 +- goto out_bad;
762 +- }
763 +- goto out_valid;
764 +- }
765 ++ if (!inode)
766 ++ return nfs_lookup_revalidate_negative(dir, dentry, flags);
767 +
768 + if (is_bad_inode(inode)) {
769 +- if (flags & LOOKUP_RCU)
770 +- return -ECHILD;
771 + dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
772 + __func__, dentry);
773 + goto out_bad;
774 + }
775 +
776 + if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
777 +- goto out_set_verifier;
778 ++ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
779 +
780 + /* Force a full look up iff the parent directory has changed */
781 + if (!nfs_is_exclusive_create(dir, flags) &&
782 + nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
783 + error = nfs_lookup_verify_inode(inode, flags);
784 + if (error) {
785 +- if (flags & LOOKUP_RCU)
786 +- return -ECHILD;
787 + if (error == -ESTALE)
788 +- goto out_zap_parent;
789 +- goto out_error;
790 ++ nfs_zap_caches(dir);
791 ++ goto out_bad;
792 + }
793 + nfs_advise_use_readdirplus(dir);
794 + goto out_valid;
795 +@@ -1133,81 +1205,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
796 + if (NFS_STALE(inode))
797 + goto out_bad;
798 +
799 +- error = -ENOMEM;
800 +- fhandle = nfs_alloc_fhandle();
801 +- fattr = nfs_alloc_fattr();
802 +- if (fhandle == NULL || fattr == NULL)
803 +- goto out_error;
804 +-
805 +- label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
806 +- if (IS_ERR(label))
807 +- goto out_error;
808 +-
809 + trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
810 +- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
811 ++ error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
812 + trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
813 +- if (error == -ESTALE || error == -ENOENT)
814 +- goto out_bad;
815 +- if (error)
816 +- goto out_error;
817 +- if (nfs_compare_fh(NFS_FH(inode), fhandle))
818 +- goto out_bad;
819 +- if ((error = nfs_refresh_inode(inode, fattr)) != 0)
820 +- goto out_bad;
821 +-
822 +- nfs_setsecurity(inode, fattr, label);
823 +-
824 +- nfs_free_fattr(fattr);
825 +- nfs_free_fhandle(fhandle);
826 +- nfs4_label_free(label);
827 ++ return error;
828 ++out_valid:
829 ++ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
830 ++out_bad:
831 ++ if (flags & LOOKUP_RCU)
832 ++ return -ECHILD;
833 ++ return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
834 ++}
835 +
836 +- /* set a readdirplus hint that we had a cache miss */
837 +- nfs_force_use_readdirplus(dir);
838 ++static int
839 ++__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
840 ++ int (*reval)(struct inode *, struct dentry *, unsigned int))
841 ++{
842 ++ struct dentry *parent;
843 ++ struct inode *dir;
844 ++ int ret;
845 +
846 +-out_set_verifier:
847 +- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
848 +- out_valid:
849 + if (flags & LOOKUP_RCU) {
850 ++ parent = ACCESS_ONCE(dentry->d_parent);
851 ++ dir = d_inode_rcu(parent);
852 ++ if (!dir)
853 ++ return -ECHILD;
854 ++ ret = reval(dir, dentry, flags);
855 + if (parent != ACCESS_ONCE(dentry->d_parent))
856 + return -ECHILD;
857 +- } else
858 ++ } else {
859 ++ parent = dget_parent(dentry);
860 ++ ret = reval(d_inode(parent), dentry, flags);
861 + dput(parent);
862 +- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
863 +- __func__, dentry);
864 +- return 1;
865 +-out_zap_parent:
866 +- nfs_zap_caches(dir);
867 +- out_bad:
868 +- WARN_ON(flags & LOOKUP_RCU);
869 +- nfs_free_fattr(fattr);
870 +- nfs_free_fhandle(fhandle);
871 +- nfs4_label_free(label);
872 +- nfs_mark_for_revalidate(dir);
873 +- if (inode && S_ISDIR(inode->i_mode)) {
874 +- /* Purge readdir caches. */
875 +- nfs_zap_caches(inode);
876 +- /*
877 +- * We can't d_drop the root of a disconnected tree:
878 +- * its d_hash is on the s_anon list and d_drop() would hide
879 +- * it from shrink_dcache_for_unmount(), leading to busy
880 +- * inodes on unmount and further oopses.
881 +- */
882 +- if (IS_ROOT(dentry))
883 +- goto out_valid;
884 + }
885 +- dput(parent);
886 +- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
887 +- __func__, dentry);
888 +- return 0;
889 +-out_error:
890 +- WARN_ON(flags & LOOKUP_RCU);
891 +- nfs_free_fattr(fattr);
892 +- nfs_free_fhandle(fhandle);
893 +- nfs4_label_free(label);
894 +- dput(parent);
895 +- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
896 +- __func__, dentry, error);
897 +- return error;
898 ++ return ret;
899 ++}
900 ++
901 ++static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
902 ++{
903 ++ return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
904 + }
905 +
906 + /*
907 +@@ -1560,62 +1596,55 @@ no_open:
908 + }
909 + EXPORT_SYMBOL_GPL(nfs_atomic_open);
910 +
911 +-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
912 ++static int
913 ++nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
914 ++ unsigned int flags)
915 + {
916 + struct inode *inode;
917 +- int ret = 0;
918 +
919 + if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
920 +- goto no_open;
921 ++ goto full_reval;
922 + if (d_mountpoint(dentry))
923 +- goto no_open;
924 +- if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
925 +- goto no_open;
926 ++ goto full_reval;
927 +
928 + inode = d_inode(dentry);
929 +
930 + /* We can't create new files in nfs_open_revalidate(), so we
931 + * optimize away revalidation of negative dentries.
932 + */
933 +- if (inode == NULL) {
934 +- struct dentry *parent;
935 +- struct inode *dir;
936 +-
937 +- if (flags & LOOKUP_RCU) {
938 +- parent = ACCESS_ONCE(dentry->d_parent);
939 +- dir = d_inode_rcu(parent);
940 +- if (!dir)
941 +- return -ECHILD;
942 +- } else {
943 +- parent = dget_parent(dentry);
944 +- dir = d_inode(parent);
945 +- }
946 +- if (!nfs_neg_need_reval(dir, dentry, flags))
947 +- ret = 1;
948 +- else if (flags & LOOKUP_RCU)
949 +- ret = -ECHILD;
950 +- if (!(flags & LOOKUP_RCU))
951 +- dput(parent);
952 +- else if (parent != ACCESS_ONCE(dentry->d_parent))
953 +- return -ECHILD;
954 +- goto out;
955 +- }
956 ++ if (inode == NULL)
957 ++ goto full_reval;
958 ++
959 ++ if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
960 ++ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
961 +
962 + /* NFS only supports OPEN on regular files */
963 + if (!S_ISREG(inode->i_mode))
964 +- goto no_open;
965 ++ goto full_reval;
966 ++
967 + /* We cannot do exclusive creation on a positive dentry */
968 +- if (flags & LOOKUP_EXCL)
969 +- goto no_open;
970 ++ if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
971 ++ goto reval_dentry;
972 ++
973 ++ /* Check if the directory changed */
974 ++ if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
975 ++ goto reval_dentry;
976 +
977 + /* Let f_op->open() actually open (and revalidate) the file */
978 +- ret = 1;
979 ++ return 1;
980 ++reval_dentry:
981 ++ if (flags & LOOKUP_RCU)
982 ++ return -ECHILD;
983 ++ return nfs_lookup_revalidate_dentry(dir, dentry, inode);;
984 +
985 +-out:
986 +- return ret;
987 ++full_reval:
988 ++ return nfs_do_lookup_revalidate(dir, dentry, flags);
989 ++}
990 +
991 +-no_open:
992 +- return nfs_lookup_revalidate(dentry, flags);
993 ++static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
994 ++{
995 ++ return __nfs_lookup_revalidate(dentry, flags,
996 ++ nfs4_do_lookup_revalidate);
997 + }
998 +
999 + #endif /* CONFIG_NFSV4 */
1000 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1001 +index 209a21ed5f97..27deee5c8fa8 100644
1002 +--- a/fs/nfs/nfs4proc.c
1003 ++++ b/fs/nfs/nfs4proc.c
1004 +@@ -1317,12 +1317,20 @@ static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1005 + return false;
1006 + }
1007 +
1008 +-static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1009 ++static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1010 ++ int open_mode, enum open_claim_type4 claim)
1011 + {
1012 + int ret = 0;
1013 +
1014 + if (open_mode & (O_EXCL|O_TRUNC))
1015 + goto out;
1016 ++ switch (claim) {
1017 ++ case NFS4_OPEN_CLAIM_NULL:
1018 ++ case NFS4_OPEN_CLAIM_FH:
1019 ++ goto out;
1020 ++ default:
1021 ++ break;
1022 ++ }
1023 + switch (mode & (FMODE_READ|FMODE_WRITE)) {
1024 + case FMODE_READ:
1025 + ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1026 +@@ -1617,7 +1625,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1027 +
1028 + for (;;) {
1029 + spin_lock(&state->owner->so_lock);
1030 +- if (can_open_cached(state, fmode, open_mode)) {
1031 ++ if (can_open_cached(state, fmode, open_mode, claim)) {
1032 + update_open_stateflags(state, fmode);
1033 + spin_unlock(&state->owner->so_lock);
1034 + goto out_return_state;
1035 +@@ -2141,7 +2149,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1036 + if (data->state != NULL) {
1037 + struct nfs_delegation *delegation;
1038 +
1039 +- if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1040 ++ if (can_open_cached(data->state, data->o_arg.fmode,
1041 ++ data->o_arg.open_flags, claim))
1042 + goto out_no_action;
1043 + rcu_read_lock();
1044 + delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1045 +diff --git a/include/linux/iova.h b/include/linux/iova.h
1046 +index d179b9bf7814..7d23bbb887f2 100644
1047 +--- a/include/linux/iova.h
1048 ++++ b/include/linux/iova.h
1049 +@@ -154,6 +154,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
1050 + void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
1051 + void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
1052 + unsigned long start_pfn, unsigned long pfn_32bit);
1053 ++bool has_iova_flush_queue(struct iova_domain *iovad);
1054 + int init_iova_flush_queue(struct iova_domain *iovad,
1055 + iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
1056 + struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
1057 +@@ -234,6 +235,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
1058 + {
1059 + }
1060 +
1061 ++static inline bool has_iova_flush_queue(struct iova_domain *iovad)
1062 ++{
1063 ++ return false;
1064 ++}
1065 ++
1066 + static inline int init_iova_flush_queue(struct iova_domain *iovad,
1067 + iova_flush_cb flush_cb,
1068 + iova_entry_dtor entry_dtor)
1069 +diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
1070 +index e7dd04a84ba8..3988762efe15 100644
1071 +--- a/include/linux/sched/numa_balancing.h
1072 ++++ b/include/linux/sched/numa_balancing.h
1073 +@@ -19,7 +19,7 @@
1074 + extern void task_numa_fault(int last_node, int node, int pages, int flags);
1075 + extern pid_t task_numa_group_id(struct task_struct *p);
1076 + extern void set_numabalancing_state(bool enabled);
1077 +-extern void task_numa_free(struct task_struct *p);
1078 ++extern void task_numa_free(struct task_struct *p, bool final);
1079 + extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1080 + int src_nid, int dst_cpu);
1081 + #else
1082 +@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
1083 + static inline void set_numabalancing_state(bool enabled)
1084 + {
1085 + }
1086 +-static inline void task_numa_free(struct task_struct *p)
1087 ++static inline void task_numa_free(struct task_struct *p, bool final)
1088 + {
1089 + }
1090 + static inline bool should_numa_migrate_memory(struct task_struct *p,
1091 +diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
1092 +index 5fb3f6361090..d3775b5379e4 100644
1093 +--- a/include/net/af_vsock.h
1094 ++++ b/include/net/af_vsock.h
1095 +@@ -22,9 +22,6 @@
1096 +
1097 + #include "vsock_addr.h"
1098 +
1099 +-/* vsock-specific sock->sk_state constants */
1100 +-#define VSOCK_SS_LISTEN 255
1101 +-
1102 + #define LAST_RESERVED_PORT 1023
1103 +
1104 + #define vsock_sk(__sk) ((struct vsock_sock *)__sk)
1105 +diff --git a/kernel/fork.c b/kernel/fork.c
1106 +index a5bb8fad5475..919e7cd5cd23 100644
1107 +--- a/kernel/fork.c
1108 ++++ b/kernel/fork.c
1109 +@@ -415,7 +415,7 @@ void __put_task_struct(struct task_struct *tsk)
1110 + WARN_ON(tsk == current);
1111 +
1112 + cgroup_free(tsk);
1113 +- task_numa_free(tsk);
1114 ++ task_numa_free(tsk, true);
1115 + security_task_free(tsk);
1116 + exit_creds(tsk);
1117 + delayacct_tsk_free(tsk);
1118 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1119 +index af7de1f9906c..0a4e882d4308 100644
1120 +--- a/kernel/sched/fair.c
1121 ++++ b/kernel/sched/fair.c
1122 +@@ -2358,13 +2358,23 @@ no_join:
1123 + return;
1124 + }
1125 +
1126 +-void task_numa_free(struct task_struct *p)
1127 ++/*
1128 ++ * Get rid of NUMA staticstics associated with a task (either current or dead).
1129 ++ * If @final is set, the task is dead and has reached refcount zero, so we can
1130 ++ * safely free all relevant data structures. Otherwise, there might be
1131 ++ * concurrent reads from places like load balancing and procfs, and we should
1132 ++ * reset the data back to default state without freeing ->numa_faults.
1133 ++ */
1134 ++void task_numa_free(struct task_struct *p, bool final)
1135 + {
1136 + struct numa_group *grp = p->numa_group;
1137 +- void *numa_faults = p->numa_faults;
1138 ++ unsigned long *numa_faults = p->numa_faults;
1139 + unsigned long flags;
1140 + int i;
1141 +
1142 ++ if (!numa_faults)
1143 ++ return;
1144 ++
1145 + if (grp) {
1146 + spin_lock_irqsave(&grp->lock, flags);
1147 + for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1148 +@@ -2377,8 +2387,14 @@ void task_numa_free(struct task_struct *p)
1149 + put_numa_group(grp);
1150 + }
1151 +
1152 +- p->numa_faults = NULL;
1153 +- kfree(numa_faults);
1154 ++ if (final) {
1155 ++ p->numa_faults = NULL;
1156 ++ kfree(numa_faults);
1157 ++ } else {
1158 ++ p->total_numa_faults = 0;
1159 ++ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
1160 ++ numa_faults[i] = 0;
1161 ++ }
1162 + }
1163 +
1164 + /*
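
The scheduler hunks split task_numa_free() into two behaviours: on the final put of a task (refcount zero) the ->numa_faults array really is freed, but on exec, where the comment above notes that load balancing or procfs readers can still be looking at the task, the statistics are only reset in place. The pattern in isolation, with illustrative types rather than the scheduler's structures:

/*
 * Sketch only: statistics that other threads may still be reading can only
 * be reset in place; the backing allocation is freed solely on the final
 * teardown, when no concurrent reader can exist any more.
 */
#include <stdlib.h>
#include <string.h>

struct task_stats {
    unsigned long *faults;   /* per-node counters, may be read concurrently */
    size_t nr_counters;
    unsigned long total;
};

static void task_stats_free(struct task_stats *s, int final)
{
    unsigned long *faults = s->faults;

    if (!faults)
        return;

    if (final) {
        /* Last reference is gone: nobody can read the array any more. */
        s->faults = NULL;
        free(faults);
    } else {
        /* The task lives on (e.g. across exec): readers may still hold the
         * pointer, so wipe the contents but keep the allocation. */
        s->total = 0;
        memset(faults, 0, s->nr_counters * sizeof(*faults));
    }
}
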
1165 +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
1166 +index 423091727e15..2aaf7f8a3a96 100644
1167 +--- a/net/ipv4/ip_tunnel_core.c
1168 ++++ b/net/ipv4/ip_tunnel_core.c
1169 +@@ -89,9 +89,12 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
1170 + __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
1171 +
1172 + err = ip_local_out(net, sk, skb);
1173 +- if (unlikely(net_xmit_eval(err)))
1174 +- pkt_len = 0;
1175 +- iptunnel_xmit_stats(dev, pkt_len);
1176 ++
1177 ++ if (dev) {
1178 ++ if (unlikely(net_xmit_eval(err)))
1179 ++ pkt_len = 0;
1180 ++ iptunnel_xmit_stats(dev, pkt_len);
1181 ++ }
1182 + }
1183 + EXPORT_SYMBOL_GPL(iptunnel_xmit);
1184 +
1185 +diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
1186 +index f2fd556c1233..b41170417316 100644
1187 +--- a/net/vmw_vsock/af_vsock.c
1188 ++++ b/net/vmw_vsock/af_vsock.c
1189 +@@ -36,7 +36,7 @@
1190 + * not support simultaneous connects (two "client" sockets connecting).
1191 + *
1192 + * - "Server" sockets are referred to as listener sockets throughout this
1193 +- * implementation because they are in the VSOCK_SS_LISTEN state. When a
1194 ++ * implementation because they are in the TCP_LISTEN state. When a
1195 + * connection request is received (the second kind of socket mentioned above),
1196 + * we create a new socket and refer to it as a pending socket. These pending
1197 + * sockets are placed on the pending connection list of the listener socket.
1198 +@@ -82,6 +82,15 @@
1199 + * argument, we must ensure the reference count is increased to ensure the
1200 + * socket isn't freed before the function is run; the deferred function will
1201 + * then drop the reference.
1202 ++ *
1203 ++ * - sk->sk_state uses the TCP state constants because they are widely used by
1204 ++ * other address families and exposed to userspace tools like ss(8):
1205 ++ *
1206 ++ * TCP_CLOSE - unconnected
1207 ++ * TCP_SYN_SENT - connecting
1208 ++ * TCP_ESTABLISHED - connected
1209 ++ * TCP_CLOSING - disconnecting
1210 ++ * TCP_LISTEN - listening
1211 + */
1212 +
1213 + #include <linux/types.h>
1214 +@@ -279,7 +288,8 @@ EXPORT_SYMBOL_GPL(vsock_insert_connected);
1215 + void vsock_remove_bound(struct vsock_sock *vsk)
1216 + {
1217 + spin_lock_bh(&vsock_table_lock);
1218 +- __vsock_remove_bound(vsk);
1219 ++ if (__vsock_in_bound_table(vsk))
1220 ++ __vsock_remove_bound(vsk);
1221 + spin_unlock_bh(&vsock_table_lock);
1222 + }
1223 + EXPORT_SYMBOL_GPL(vsock_remove_bound);
1224 +@@ -287,7 +297,8 @@ EXPORT_SYMBOL_GPL(vsock_remove_bound);
1225 + void vsock_remove_connected(struct vsock_sock *vsk)
1226 + {
1227 + spin_lock_bh(&vsock_table_lock);
1228 +- __vsock_remove_connected(vsk);
1229 ++ if (__vsock_in_connected_table(vsk))
1230 ++ __vsock_remove_connected(vsk);
1231 + spin_unlock_bh(&vsock_table_lock);
1232 + }
1233 + EXPORT_SYMBOL_GPL(vsock_remove_connected);
1234 +@@ -323,35 +334,10 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
1235 + }
1236 + EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
1237 +
1238 +-static bool vsock_in_bound_table(struct vsock_sock *vsk)
1239 +-{
1240 +- bool ret;
1241 +-
1242 +- spin_lock_bh(&vsock_table_lock);
1243 +- ret = __vsock_in_bound_table(vsk);
1244 +- spin_unlock_bh(&vsock_table_lock);
1245 +-
1246 +- return ret;
1247 +-}
1248 +-
1249 +-static bool vsock_in_connected_table(struct vsock_sock *vsk)
1250 +-{
1251 +- bool ret;
1252 +-
1253 +- spin_lock_bh(&vsock_table_lock);
1254 +- ret = __vsock_in_connected_table(vsk);
1255 +- spin_unlock_bh(&vsock_table_lock);
1256 +-
1257 +- return ret;
1258 +-}
1259 +-
1260 + void vsock_remove_sock(struct vsock_sock *vsk)
1261 + {
1262 +- if (vsock_in_bound_table(vsk))
1263 +- vsock_remove_bound(vsk);
1264 +-
1265 +- if (vsock_in_connected_table(vsk))
1266 +- vsock_remove_connected(vsk);
1267 ++ vsock_remove_bound(vsk);
1268 ++ vsock_remove_connected(vsk);
1269 + }
1270 + EXPORT_SYMBOL_GPL(vsock_remove_sock);
1271 +
1272 +@@ -482,10 +468,9 @@ static void vsock_pending_work(struct work_struct *work)
1273 + * incoming packets can't find this socket, and to reduce the reference
1274 + * count.
1275 + */
1276 +- if (vsock_in_connected_table(vsk))
1277 +- vsock_remove_connected(vsk);
1278 ++ vsock_remove_connected(vsk);
1279 +
1280 +- sk->sk_state = SS_FREE;
1281 ++ sk->sk_state = TCP_CLOSE;
1282 +
1283 + out:
1284 + release_sock(sk);
1285 +@@ -626,7 +611,6 @@ struct sock *__vsock_create(struct net *net,
1286 +
1287 + sk->sk_destruct = vsock_sk_destruct;
1288 + sk->sk_backlog_rcv = vsock_queue_rcv_skb;
1289 +- sk->sk_state = 0;
1290 + sock_reset_flag(sk, SOCK_DONE);
1291 +
1292 + INIT_LIST_HEAD(&vsk->bound_table);
1293 +@@ -902,7 +886,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
1294 + /* Listening sockets that have connections in their accept
1295 + * queue can be read.
1296 + */
1297 +- if (sk->sk_state == VSOCK_SS_LISTEN
1298 ++ if (sk->sk_state == TCP_LISTEN
1299 + && !vsock_is_accept_queue_empty(sk))
1300 + mask |= POLLIN | POLLRDNORM;
1301 +
1302 +@@ -931,7 +915,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
1303 + }
1304 +
1305 + /* Connected sockets that can produce data can be written. */
1306 +- if (sk->sk_state == SS_CONNECTED) {
1307 ++ if (sk->sk_state == TCP_ESTABLISHED) {
1308 + if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1309 + bool space_avail_now = false;
1310 + int ret = transport->notify_poll_out(
1311 +@@ -953,7 +937,7 @@ static unsigned int vsock_poll(struct file *file, struct socket *sock,
1312 + * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
1313 + * but local send is not shutdown.
1314 + */
1315 +- if (sk->sk_state == SS_UNCONNECTED) {
1316 ++ if (sk->sk_state == TCP_CLOSE) {
1317 + if (!(sk->sk_shutdown & SEND_SHUTDOWN))
1318 + mask |= POLLOUT | POLLWRNORM;
1319 +
1320 +@@ -1123,9 +1107,9 @@ static void vsock_connect_timeout(struct work_struct *work)
1321 + sk = sk_vsock(vsk);
1322 +
1323 + lock_sock(sk);
1324 +- if (sk->sk_state == SS_CONNECTING &&
1325 ++ if (sk->sk_state == TCP_SYN_SENT &&
1326 + (sk->sk_shutdown != SHUTDOWN_MASK)) {
1327 +- sk->sk_state = SS_UNCONNECTED;
1328 ++ sk->sk_state = TCP_CLOSE;
1329 + sk->sk_err = ETIMEDOUT;
1330 + sk->sk_error_report(sk);
1331 + cancel = 1;
1332 +@@ -1171,7 +1155,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1333 + err = -EALREADY;
1334 + break;
1335 + default:
1336 +- if ((sk->sk_state == VSOCK_SS_LISTEN) ||
1337 ++ if ((sk->sk_state == TCP_LISTEN) ||
1338 + vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1339 + err = -EINVAL;
1340 + goto out;
1341 +@@ -1194,7 +1178,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1342 + if (err)
1343 + goto out;
1344 +
1345 +- sk->sk_state = SS_CONNECTING;
1346 ++ sk->sk_state = TCP_SYN_SENT;
1347 +
1348 + err = transport->connect(vsk);
1349 + if (err < 0)
1350 +@@ -1214,7 +1198,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1351 + timeout = vsk->connect_timeout;
1352 + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1353 +
1354 +- while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
1355 ++ while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
1356 + if (flags & O_NONBLOCK) {
1357 + /* If we're not going to block, we schedule a timeout
1358 + * function to generate a timeout on the connection
1359 +@@ -1235,13 +1219,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1360 +
1361 + if (signal_pending(current)) {
1362 + err = sock_intr_errno(timeout);
1363 +- sk->sk_state = SS_UNCONNECTED;
1364 ++ sk->sk_state = TCP_CLOSE;
1365 + sock->state = SS_UNCONNECTED;
1366 + vsock_transport_cancel_pkt(vsk);
1367 + goto out_wait;
1368 + } else if (timeout == 0) {
1369 + err = -ETIMEDOUT;
1370 +- sk->sk_state = SS_UNCONNECTED;
1371 ++ sk->sk_state = TCP_CLOSE;
1372 + sock->state = SS_UNCONNECTED;
1373 + vsock_transport_cancel_pkt(vsk);
1374 + goto out_wait;
1375 +@@ -1252,7 +1236,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1376 +
1377 + if (sk->sk_err) {
1378 + err = -sk->sk_err;
1379 +- sk->sk_state = SS_UNCONNECTED;
1380 ++ sk->sk_state = TCP_CLOSE;
1381 + sock->state = SS_UNCONNECTED;
1382 + } else {
1383 + err = 0;
1384 +@@ -1285,7 +1269,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1385 + goto out;
1386 + }
1387 +
1388 +- if (listener->sk_state != VSOCK_SS_LISTEN) {
1389 ++ if (listener->sk_state != TCP_LISTEN) {
1390 + err = -EINVAL;
1391 + goto out;
1392 + }
1393 +@@ -1375,7 +1359,7 @@ static int vsock_listen(struct socket *sock, int backlog)
1394 + }
1395 +
1396 + sk->sk_max_ack_backlog = backlog;
1397 +- sk->sk_state = VSOCK_SS_LISTEN;
1398 ++ sk->sk_state = TCP_LISTEN;
1399 +
1400 + err = 0;
1401 +
1402 +@@ -1555,7 +1539,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1403 +
1404 + /* Callers should not provide a destination with stream sockets. */
1405 + if (msg->msg_namelen) {
1406 +- err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
1407 ++ err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1408 + goto out;
1409 + }
1410 +
1411 +@@ -1566,7 +1550,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1412 + goto out;
1413 + }
1414 +
1415 +- if (sk->sk_state != SS_CONNECTED ||
1416 ++ if (sk->sk_state != TCP_ESTABLISHED ||
1417 + !vsock_addr_bound(&vsk->local_addr)) {
1418 + err = -ENOTCONN;
1419 + goto out;
1420 +@@ -1690,7 +1674,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1421 +
1422 + lock_sock(sk);
1423 +
1424 +- if (sk->sk_state != SS_CONNECTED) {
1425 ++ if (sk->sk_state != TCP_ESTABLISHED) {
1426 + /* Recvmsg is supposed to return 0 if a peer performs an
1427 + * orderly shutdown. Differentiate between that case and when a
1428 + * peer has not connected or a local shutdown occured with the
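
Besides switching sk_state to the TCP constants, the af_vsock hunks above drop the vsock_in_bound_table()/vsock_in_connected_table() helpers: testing membership under the table lock, releasing it, and then re-taking it to remove the socket let the answer go stale in between, so the test and the removal now happen under a single lock acquisition. A minimal sketch of that fix, with POSIX threads and a plain flag standing in for vsock_table_lock and the bound/connected tables:

/*
 * Sketch only: membership is checked and cleared without dropping the lock
 * in between, so the check cannot be invalidated by a concurrent remover.
 */
#include <pthread.h>
#include <stdbool.h>

struct entry {
    bool in_table;                       /* protected by table_lock */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_entry(struct entry *e)
{
    pthread_mutex_lock(&table_lock);
    if (e->in_table)                     /* test ... */
        e->in_table = false;             /* ... and remove, under the same lock */
    pthread_mutex_unlock(&table_lock);
}
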
1429 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
1430 +index 3bee93bc5d11..52ac3e49c7ef 100644
1431 +--- a/net/vmw_vsock/hyperv_transport.c
1432 ++++ b/net/vmw_vsock/hyperv_transport.c
1433 +@@ -35,6 +35,9 @@
1434 + /* The MTU is 16KB per the host side's design */
1435 + #define HVS_MTU_SIZE (1024 * 16)
1436 +
1437 ++/* How long to wait for graceful shutdown of a connection */
1438 ++#define HVS_CLOSE_TIMEOUT (8 * HZ)
1439 ++
1440 + struct vmpipe_proto_header {
1441 + u32 pkt_type;
1442 + u32 data_size;
1443 +@@ -290,19 +293,32 @@ static void hvs_channel_cb(void *ctx)
1444 + sk->sk_write_space(sk);
1445 + }
1446 +
1447 +-static void hvs_close_connection(struct vmbus_channel *chan)
1448 ++static void hvs_do_close_lock_held(struct vsock_sock *vsk,
1449 ++ bool cancel_timeout)
1450 + {
1451 +- struct sock *sk = get_per_channel_state(chan);
1452 +- struct vsock_sock *vsk = vsock_sk(sk);
1453 +-
1454 +- lock_sock(sk);
1455 ++ struct sock *sk = sk_vsock(vsk);
1456 +
1457 +- sk->sk_state = SS_UNCONNECTED;
1458 + sock_set_flag(sk, SOCK_DONE);
1459 +- vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
1460 +-
1461 ++ vsk->peer_shutdown = SHUTDOWN_MASK;
1462 ++ if (vsock_stream_has_data(vsk) <= 0)
1463 ++ sk->sk_state = TCP_CLOSING;
1464 + sk->sk_state_change(sk);
1465 ++ if (vsk->close_work_scheduled &&
1466 ++ (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
1467 ++ vsk->close_work_scheduled = false;
1468 ++ vsock_remove_sock(vsk);
1469 +
1470 ++ /* Release the reference taken while scheduling the timeout */
1471 ++ sock_put(sk);
1472 ++ }
1473 ++}
1474 ++
1475 ++static void hvs_close_connection(struct vmbus_channel *chan)
1476 ++{
1477 ++ struct sock *sk = get_per_channel_state(chan);
1478 ++
1479 ++ lock_sock(sk);
1480 ++ hvs_do_close_lock_held(vsock_sk(sk), true);
1481 + release_sock(sk);
1482 + }
1483 +
1484 +@@ -336,8 +352,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
1485 +
1486 + lock_sock(sk);
1487 +
1488 +- if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
1489 +- (!conn_from_host && sk->sk_state != SS_CONNECTING))
1490 ++ if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
1491 ++ (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
1492 + goto out;
1493 +
1494 + if (conn_from_host) {
1495 +@@ -349,7 +365,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
1496 + if (!new)
1497 + goto out;
1498 +
1499 +- new->sk_state = SS_CONNECTING;
1500 ++ new->sk_state = TCP_SYN_SENT;
1501 + vnew = vsock_sk(new);
1502 + hvs_new = vnew->trans;
1503 + hvs_new->chan = chan;
1504 +@@ -383,7 +399,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
1505 + hvs_set_channel_pending_send_size(chan);
1506 +
1507 + if (conn_from_host) {
1508 +- new->sk_state = SS_CONNECTED;
1509 ++ new->sk_state = TCP_ESTABLISHED;
1510 + sk->sk_ack_backlog++;
1511 +
1512 + hvs_addr_init(&vnew->local_addr, if_type);
1513 +@@ -396,7 +412,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
1514 +
1515 + vsock_enqueue_accept(sk, new);
1516 + } else {
1517 +- sk->sk_state = SS_CONNECTED;
1518 ++ sk->sk_state = TCP_ESTABLISHED;
1519 + sk->sk_socket->state = SS_CONNECTED;
1520 +
1521 + vsock_insert_connected(vsock_sk(sk));
1522 +@@ -446,50 +462,80 @@ static int hvs_connect(struct vsock_sock *vsk)
1523 + return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
1524 + }
1525 +
1526 ++static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
1527 ++{
1528 ++ struct vmpipe_proto_header hdr;
1529 ++
1530 ++ if (hvs->fin_sent || !hvs->chan)
1531 ++ return;
1532 ++
1533 ++ /* It can't fail: see hvs_channel_writable_bytes(). */
1534 ++ (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
1535 ++ hvs->fin_sent = true;
1536 ++}
1537 ++
1538 + static int hvs_shutdown(struct vsock_sock *vsk, int mode)
1539 + {
1540 + struct sock *sk = sk_vsock(vsk);
1541 +- struct vmpipe_proto_header hdr;
1542 +- struct hvs_send_buf *send_buf;
1543 +- struct hvsock *hvs;
1544 +
1545 + if (!(mode & SEND_SHUTDOWN))
1546 + return 0;
1547 +
1548 + lock_sock(sk);
1549 ++ hvs_shutdown_lock_held(vsk->trans, mode);
1550 ++ release_sock(sk);
1551 ++ return 0;
1552 ++}
1553 +
1554 +- hvs = vsk->trans;
1555 +- if (hvs->fin_sent)
1556 +- goto out;
1557 +-
1558 +- send_buf = (struct hvs_send_buf *)&hdr;
1559 ++static void hvs_close_timeout(struct work_struct *work)
1560 ++{
1561 ++ struct vsock_sock *vsk =
1562 ++ container_of(work, struct vsock_sock, close_work.work);
1563 ++ struct sock *sk = sk_vsock(vsk);
1564 +
1565 +- /* It can't fail: see hvs_channel_writable_bytes(). */
1566 +- (void)hvs_send_data(hvs->chan, send_buf, 0);
1567 ++ sock_hold(sk);
1568 ++ lock_sock(sk);
1569 ++ if (!sock_flag(sk, SOCK_DONE))
1570 ++ hvs_do_close_lock_held(vsk, false);
1571 +
1572 +- hvs->fin_sent = true;
1573 +-out:
1574 ++ vsk->close_work_scheduled = false;
1575 + release_sock(sk);
1576 +- return 0;
1577 ++ sock_put(sk);
1578 + }
1579 +
1580 +-static void hvs_release(struct vsock_sock *vsk)
1581 ++/* Returns true, if it is safe to remove socket; false otherwise */
1582 ++static bool hvs_close_lock_held(struct vsock_sock *vsk)
1583 + {
1584 + struct sock *sk = sk_vsock(vsk);
1585 +- struct hvsock *hvs = vsk->trans;
1586 +- struct vmbus_channel *chan;
1587 +
1588 +- lock_sock(sk);
1589 ++ if (!(sk->sk_state == TCP_ESTABLISHED ||
1590 ++ sk->sk_state == TCP_CLOSING))
1591 ++ return true;
1592 +
1593 +- sk->sk_state = TCP_CLOSING;
1594 +- vsock_remove_sock(vsk);
1595 ++ if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
1596 ++ hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
1597 +
1598 +- release_sock(sk);
1599 ++ if (sock_flag(sk, SOCK_DONE))
1600 ++ return true;
1601 +
1602 +- chan = hvs->chan;
1603 +- if (chan)
1604 +- hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
1605 ++ /* This reference will be dropped by the delayed close routine */
1606 ++ sock_hold(sk);
1607 ++ INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
1608 ++ vsk->close_work_scheduled = true;
1609 ++ schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
1610 ++ return false;
1611 ++}
1612 +
1613 ++static void hvs_release(struct vsock_sock *vsk)
1614 ++{
1615 ++ struct sock *sk = sk_vsock(vsk);
1616 ++ bool remove_sock;
1617 ++
1618 ++ lock_sock(sk);
1619 ++ remove_sock = hvs_close_lock_held(vsk);
1620 ++ release_sock(sk);
1621 ++ if (remove_sock)
1622 ++ vsock_remove_sock(vsk);
1623 + }
1624 +
1625 + static void hvs_destruct(struct vsock_sock *vsk)
1626 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
1627 +index 5ebeef8ae3fa..96ab344f17bb 100644
1628 +--- a/net/vmw_vsock/virtio_transport.c
1629 ++++ b/net/vmw_vsock/virtio_transport.c
1630 +@@ -417,7 +417,7 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
1631 + static void virtio_vsock_reset_sock(struct sock *sk)
1632 + {
1633 + lock_sock(sk);
1634 +- sk->sk_state = SS_UNCONNECTED;
1635 ++ sk->sk_state = TCP_CLOSE;
1636 + sk->sk_err = ECONNRESET;
1637 + sk->sk_error_report(sk);
1638 + release_sock(sk);
1639 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1640 +index 84d3c0aadd73..f3f3d06cb6d8 100644
1641 +--- a/net/vmw_vsock/virtio_transport_common.c
1642 ++++ b/net/vmw_vsock/virtio_transport_common.c
1643 +@@ -716,7 +716,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
1644 + sock_set_flag(sk, SOCK_DONE);
1645 + vsk->peer_shutdown = SHUTDOWN_MASK;
1646 + if (vsock_stream_has_data(vsk) <= 0)
1647 +- sk->sk_state = SS_DISCONNECTING;
1648 ++ sk->sk_state = TCP_CLOSING;
1649 + sk->sk_state_change(sk);
1650 +
1651 + if (vsk->close_work_scheduled &&
1652 +@@ -756,8 +756,8 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
1653 + {
1654 + struct sock *sk = &vsk->sk;
1655 +
1656 +- if (!(sk->sk_state == SS_CONNECTED ||
1657 +- sk->sk_state == SS_DISCONNECTING))
1658 ++ if (!(sk->sk_state == TCP_ESTABLISHED ||
1659 ++ sk->sk_state == TCP_CLOSING))
1660 + return true;
1661 +
1662 + /* Already received SHUTDOWN from peer, reply with RST */
1663 +@@ -816,7 +816,7 @@ virtio_transport_recv_connecting(struct sock *sk,
1664 +
1665 + switch (le16_to_cpu(pkt->hdr.op)) {
1666 + case VIRTIO_VSOCK_OP_RESPONSE:
1667 +- sk->sk_state = SS_CONNECTED;
1668 ++ sk->sk_state = TCP_ESTABLISHED;
1669 + sk->sk_socket->state = SS_CONNECTED;
1670 + vsock_insert_connected(vsk);
1671 + sk->sk_state_change(sk);
1672 +@@ -836,7 +836,7 @@ virtio_transport_recv_connecting(struct sock *sk,
1673 +
1674 + destroy:
1675 + virtio_transport_reset(vsk, pkt);
1676 +- sk->sk_state = SS_UNCONNECTED;
1677 ++ sk->sk_state = TCP_CLOSE;
1678 + sk->sk_err = skerr;
1679 + sk->sk_error_report(sk);
1680 + return err;
1681 +@@ -872,7 +872,7 @@ virtio_transport_recv_connected(struct sock *sk,
1682 + vsk->peer_shutdown |= SEND_SHUTDOWN;
1683 + if (vsk->peer_shutdown == SHUTDOWN_MASK &&
1684 + vsock_stream_has_data(vsk) <= 0)
1685 +- sk->sk_state = SS_DISCONNECTING;
1686 ++ sk->sk_state = TCP_CLOSING;
1687 + if (le32_to_cpu(pkt->hdr.flags))
1688 + sk->sk_state_change(sk);
1689 + break;
1690 +@@ -943,7 +943,7 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
1691 +
1692 + lock_sock_nested(child, SINGLE_DEPTH_NESTING);
1693 +
1694 +- child->sk_state = SS_CONNECTED;
1695 ++ child->sk_state = TCP_ESTABLISHED;
1696 +
1697 + vchild = vsock_sk(child);
1698 + vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
1699 +@@ -1031,18 +1031,18 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1700 + sk->sk_write_space(sk);
1701 +
1702 + switch (sk->sk_state) {
1703 +- case VSOCK_SS_LISTEN:
1704 ++ case TCP_LISTEN:
1705 + virtio_transport_recv_listen(sk, pkt);
1706 + virtio_transport_free_pkt(pkt);
1707 + break;
1708 +- case SS_CONNECTING:
1709 ++ case TCP_SYN_SENT:
1710 + virtio_transport_recv_connecting(sk, pkt);
1711 + virtio_transport_free_pkt(pkt);
1712 + break;
1713 +- case SS_CONNECTED:
1714 ++ case TCP_ESTABLISHED:
1715 + virtio_transport_recv_connected(sk, pkt);
1716 + break;
1717 +- case SS_DISCONNECTING:
1718 ++ case TCP_CLOSING:
1719 + virtio_transport_recv_disconnecting(sk, pkt);
1720 + virtio_transport_free_pkt(pkt);
1721 + break;
1722 +diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
1723 +index ad3f47a714f3..ba4cb18c4b9a 100644
1724 +--- a/net/vmw_vsock/vmci_transport.c
1725 ++++ b/net/vmw_vsock/vmci_transport.c
1726 +@@ -776,7 +776,7 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
1727 + /* The local context ID may be out of date, update it. */
1728 + vsk->local_addr.svm_cid = dst.svm_cid;
1729 +
1730 +- if (sk->sk_state == SS_CONNECTED)
1731 ++ if (sk->sk_state == TCP_ESTABLISHED)
1732 + vmci_trans(vsk)->notify_ops->handle_notify_pkt(
1733 + sk, pkt, true, &dst, &src,
1734 + &bh_process_pkt);
1735 +@@ -834,7 +834,9 @@ static void vmci_transport_handle_detach(struct sock *sk)
1736 + * left in our consume queue.
1737 + */
1738 + if (vsock_stream_has_data(vsk) <= 0) {
1739 +- if (sk->sk_state == SS_CONNECTING) {
1740 ++ sk->sk_state = TCP_CLOSE;
1741 ++
1742 ++ if (sk->sk_state == TCP_SYN_SENT) {
1743 + /* The peer may detach from a queue pair while
1744 + * we are still in the connecting state, i.e.,
1745 + * if the peer VM is killed after attaching to
1746 +@@ -843,12 +845,10 @@ static void vmci_transport_handle_detach(struct sock *sk)
1747 + * event like a reset.
1748 + */
1749 +
1750 +- sk->sk_state = SS_UNCONNECTED;
1751 + sk->sk_err = ECONNRESET;
1752 + sk->sk_error_report(sk);
1753 + return;
1754 + }
1755 +- sk->sk_state = SS_UNCONNECTED;
1756 + }
1757 + sk->sk_state_change(sk);
1758 + }
1759 +@@ -916,17 +916,17 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
1760 + vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
1761 +
1762 + switch (sk->sk_state) {
1763 +- case VSOCK_SS_LISTEN:
1764 ++ case TCP_LISTEN:
1765 + vmci_transport_recv_listen(sk, pkt);
1766 + break;
1767 +- case SS_CONNECTING:
1768 ++ case TCP_SYN_SENT:
1769 + /* Processing of pending connections for servers goes through
1770 + * the listening socket, so see vmci_transport_recv_listen()
1771 + * for that path.
1772 + */
1773 + vmci_transport_recv_connecting_client(sk, pkt);
1774 + break;
1775 +- case SS_CONNECTED:
1776 ++ case TCP_ESTABLISHED:
1777 + vmci_transport_recv_connected(sk, pkt);
1778 + break;
1779 + default:
1780 +@@ -975,7 +975,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
1781 + vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
1782 +
1783 + switch (pending->sk_state) {
1784 +- case SS_CONNECTING:
1785 ++ case TCP_SYN_SENT:
1786 + err = vmci_transport_recv_connecting_server(sk,
1787 + pending,
1788 + pkt);
1789 +@@ -1105,7 +1105,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
1790 + vsock_add_pending(sk, pending);
1791 + sk->sk_ack_backlog++;
1792 +
1793 +- pending->sk_state = SS_CONNECTING;
1794 ++ pending->sk_state = TCP_SYN_SENT;
1795 + vmci_trans(vpending)->produce_size =
1796 + vmci_trans(vpending)->consume_size = qp_size;
1797 + vmci_trans(vpending)->queue_pair_size = qp_size;
1798 +@@ -1229,11 +1229,11 @@ vmci_transport_recv_connecting_server(struct sock *listener,
1799 + * the socket will be valid until it is removed from the queue.
1800 + *
1801 + * If we fail sending the attach below, we remove the socket from the
1802 +- * connected list and move the socket to SS_UNCONNECTED before
1803 ++ * connected list and move the socket to TCP_CLOSE before
1804 + * releasing the lock, so a pending slow path processing of an incoming
1805 + * packet will not see the socket in the connected state in that case.
1806 + */
1807 +- pending->sk_state = SS_CONNECTED;
1808 ++ pending->sk_state = TCP_ESTABLISHED;
1809 +
1810 + vsock_insert_connected(vpending);
1811 +
1812 +@@ -1264,7 +1264,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
1813 +
1814 + destroy:
1815 + pending->sk_err = skerr;
1816 +- pending->sk_state = SS_UNCONNECTED;
1817 ++ pending->sk_state = TCP_CLOSE;
1818 + /* As long as we drop our reference, all necessary cleanup will handle
1819 + * when the cleanup function drops its reference and our destruct
1820 + * implementation is called. Note that since the listen handler will
1821 +@@ -1302,7 +1302,7 @@ vmci_transport_recv_connecting_client(struct sock *sk,
1822 + * accounting (it can already be found since it's in the bound
1823 + * table).
1824 + */
1825 +- sk->sk_state = SS_CONNECTED;
1826 ++ sk->sk_state = TCP_ESTABLISHED;
1827 + sk->sk_socket->state = SS_CONNECTED;
1828 + vsock_insert_connected(vsk);
1829 + sk->sk_state_change(sk);
1830 +@@ -1370,7 +1370,7 @@ vmci_transport_recv_connecting_client(struct sock *sk,
1831 + destroy:
1832 + vmci_transport_send_reset(sk, pkt);
1833 +
1834 +- sk->sk_state = SS_UNCONNECTED;
1835 ++ sk->sk_state = TCP_CLOSE;
1836 + sk->sk_err = skerr;
1837 + sk->sk_error_report(sk);
1838 + return err;
1839 +@@ -1558,7 +1558,7 @@ static int vmci_transport_recv_connected(struct sock *sk,
1840 + sock_set_flag(sk, SOCK_DONE);
1841 + vsk->peer_shutdown = SHUTDOWN_MASK;
1842 + if (vsock_stream_has_data(vsk) <= 0)
1843 +- sk->sk_state = SS_DISCONNECTING;
1844 ++ sk->sk_state = TCP_CLOSING;
1845 +
1846 + sk->sk_state_change(sk);
1847 + break;
1848 +@@ -1826,7 +1826,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
1849 + err = vmci_transport_send_conn_request(
1850 + sk, vmci_trans(vsk)->queue_pair_size);
1851 + if (err < 0) {
1852 +- sk->sk_state = SS_UNCONNECTED;
1853 ++ sk->sk_state = TCP_CLOSE;
1854 + return err;
1855 + }
1856 + } else {
1857 +@@ -1836,7 +1836,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
1858 + sk, vmci_trans(vsk)->queue_pair_size,
1859 + supported_proto_versions);
1860 + if (err < 0) {
1861 +- sk->sk_state = SS_UNCONNECTED;
1862 ++ sk->sk_state = TCP_CLOSE;
1863 + return err;
1864 + }
1865 +
1866 +diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
1867 +index 1406db4d97d1..41fb427f150a 100644
1868 +--- a/net/vmw_vsock/vmci_transport_notify.c
1869 ++++ b/net/vmw_vsock/vmci_transport_notify.c
1870 +@@ -355,7 +355,7 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk,
1871 + * queue. Ask for notifications when there is something to
1872 + * read.
1873 + */
1874 +- if (sk->sk_state == SS_CONNECTED) {
1875 ++ if (sk->sk_state == TCP_ESTABLISHED) {
1876 + if (!send_waiting_read(sk, 1))
1877 + return -1;
1878 +
1879 +diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
1880 +index f3a0afc46208..0cc84f2bb05e 100644
1881 +--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
1882 ++++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
1883 +@@ -176,7 +176,7 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk,
1884 + * queue. Ask for notifications when there is something to
1885 + * read.
1886 + */
1887 +- if (sk->sk_state == SS_CONNECTED)
1888 ++ if (sk->sk_state == TCP_ESTABLISHED)
1889 + vsock_block_update_write_window(sk);
1890 + *data_ready_now = false;
1891 + }