From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.18 commit in: /
Date: Sun, 31 Jan 2016 15:36:38
Message-Id: 1454254580.41033c2db3e1f066508325a49e17c53e24013843.mpagano@gentoo
commit:     41033c2db3e1f066508325a49e17c53e24013843
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 15:36:20 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 15:36:20 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=41033c2d

Linux patch 3.18.26. Removal of redundant patchset.

 0000_README                                        |    8 +-
 1025_linux-3.18.26.patch                           | 1763 ++++++++++++++++++++
 ...ing-refleak-in-join-session-CVE-2016-0728.patch |   81 -
 3 files changed, 1767 insertions(+), 85 deletions(-)

15 diff --git a/0000_README b/0000_README
16 index 3fe5eec..728c863 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -143,6 +143,10 @@ Patch: 1024_linux-3.18.25.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.18.25
22
23 +Patch: 1025_linux-3.18.26.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.18.26
26 +
27 Patch: 1500_XATTR_USER_PREFIX.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
29 Desc: Support for namespace user.pax.* on tmpfs.
30 @@ -151,10 +155,6 @@ Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
31 From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
32 Desc: Enable link security restrictions by default
33
34 -Patch: 1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
35 -From: https://bugs.gentoo.org/show_bug.cgi?id=572384
36 -Desc: Ensure that thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.
37 -
38 Patch: 1700_ARM-dts-patch-to-support-popoplug-e02.patch
39 From: https://bugs.gentoo.org/show_bug.cgi?id=508248
40 Desc: ARM: dts: Add support for Pogoplug E02.
41
42 diff --git a/1025_linux-3.18.26.patch b/1025_linux-3.18.26.patch
43 new file mode 100644
44 index 0000000..5ad05b0
45 --- /dev/null
46 +++ b/1025_linux-3.18.26.patch
47 @@ -0,0 +1,1763 @@
48 +diff --git a/Makefile b/Makefile
49 +index 6df25277ea44..03b0c3fb5bfd 100644
50 +--- a/Makefile
51 ++++ b/Makefile
52 +@@ -1,6 +1,6 @@
53 + VERSION = 3
54 + PATCHLEVEL = 18
55 +-SUBLEVEL = 25
56 ++SUBLEVEL = 26
57 + EXTRAVERSION =
58 + NAME = Diseased Newt
59 +
60 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
61 +index 8f51d6e3883e..c546a93c0f8a 100644
62 +--- a/drivers/block/rbd.c
63 ++++ b/drivers/block/rbd.c
64 +@@ -3394,6 +3394,7 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
65 + goto err_rq;
66 + }
67 + img_request->rq = rq;
68 ++ snapc = NULL; /* img_request consumes a ref */
69 +
70 + if (op_type == OBJ_OP_DISCARD)
71 + result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
72 +@@ -5172,41 +5173,36 @@ out_err:
73 + static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
74 + {
75 + struct rbd_device *parent = NULL;
76 +- struct rbd_spec *parent_spec;
77 +- struct rbd_client *rbdc;
78 + int ret;
79 +
80 + if (!rbd_dev->parent_spec)
81 + return 0;
82 +- /*
83 +- * We need to pass a reference to the client and the parent
84 +- * spec when creating the parent rbd_dev. Images related by
85 +- * parent/child relationships always share both.
86 +- */
87 +- parent_spec = rbd_spec_get(rbd_dev->parent_spec);
88 +- rbdc = __rbd_get_client(rbd_dev->rbd_client);
89 +
90 +- ret = -ENOMEM;
91 +- parent = rbd_dev_create(rbdc, parent_spec);
92 +- if (!parent)
93 ++ parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
94 ++ if (!parent) {
95 ++ ret = -ENOMEM;
96 + goto out_err;
97 ++ }
98 ++
99 ++ /*
100 ++ * Images related by parent/child relationships always share
101 ++ * rbd_client and spec/parent_spec, so bump their refcounts.
102 ++ */
103 ++ __rbd_get_client(rbd_dev->rbd_client);
104 ++ rbd_spec_get(rbd_dev->parent_spec);
105 +
106 + ret = rbd_dev_image_probe(parent, false);
107 + if (ret < 0)
108 + goto out_err;
109 ++
110 + rbd_dev->parent = parent;
111 + atomic_set(&rbd_dev->parent_ref, 1);
112 +-
113 + return 0;
114 ++
115 + out_err:
116 +- if (parent) {
117 +- rbd_dev_unparent(rbd_dev);
118 ++ rbd_dev_unparent(rbd_dev);
119 ++ if (parent)
120 + rbd_dev_destroy(parent);
121 +- } else {
122 +- rbd_put_client(rbdc);
123 +- rbd_spec_put(parent_spec);
124 +- }
125 +-
126 + return ret;
127 + }
128 +
129 +diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
130 +index a66a3217f1d9..f047d7c2b643 100644
131 +--- a/drivers/firewire/ohci.c
132 ++++ b/drivers/firewire/ohci.c
133 +@@ -3682,6 +3682,11 @@ static int pci_probe(struct pci_dev *dev,
134 +
135 + reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
136 + ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
137 ++ /* JMicron JMB38x often shows 0 at first read, just ignore it */
138 ++ if (!ohci->it_context_support) {
139 ++ ohci_notice(ohci, "overriding IsoXmitIntMask\n");
140 ++ ohci->it_context_support = 0xf;
141 ++ }
142 + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
143 + ohci->it_context_mask = ohci->it_context_support;
144 + ohci->n_it = hweight32(ohci->it_context_mask);
145 +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
146 +index 72fb86b9aa24..067f2cb9b215 100644
147 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
148 ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
149 +@@ -1014,13 +1014,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
150 + sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
151 + 8 * 4;
152 +
153 +- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
154 +- &ring_header->dma);
155 ++ ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
156 ++ &ring_header->dma, GFP_KERNEL);
157 + if (unlikely(!ring_header->desc)) {
158 +- dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
159 ++ dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
160 + goto err_nomem;
161 + }
162 +- memset(ring_header->desc, 0, ring_header->size);
163 + /* init TPD ring */
164 +
165 + tpd_ring[0].dma = roundup(ring_header->dma, 8);
166 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
167 +index 2c811f66d5ac..f77b58911558 100644
168 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
169 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
170 +@@ -737,9 +737,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
171 + netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
172 + jiffies, jiffies - dev->trans_start);
173 + qca->net_dev->stats.tx_errors++;
174 +- /* wake the queue if there is room */
175 +- if (qcaspi_tx_ring_has_space(&qca->txr))
176 +- netif_wake_queue(dev);
177 ++ /* Trigger tx queue flush and QCA7000 reset */
178 ++ qca->sync = QCASPI_SYNC_UNKNOWN;
179 + }
180 +
181 + static int
182 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
183 +index b5db6b3f939f..b474dbfcdb4f 100644
184 +--- a/drivers/net/ethernet/renesas/sh_eth.c
185 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
186 +@@ -1417,6 +1417,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
187 + if (mdp->cd->shift_rd0)
188 + desc_status >>= 16;
189 +
190 ++ skb = mdp->rx_skbuff[entry];
191 + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
192 + RD_RFS5 | RD_RFS6 | RD_RFS10)) {
193 + ndev->stats.rx_errors++;
194 +@@ -1432,12 +1433,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
195 + ndev->stats.rx_missed_errors++;
196 + if (desc_status & RD_RFS10)
197 + ndev->stats.rx_over_errors++;
198 +- } else {
199 ++ } else if (skb) {
200 + if (!mdp->cd->hw_swap)
201 + sh_eth_soft_swap(
202 + phys_to_virt(ALIGN(rxdesc->addr, 4)),
203 + pkt_len + 2);
204 +- skb = mdp->rx_skbuff[entry];
205 + mdp->rx_skbuff[entry] = NULL;
206 + if (mdp->cd->rpadir)
207 + skb_reserve(skb, NET_IP_ALIGN);
208 +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
209 +index 1dc628ffce2b..0710214df2bf 100644
210 +--- a/drivers/net/ppp/pptp.c
211 ++++ b/drivers/net/ppp/pptp.c
212 +@@ -420,6 +420,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
213 + struct pptp_opt *opt = &po->proto.pptp;
214 + int error = 0;
215 +
216 ++ if (sockaddr_len < sizeof(struct sockaddr_pppox))
217 ++ return -EINVAL;
218 ++
219 + lock_sock(sk);
220 +
221 + opt->src_addr = sp->sa_addr.pptp;
222 +@@ -441,6 +444,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
223 + struct flowi4 fl4;
224 + int error = 0;
225 +
226 ++ if (sockaddr_len < sizeof(struct sockaddr_pppox))
227 ++ return -EINVAL;
228 ++
229 + if (sp->sa_protocol != PX_PROTO_PPTP)
230 + return -EINVAL;
231 +
232 +diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
233 +index 9dfd1d1106d7..655ecf982394 100644
234 +--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
235 ++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
236 +@@ -69,8 +69,8 @@
237 + #include "iwl-agn-hw.h"
238 +
239 + /* Highest firmware API version supported */
240 +-#define IWL7260_UCODE_API_MAX 10
241 +-#define IWL3160_UCODE_API_MAX 10
242 ++#define IWL7260_UCODE_API_MAX 12
243 ++#define IWL3160_UCODE_API_MAX 12
244 +
245 + /* Oldest version we won't warn about */
246 + #define IWL7260_UCODE_API_OK 9
247 +@@ -103,7 +103,7 @@
248 + #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
249 +
250 + #define IWL7265D_FW_PRE "iwlwifi-7265D-"
251 +-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
252 ++#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
253 +
254 + #define NVM_HW_SECTION_NUM_FAMILY_7000 0
255 +
256 +diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
257 +index d2b7234b1c73..d727ad324d17 100644
258 +--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
259 ++++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
260 +@@ -69,7 +69,7 @@
261 + #include "iwl-agn-hw.h"
262 +
263 + /* Highest firmware API version supported */
264 +-#define IWL8000_UCODE_API_MAX 10
265 ++#define IWL8000_UCODE_API_MAX 12
266 +
267 + /* Oldest version we won't warn about */
268 + #define IWL8000_UCODE_API_OK 8
269 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
270 +index 7d8c3d4ede20..0ecf393a7973 100644
271 +--- a/drivers/usb/class/cdc-acm.c
272 ++++ b/drivers/usb/class/cdc-acm.c
273 +@@ -1860,6 +1860,11 @@ static const struct usb_device_id acm_ids[] = {
274 + },
275 + #endif
276 +
277 ++ /* Exclude Infineon Flash Loader utility */
278 ++ { USB_DEVICE(0x058b, 0x0041),
279 ++ .driver_info = IGNORE_DEVICE,
280 ++ },
281 ++
282 + /* control interfaces without any protocol set */
283 + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
284 + USB_CDC_PROTO_NONE) },
285 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
286 +index b9ddf0c1ffe5..894894f2ff93 100644
287 +--- a/drivers/usb/core/config.c
288 ++++ b/drivers/usb/core/config.c
289 +@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
290 + USB_SS_MULT(desc->bmAttributes) > 3) {
291 + dev_warn(ddev, "Isoc endpoint has Mult of %d in "
292 + "config %d interface %d altsetting %d ep %d: "
293 +- "setting to 3\n", desc->bmAttributes + 1,
294 ++ "setting to 3\n",
295 ++ USB_SS_MULT(desc->bmAttributes),
296 + cfgno, inum, asnum, ep->desc.bEndpointAddress);
297 + ep->ss_ep_comp.bmAttributes = 2;
298 + }
299 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
300 +index 2222899c4b69..88e6e5debbe9 100644
301 +--- a/drivers/usb/core/hub.c
302 ++++ b/drivers/usb/core/hub.c
303 +@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
304 +
305 + static int usb_device_supports_lpm(struct usb_device *udev)
306 + {
307 ++ /* Some devices have trouble with LPM */
308 ++ if (udev->quirks & USB_QUIRK_NO_LPM)
309 ++ return 0;
310 ++
311 + /* USB 2.1 (and greater) devices indicate LPM support through
312 + * their USB 2.0 Extended Capabilities BOS descriptor.
313 + */
314 +@@ -4498,6 +4502,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
315 + goto fail;
316 + }
317 +
318 ++ usb_detect_quirks(udev);
319 ++
320 + if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
321 + retval = usb_get_bos_descriptor(udev);
322 + if (!retval) {
323 +@@ -4692,7 +4698,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
324 + if (status < 0)
325 + goto loop;
326 +
327 +- usb_detect_quirks(udev);
328 + if (udev->quirks & USB_QUIRK_DELAY_INIT)
329 + msleep(1000);
330 +
331 +@@ -5324,9 +5329,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
332 + if (udev->usb2_hw_lpm_enabled == 1)
333 + usb_set_usb2_hardware_lpm(udev, 0);
334 +
335 +- bos = udev->bos;
336 +- udev->bos = NULL;
337 +-
338 + /* Disable LPM and LTM while we reset the device and reinstall the alt
339 + * settings. Device-initiated LPM settings, and system exit latency
340 + * settings are cleared when the device is reset, so we have to set
341 +@@ -5335,15 +5337,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
342 + ret = usb_unlocked_disable_lpm(udev);
343 + if (ret) {
344 + dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
345 +- goto re_enumerate;
346 ++ goto re_enumerate_no_bos;
347 + }
348 + ret = usb_disable_ltm(udev);
349 + if (ret) {
350 + dev_err(&udev->dev, "%s Failed to disable LTM\n.",
351 + __func__);
352 +- goto re_enumerate;
353 ++ goto re_enumerate_no_bos;
354 + }
355 +
356 ++ bos = udev->bos;
357 ++ udev->bos = NULL;
358 ++
359 + for (i = 0; i < SET_CONFIG_TRIES; ++i) {
360 +
361 + /* ep0 maxpacket size may change; let the HCD know about it.
362 +@@ -5440,10 +5445,11 @@ done:
363 + return 0;
364 +
365 + re_enumerate:
366 +- /* LPM state doesn't matter when we're about to destroy the device. */
367 +- hub_port_logical_disconnect(parent_hub, port1);
368 + usb_release_bos_descriptor(udev);
369 + udev->bos = bos;
370 ++re_enumerate_no_bos:
371 ++ /* LPM state doesn't matter when we're about to destroy the device. */
372 ++ hub_port_logical_disconnect(parent_hub, port1);
373 + return -ENODEV;
374 + }
375 +
376 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
377 +index 8a77a417ccfd..6b53fc3ec636 100644
378 +--- a/drivers/usb/core/quirks.c
379 ++++ b/drivers/usb/core/quirks.c
380 +@@ -196,6 +196,12 @@ static const struct usb_device_id usb_quirk_list[] = {
381 + { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
382 + USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
383 +
384 ++ /* Blackmagic Design Intensity Shuttle */
385 ++ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
386 ++
387 ++ /* Blackmagic Design UltraStudio SDI */
388 ++ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
389 ++
390 + { } /* terminating entry must be last */
391 + };
392 +
393 +diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
394 +index 4868369eeec6..69456811ec11 100644
395 +--- a/drivers/usb/gadget/udc/pxa27x_udc.c
396 ++++ b/drivers/usb/gadget/udc/pxa27x_udc.c
397 +@@ -2564,6 +2564,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
398 + udc->pullup_resume = udc->pullup_on;
399 + dplus_pullup(udc, 0);
400 +
401 ++ if (udc->driver)
402 ++ udc->driver->disconnect(&udc->gadget);
403 ++
404 + return 0;
405 + }
406 +
407 +diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
408 +index dc31c425ce01..9f1c0538b211 100644
409 +--- a/drivers/usb/host/whci/qset.c
410 ++++ b/drivers/usb/host/whci/qset.c
411 +@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
412 + if (std->pl_virt == NULL)
413 + return -ENOMEM;
414 + std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
415 ++ if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
416 ++ kfree(std->pl_virt);
417 ++ return -EFAULT;
418 ++ }
419 +
420 + for (p = 0; p < std->num_pointers; p++) {
421 + std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
422 +diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
423 +index 06cc5d6ea681..dbc56eb5eee4 100644
424 +--- a/drivers/usb/musb/Kconfig
425 ++++ b/drivers/usb/musb/Kconfig
426 +@@ -140,7 +140,7 @@ config USB_TI_CPPI_DMA
427 +
428 + config USB_TI_CPPI41_DMA
429 + bool 'TI CPPI 4.1 (AM335x)'
430 +- depends on ARCH_OMAP
431 ++ depends on ARCH_OMAP && DMADEVICES
432 + select TI_CPPI41
433 +
434 + config USB_TUSB_OMAP_DMA
435 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
436 +index eac7ccaa3c85..7d4f51a32e66 100644
437 +--- a/drivers/usb/serial/cp210x.c
438 ++++ b/drivers/usb/serial/cp210x.c
439 +@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
440 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
441 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
442 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
443 +- { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
444 + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
445 + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
446 + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
447 +diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
448 +index 7064eb8d6142..a1f2e2a05bc5 100644
449 +--- a/drivers/usb/serial/usb-serial-simple.c
450 ++++ b/drivers/usb/serial/usb-serial-simple.c
451 +@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
452 +
453 + /* Infineon Flashloader driver */
454 + #define FLASHLOADER_IDS() \
455 ++ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
456 + { USB_DEVICE(0x8087, 0x0716) }
457 + DEVICE(flashloader, FLASHLOADER_IDS);
458 +
459 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
460 +index 5193c7844315..e557e4ca0392 100644
461 +--- a/fs/btrfs/file.c
462 ++++ b/fs/btrfs/file.c
463 +@@ -760,8 +760,16 @@ next_slot:
464 + }
465 +
466 + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
467 +- if (key.objectid > ino ||
468 +- key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
469 ++
470 ++ if (key.objectid > ino)
471 ++ break;
472 ++ if (WARN_ON_ONCE(key.objectid < ino) ||
473 ++ key.type < BTRFS_EXTENT_DATA_KEY) {
474 ++ ASSERT(del_nr == 0);
475 ++ path->slots[0]++;
476 ++ goto next_slot;
477 ++ }
478 ++ if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
479 + break;
480 +
481 + fi = btrfs_item_ptr(leaf, path->slots[0],
482 +@@ -780,8 +788,8 @@ next_slot:
483 + btrfs_file_extent_inline_len(leaf,
484 + path->slots[0], fi);
485 + } else {
486 +- WARN_ON(1);
487 +- extent_end = search_start;
488 ++ /* can't happen */
489 ++ BUG();
490 + }
491 +
492 + /*
493 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
494 +index 0be09bb34b75..5db50e8bf52e 100644
495 +--- a/fs/btrfs/inode.c
496 ++++ b/fs/btrfs/inode.c
497 +@@ -1268,8 +1268,14 @@ next_slot:
498 + num_bytes = 0;
499 + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
500 +
501 +- if (found_key.objectid > ino ||
502 +- found_key.type > BTRFS_EXTENT_DATA_KEY ||
503 ++ if (found_key.objectid > ino)
504 ++ break;
505 ++ if (WARN_ON_ONCE(found_key.objectid < ino) ||
506 ++ found_key.type < BTRFS_EXTENT_DATA_KEY) {
507 ++ path->slots[0]++;
508 ++ goto next_slot;
509 ++ }
510 ++ if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
511 + found_key.offset > end)
512 + break;
513 +
514 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
515 +index 3d50f1ee51ba..31c9f6471ce7 100644
516 +--- a/fs/btrfs/ioctl.c
517 ++++ b/fs/btrfs/ioctl.c
518 +@@ -3187,6 +3187,150 @@ static void clone_update_extent_map(struct inode *inode,
519 + &BTRFS_I(inode)->runtime_flags);
520 + }
521 +
522 ++/*
523 ++ * Make sure we do not end up inserting an inline extent into a file that has
524 ++ * already other (non-inline) extents. If a file has an inline extent it can
525 ++ * not have any other extents and the (single) inline extent must start at the
526 ++ * file offset 0. Failing to respect these rules will lead to file corruption,
527 ++ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
528 ++ *
529 ++ * We can have extents that have been already written to disk or we can have
530 ++ * dirty ranges still in delalloc, in which case the extent maps and items are
531 ++ * created only when we run delalloc, and the delalloc ranges might fall outside
532 ++ * the range we are currently locking in the inode's io tree. So we check the
533 ++ * inode's i_size because of that (i_size updates are done while holding the
534 ++ * i_mutex, which we are holding here).
535 ++ * We also check to see if the inode has a size not greater than "datal" but has
536 ++ * extents beyond it, due to an fallocate with FALLOC_FL_KEEP_SIZE (and we are
537 ++ * protected against such concurrent fallocate calls by the i_mutex).
538 ++ *
539 ++ * If the file has no extents but a size greater than datal, do not allow the
540 ++ * copy because we would need turn the inline extent into a non-inline one (even
541 ++ * with NO_HOLES enabled). If we find our destination inode only has one inline
542 ++ * extent, just overwrite it with the source inline extent if its size is less
543 ++ * than the source extent's size, or we could copy the source inline extent's
544 ++ * data into the destination inode's inline extent if the later is greater then
545 ++ * the former.
546 ++ */
547 ++static int clone_copy_inline_extent(struct inode *src,
548 ++ struct inode *dst,
549 ++ struct btrfs_trans_handle *trans,
550 ++ struct btrfs_path *path,
551 ++ struct btrfs_key *new_key,
552 ++ const u64 drop_start,
553 ++ const u64 datal,
554 ++ const u64 skip,
555 ++ const u64 size,
556 ++ char *inline_data)
557 ++{
558 ++ struct btrfs_root *root = BTRFS_I(dst)->root;
559 ++ const u64 aligned_end = ALIGN(new_key->offset + datal,
560 ++ root->sectorsize);
561 ++ int ret;
562 ++ struct btrfs_key key;
563 ++
564 ++ if (new_key->offset > 0)
565 ++ return -EOPNOTSUPP;
566 ++
567 ++ key.objectid = btrfs_ino(dst);
568 ++ key.type = BTRFS_EXTENT_DATA_KEY;
569 ++ key.offset = 0;
570 ++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
571 ++ if (ret < 0) {
572 ++ return ret;
573 ++ } else if (ret > 0) {
574 ++ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
575 ++ ret = btrfs_next_leaf(root, path);
576 ++ if (ret < 0)
577 ++ return ret;
578 ++ else if (ret > 0)
579 ++ goto copy_inline_extent;
580 ++ }
581 ++ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
582 ++ if (key.objectid == btrfs_ino(dst) &&
583 ++ key.type == BTRFS_EXTENT_DATA_KEY) {
584 ++ ASSERT(key.offset > 0);
585 ++ return -EOPNOTSUPP;
586 ++ }
587 ++ } else if (i_size_read(dst) <= datal) {
588 ++ struct btrfs_file_extent_item *ei;
589 ++ u64 ext_len;
590 ++
591 ++ /*
592 ++ * If the file size is <= datal, make sure there are no other
593 ++ * extents following (can happen do to an fallocate call with
594 ++ * the flag FALLOC_FL_KEEP_SIZE).
595 ++ */
596 ++ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
597 ++ struct btrfs_file_extent_item);
598 ++ /*
599 ++ * If it's an inline extent, it can not have other extents
600 ++ * following it.
601 ++ */
602 ++ if (btrfs_file_extent_type(path->nodes[0], ei) ==
603 ++ BTRFS_FILE_EXTENT_INLINE)
604 ++ goto copy_inline_extent;
605 ++
606 ++ ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
607 ++ if (ext_len > aligned_end)
608 ++ return -EOPNOTSUPP;
609 ++
610 ++ ret = btrfs_next_item(root, path);
611 ++ if (ret < 0) {
612 ++ return ret;
613 ++ } else if (ret == 0) {
614 ++ btrfs_item_key_to_cpu(path->nodes[0], &key,
615 ++ path->slots[0]);
616 ++ if (key.objectid == btrfs_ino(dst) &&
617 ++ key.type == BTRFS_EXTENT_DATA_KEY)
618 ++ return -EOPNOTSUPP;
619 ++ }
620 ++ }
621 ++
622 ++copy_inline_extent:
623 ++ /*
624 ++ * We have no extent items, or we have an extent at offset 0 which may
625 ++ * or may not be inlined. All these cases are dealt the same way.
626 ++ */
627 ++ if (i_size_read(dst) > datal) {
628 ++ /*
629 ++ * If the destination inode has an inline extent...
630 ++ * This would require copying the data from the source inline
631 ++ * extent into the beginning of the destination's inline extent.
632 ++ * But this is really complex, both extents can be compressed
633 ++ * or just one of them, which would require decompressing and
634 ++ * re-compressing data (which could increase the new compressed
635 ++ * size, not allowing the compressed data to fit anymore in an
636 ++ * inline extent).
637 ++ * So just don't support this case for now (it should be rare,
638 ++ * we are not really saving space when cloning inline extents).
639 ++ */
640 ++ return -EOPNOTSUPP;
641 ++ }
642 ++
643 ++ btrfs_release_path(path);
644 ++ ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
645 ++ if (ret)
646 ++ return ret;
647 ++ ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
648 ++ if (ret)
649 ++ return ret;
650 ++
651 ++ if (skip) {
652 ++ const u32 start = btrfs_file_extent_calc_inline_size(0);
653 ++
654 ++ memmove(inline_data + start, inline_data + start + skip, datal);
655 ++ }
656 ++
657 ++ write_extent_buffer(path->nodes[0], inline_data,
658 ++ btrfs_item_ptr_offset(path->nodes[0],
659 ++ path->slots[0]),
660 ++ size);
661 ++ inode_add_bytes(dst, datal);
662 ++
663 ++ return 0;
664 ++}
665 ++
666 + /**
667 + * btrfs_clone() - clone a range from inode file to another
668 + *
669 +@@ -3451,7 +3595,6 @@ process_slot:
670 + } else if (type == BTRFS_FILE_EXTENT_INLINE) {
671 + u64 skip = 0;
672 + u64 trim = 0;
673 +- u64 aligned_end = 0;
674 +
675 + if (off > key.offset) {
676 + skip = off - key.offset;
677 +@@ -3469,42 +3612,22 @@ process_slot:
678 + size -= skip + trim;
679 + datal -= skip + trim;
680 +
681 +- aligned_end = ALIGN(new_key.offset + datal,
682 +- root->sectorsize);
683 +- ret = btrfs_drop_extents(trans, root, inode,
684 +- drop_start,
685 +- aligned_end,
686 +- 1);
687 ++ ret = clone_copy_inline_extent(src, inode,
688 ++ trans, path,
689 ++ &new_key,
690 ++ drop_start,
691 ++ datal,
692 ++ skip, size, buf);
693 + if (ret) {
694 + if (ret != -EOPNOTSUPP)
695 + btrfs_abort_transaction(trans,
696 +- root, ret);
697 +- btrfs_end_transaction(trans, root);
698 +- goto out;
699 +- }
700 +-
701 +- ret = btrfs_insert_empty_item(trans, root, path,
702 +- &new_key, size);
703 +- if (ret) {
704 +- btrfs_abort_transaction(trans, root,
705 +- ret);
706 ++ root,
707 ++ ret);
708 + btrfs_end_transaction(trans, root);
709 + goto out;
710 + }
711 +-
712 +- if (skip) {
713 +- u32 start =
714 +- btrfs_file_extent_calc_inline_size(0);
715 +- memmove(buf+start, buf+start+skip,
716 +- datal);
717 +- }
718 +-
719 + leaf = path->nodes[0];
720 + slot = path->slots[0];
721 +- write_extent_buffer(leaf, buf,
722 +- btrfs_item_ptr_offset(leaf, slot),
723 +- size);
724 +- inode_add_bytes(inode, datal);
725 + }
726 +
727 + /* If we have an implicit hole (NO_HOLES feature). */
728 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
729 +index 01bad724b5f7..fbb0533e977f 100644
730 +--- a/fs/btrfs/xattr.c
731 ++++ b/fs/btrfs/xattr.c
732 +@@ -309,8 +309,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
733 + /* check to make sure this item is what we want */
734 + if (found_key.objectid != key.objectid)
735 + break;
736 +- if (found_key.type != BTRFS_XATTR_ITEM_KEY)
737 ++ if (found_key.type > BTRFS_XATTR_ITEM_KEY)
738 + break;
739 ++ if (found_key.type < BTRFS_XATTR_ITEM_KEY)
740 ++ goto next;
741 +
742 + di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
743 + if (verify_dir_item(root, leaf, di))
744 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
745 +index a92d3f5c6c12..6f29455c03fe 100644
746 +--- a/fs/ceph/mds_client.c
747 ++++ b/fs/ceph/mds_client.c
748 +@@ -1857,7 +1857,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
749 +
750 + len = sizeof(*head) +
751 + pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
752 +- sizeof(struct timespec);
753 ++ sizeof(struct ceph_timespec);
754 +
755 + /* calculate (max) length for cap releases */
756 + len += sizeof(struct ceph_mds_request_release) *
757 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
758 +index b5a2c29a8db8..b16ba5239dcf 100644
759 +--- a/fs/ext4/super.c
760 ++++ b/fs/ext4/super.c
761 +@@ -404,9 +404,13 @@ static void ext4_handle_error(struct super_block *sb)
762 + smp_wmb();
763 + sb->s_flags |= MS_RDONLY;
764 + }
765 +- if (test_opt(sb, ERRORS_PANIC))
766 ++ if (test_opt(sb, ERRORS_PANIC)) {
767 ++ if (EXT4_SB(sb)->s_journal &&
768 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
769 ++ return;
770 + panic("EXT4-fs (device %s): panic forced after error\n",
771 + sb->s_id);
772 ++ }
773 + }
774 +
775 + #define ext4_error_ratelimit(sb) \
776 +@@ -595,8 +599,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
777 + jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
778 + save_error_info(sb, function, line);
779 + }
780 +- if (test_opt(sb, ERRORS_PANIC))
781 ++ if (test_opt(sb, ERRORS_PANIC)) {
782 ++ if (EXT4_SB(sb)->s_journal &&
783 ++ !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
784 ++ return;
785 + panic("EXT4-fs panic from previous error\n");
786 ++ }
787 + }
788 +
789 + void __ext4_msg(struct super_block *sb,
790 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
791 +index 2540324f084b..07e87ec45709 100644
792 +--- a/fs/jbd2/journal.c
793 ++++ b/fs/jbd2/journal.c
794 +@@ -2087,8 +2087,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
795 +
796 + __jbd2_journal_abort_hard(journal);
797 +
798 +- if (errno)
799 ++ if (errno) {
800 + jbd2_journal_update_sb_errno(journal);
801 ++ write_lock(&journal->j_state_lock);
802 ++ journal->j_flags |= JBD2_REC_ERR;
803 ++ write_unlock(&journal->j_state_lock);
804 ++ }
805 + }
806 +
807 + /**
808 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
809 +index 00689a8a85e4..d7cfc6e42b5e 100644
810 +--- a/fs/nfs/inode.c
811 ++++ b/fs/nfs/inode.c
812 +@@ -1717,7 +1717,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
813 + nfsi->attrtimeo_timestamp = now;
814 + }
815 + }
816 +- invalid &= ~NFS_INO_INVALID_ATTR;
817 ++
818 ++ /* Don't declare attrcache up to date if there were no attrs! */
819 ++ if (fattr->valid != 0)
820 ++ invalid &= ~NFS_INO_INVALID_ATTR;
821 ++
822 + /* Don't invalidate the data if we were to blame */
823 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
824 + || S_ISLNK(inode->i_mode)))
825 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
826 +index 368a6b72290c..0e11fe80e5b9 100644
827 +--- a/fs/nfs/nfs4client.c
828 ++++ b/fs/nfs/nfs4client.c
829 +@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
830 + return ret;
831 + idr_preload(GFP_KERNEL);
832 + spin_lock(&nn->nfs_client_lock);
833 +- ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
834 ++ ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
835 + if (ret >= 0)
836 + clp->cl_cb_ident = ret;
837 + spin_unlock(&nn->nfs_client_lock);
838 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
839 +index eda99c8ec3ed..b6c3a8792358 100644
840 +--- a/fs/nfsd/nfs4state.c
841 ++++ b/fs/nfsd/nfs4state.c
842 +@@ -3241,6 +3241,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
843 + stp->st_access_bmap = 0;
844 + stp->st_deny_bmap = 0;
845 + stp->st_openstp = NULL;
846 ++ init_rwsem(&stp->st_rwsem);
847 + spin_lock(&oo->oo_owner.so_client->cl_lock);
848 + list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
849 + spin_lock(&fp->fi_lock);
850 +@@ -4057,21 +4058,27 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
851 + */
852 + if (stp) {
853 + /* Stateid was found, this is an OPEN upgrade */
854 ++ down_read(&stp->st_rwsem);
855 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
856 +- if (status)
857 ++ if (status) {
858 ++ up_read(&stp->st_rwsem);
859 + goto out;
860 ++ }
861 + } else {
862 + stp = open->op_stp;
863 + open->op_stp = NULL;
864 + init_open_stateid(stp, fp, open);
865 ++ down_read(&stp->st_rwsem);
866 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
867 + if (status) {
868 ++ up_read(&stp->st_rwsem);
869 + release_open_stateid(stp);
870 + goto out;
871 + }
872 + }
873 + update_stateid(&stp->st_stid.sc_stateid);
874 + memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
875 ++ up_read(&stp->st_rwsem);
876 +
877 + if (nfsd4_has_session(&resp->cstate)) {
878 + if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
879 +@@ -4647,10 +4654,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
880 + * revoked delegations are kept only for free_stateid.
881 + */
882 + return nfserr_bad_stateid;
883 ++ down_write(&stp->st_rwsem);
884 + status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
885 +- if (status)
886 +- return status;
887 +- return nfs4_check_fh(current_fh, &stp->st_stid);
888 ++ if (status == nfs_ok)
889 ++ status = nfs4_check_fh(current_fh, &stp->st_stid);
890 ++ if (status != nfs_ok)
891 ++ up_write(&stp->st_rwsem);
892 ++ return status;
893 + }
894 +
895 + /*
896 +@@ -4697,6 +4707,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
897 + return status;
898 + oo = openowner(stp->st_stateowner);
899 + if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
900 ++ up_write(&stp->st_rwsem);
901 + nfs4_put_stid(&stp->st_stid);
902 + return nfserr_bad_stateid;
903 + }
904 +@@ -4727,11 +4738,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
905 + goto out;
906 + oo = openowner(stp->st_stateowner);
907 + status = nfserr_bad_stateid;
908 +- if (oo->oo_flags & NFS4_OO_CONFIRMED)
909 ++ if (oo->oo_flags & NFS4_OO_CONFIRMED) {
910 ++ up_write(&stp->st_rwsem);
911 + goto put_stateid;
912 ++ }
913 + oo->oo_flags |= NFS4_OO_CONFIRMED;
914 + update_stateid(&stp->st_stid.sc_stateid);
915 + memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
916 ++ up_write(&stp->st_rwsem);
917 + dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
918 + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
919 +
920 +@@ -4810,6 +4824,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
921 + memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
922 + status = nfs_ok;
923 + put_stateid:
924 ++ up_write(&stp->st_rwsem);
925 + nfs4_put_stid(&stp->st_stid);
926 + out:
927 + nfsd4_bump_seqid(cstate, status);
928 +@@ -4860,6 +4875,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
929 + goto out;
930 + update_stateid(&stp->st_stid.sc_stateid);
931 + memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
932 ++ up_write(&stp->st_rwsem);
933 +
934 + nfsd4_close_open_stateid(stp);
935 +
936 +@@ -5088,6 +5104,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
937 + stp->st_access_bmap = 0;
938 + stp->st_deny_bmap = open_stp->st_deny_bmap;
939 + stp->st_openstp = open_stp;
940 ++ init_rwsem(&stp->st_rwsem);
941 + list_add(&stp->st_locks, &open_stp->st_locks);
942 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
943 + spin_lock(&fp->fi_lock);
944 +@@ -5256,6 +5273,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
945 + &open_stp, nn);
946 + if (status)
947 + goto out;
948 ++ up_write(&open_stp->st_rwsem);
949 + open_sop = openowner(open_stp->st_stateowner);
950 + status = nfserr_bad_stateid;
951 + if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
952 +@@ -5263,6 +5281,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
953 + goto out;
954 + status = lookup_or_create_lock_state(cstate, open_stp, lock,
955 + &lock_stp, &new);
956 ++ if (status == nfs_ok)
957 ++ down_write(&lock_stp->st_rwsem);
958 + } else {
959 + status = nfs4_preprocess_seqid_op(cstate,
960 + lock->lk_old_lock_seqid,
961 +@@ -5368,6 +5388,8 @@ out:
962 + seqid_mutating_err(ntohl(status)))
963 + lock_sop->lo_owner.so_seqid++;
964 +
965 ++ up_write(&lock_stp->st_rwsem);
966 ++
967 + /*
968 + * If this is a new, never-before-used stateid, and we are
969 + * returning an error, then just go ahead and release it.
970 +@@ -5538,6 +5560,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
971 + fput:
972 + fput(filp);
973 + put_stateid:
974 ++ up_write(&stp->st_rwsem);
975 + nfs4_put_stid(&stp->st_stid);
976 + out:
977 + nfsd4_bump_seqid(cstate, status);
978 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
979 +index 2712042a66b1..3ccb1046a2f9 100644
980 +--- a/fs/nfsd/state.h
981 ++++ b/fs/nfsd/state.h
982 +@@ -506,14 +506,15 @@ struct nfs4_file {
983 + * Better suggestions welcome.
984 + */
985 + struct nfs4_ol_stateid {
986 +- struct nfs4_stid st_stid; /* must be first field */
987 +- struct list_head st_perfile;
988 +- struct list_head st_perstateowner;
989 +- struct list_head st_locks;
990 +- struct nfs4_stateowner * st_stateowner;
991 +- unsigned char st_access_bmap;
992 +- unsigned char st_deny_bmap;
993 +- struct nfs4_ol_stateid * st_openstp;
994 ++ struct nfs4_stid st_stid;
995 ++ struct list_head st_perfile;
996 ++ struct list_head st_perstateowner;
997 ++ struct list_head st_locks;
998 ++ struct nfs4_stateowner *st_stateowner;
999 ++ unsigned char st_access_bmap;
1000 ++ unsigned char st_deny_bmap;
1001 ++ struct nfs4_ol_stateid *st_openstp;
1002 ++ struct rw_semaphore st_rwsem;
1003 + };
1004 +
1005 + static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
1006 +diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
1007 +index 914c121ec890..9fc1daecdfb3 100644
1008 +--- a/fs/ocfs2/namei.c
1009 ++++ b/fs/ocfs2/namei.c
1010 +@@ -361,6 +361,8 @@ static int ocfs2_mknod(struct inode *dir,
1011 + mlog_errno(status);
1012 + goto leave;
1013 + }
1014 ++ /* update inode->i_mode after mask with "umask". */
1015 ++ inode->i_mode = mode;
1016 +
1017 + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
1018 + S_ISDIR(mode),
1019 +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
1020 +index 4caf8acfef11..c035001df223 100644
1021 +--- a/include/linux/jbd2.h
1022 ++++ b/include/linux/jbd2.h
1023 +@@ -1007,6 +1007,7 @@ struct journal_s
1024 + #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
1025 + * data write error in ordered
1026 + * mode */
1027 ++#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */
1028 +
1029 + /*
1030 + * Function declarations for the journaling transaction and buffer
1031 +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
1032 +index 9948c874e3f1..1d0043dc34e4 100644
1033 +--- a/include/linux/usb/quirks.h
1034 ++++ b/include/linux/usb/quirks.h
1035 +@@ -47,4 +47,7 @@
1036 + /* device generates spurious wakeup, ignore remote wakeup capability */
1037 + #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
1038 +
1039 ++/* device can't handle Link Power Management */
1040 ++#define USB_QUIRK_NO_LPM BIT(10)
1041 ++
1042 + #endif /* __LINUX_USB_QUIRKS_H */
1043 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
1044 +index dfe4ddfbb43c..e830c3dff61a 100644
1045 +--- a/include/net/af_unix.h
1046 ++++ b/include/net/af_unix.h
1047 +@@ -63,6 +63,7 @@ struct unix_sock {
1048 + #define UNIX_GC_CANDIDATE 0
1049 + #define UNIX_GC_MAYBE_CYCLE 1
1050 + struct socket_wq peer_wq;
1051 ++ wait_queue_t peer_wake;
1052 + };
1053 +
1054 + static inline struct unix_sock *unix_sk(struct sock *sk)
1055 +diff --git a/include/net/dst.h b/include/net/dst.h
1056 +index 0fb99a26e973..182b812d45e1 100644
1057 +--- a/include/net/dst.h
1058 ++++ b/include/net/dst.h
1059 +@@ -312,6 +312,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
1060 + }
1061 + }
1062 +
1063 ++/**
1064 ++ * dst_hold_safe - Take a reference on a dst if possible
1065 ++ * @dst: pointer to dst entry
1066 ++ *
1067 ++ * This helper returns false if it could not safely
1068 ++ * take a reference on a dst.
1069 ++ */
1070 ++static inline bool dst_hold_safe(struct dst_entry *dst)
1071 ++{
1072 ++ if (dst->flags & DST_NOCACHE)
1073 ++ return atomic_inc_not_zero(&dst->__refcnt);
1074 ++ dst_hold(dst);
1075 ++ return true;
1076 ++}
1077 ++
1078 ++/**
1079 ++ * skb_dst_force_safe - makes sure skb dst is refcounted
1080 ++ * @skb: buffer
1081 ++ *
1082 ++ * If dst is not yet refcounted and not destroyed, grab a ref on it.
1083 ++ */
1084 ++static inline void skb_dst_force_safe(struct sk_buff *skb)
1085 ++{
1086 ++ if (skb_dst_is_noref(skb)) {
1087 ++ struct dst_entry *dst = skb_dst(skb);
1088 ++
1089 ++ if (!dst_hold_safe(dst))
1090 ++ dst = NULL;
1091 ++
1092 ++ skb->_skb_refdst = (unsigned long)dst;
1093 ++ }
1094 ++}
1095 ++
1096 +
1097 + /**
1098 + * __skb_tunnel_rx - prepare skb for rx reinsert
1099 +diff --git a/include/net/sock.h b/include/net/sock.h
1100 +index a098ce3cd242..a40bc8c0af4b 100644
1101 +--- a/include/net/sock.h
1102 ++++ b/include/net/sock.h
1103 +@@ -379,6 +379,7 @@ struct sock {
1104 + sk_no_check_rx : 1,
1105 + sk_userlocks : 4,
1106 + sk_protocol : 8,
1107 ++#define SK_PROTOCOL_MAX U8_MAX
1108 + sk_type : 16;
1109 + kmemcheck_bitfield_end(flags);
1110 + int sk_wmem_queued;
1111 +@@ -715,6 +716,8 @@ enum sock_flags {
1112 + SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
1113 + };
1114 +
1115 ++#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
1116 ++
1117 + static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
1118 + {
1119 + nsk->sk_flags = osk->sk_flags;
1120 +@@ -789,7 +792,7 @@ void sk_stream_write_space(struct sock *sk);
1121 + static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
1122 + {
1123 + /* dont let skb dst not refcounted, we are going to leave rcu lock */
1124 +- skb_dst_force(skb);
1125 ++ skb_dst_force_safe(skb);
1126 +
1127 + if (!sk->sk_backlog.tail)
1128 + sk->sk_backlog.head = skb;
1129 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1130 +index bd3c41d4ec07..2273f534b01a 100644
1131 +--- a/kernel/workqueue.c
1132 ++++ b/kernel/workqueue.c
1133 +@@ -1442,13 +1442,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1134 + timer_stats_timer_set_start_info(&dwork->timer);
1135 +
1136 + dwork->wq = wq;
1137 +- /* timer isn't guaranteed to run in this cpu, record earlier */
1138 +- if (cpu == WORK_CPU_UNBOUND)
1139 +- cpu = raw_smp_processor_id();
1140 + dwork->cpu = cpu;
1141 + timer->expires = jiffies + delay;
1142 +
1143 +- add_timer_on(timer, cpu);
1144 ++ if (unlikely(cpu != WORK_CPU_UNBOUND))
1145 ++ add_timer_on(timer, cpu);
1146 ++ else
1147 ++ add_timer(timer);
1148 + }
1149 +
1150 + /**
1151 +diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
1152 +index c35c3f48fc0f..1428c3ff3341 100644
1153 +--- a/net/ax25/af_ax25.c
1154 ++++ b/net/ax25/af_ax25.c
1155 +@@ -806,6 +806,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
1156 + struct sock *sk;
1157 + ax25_cb *ax25;
1158 +
1159 ++ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
1160 ++ return -EINVAL;
1161 ++
1162 + if (!net_eq(net, &init_net))
1163 + return -EAFNOSUPPORT;
1164 +
1165 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
1166 +index 7ee9e4ab00f8..b3ef78a644ed 100644
1167 +--- a/net/bluetooth/sco.c
1168 ++++ b/net/bluetooth/sco.c
1169 +@@ -520,6 +520,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
1170 + if (!addr || addr->sa_family != AF_BLUETOOTH)
1171 + return -EINVAL;
1172 +
1173 ++ if (addr_len < sizeof(struct sockaddr_sco))
1174 ++ return -EINVAL;
1175 ++
1176 + lock_sock(sk);
1177 +
1178 + if (sk->sk_state != BT_OPEN) {
1179 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1180 +index ea0bcc4a9657..b2921c0d5608 100644
1181 +--- a/net/core/skbuff.c
1182 ++++ b/net/core/skbuff.c
1183 +@@ -3599,7 +3599,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
1184 + serr->ee.ee_info = tstype;
1185 + if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
1186 + serr->ee.ee_data = skb_shinfo(skb)->tskey;
1187 +- if (sk->sk_protocol == IPPROTO_TCP)
1188 ++ if (sk->sk_protocol == IPPROTO_TCP &&
1189 ++ sk->sk_type == SOCK_STREAM)
1190 + serr->ee.ee_data -= sk->sk_tskey;
1191 + }
1192 +
1193 +@@ -4108,7 +4109,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
1194 + return NULL;
1195 + }
1196 +
1197 +- memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
1198 ++ memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
1199 ++ 2 * ETH_ALEN);
1200 + skb->mac_header += VLAN_HLEN;
1201 + return skb;
1202 + }
1203 +diff --git a/net/core/sock.c b/net/core/sock.c
1204 +index 1e5130de31b6..b1a6ff0a9041 100644
1205 +--- a/net/core/sock.c
1206 ++++ b/net/core/sock.c
1207 +@@ -422,8 +422,6 @@ static void sock_warn_obsolete_bsdism(const char *name)
1208 + }
1209 + }
1210 +
1211 +-#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
1212 +-
1213 + static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
1214 + {
1215 + if (sk->sk_flags & flags) {
1216 +@@ -861,7 +859,8 @@ set_rcvbuf:
1217 + }
1218 + if (val & SOF_TIMESTAMPING_OPT_ID &&
1219 + !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
1220 +- if (sk->sk_protocol == IPPROTO_TCP) {
1221 ++ if (sk->sk_protocol == IPPROTO_TCP &&
1222 ++ sk->sk_type == SOCK_STREAM) {
1223 + if (sk->sk_state != TCP_ESTABLISHED) {
1224 + ret = -EINVAL;
1225 + break;
1226 +diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
1227 +index 25733d538147..2aeeb4f22e9d 100644
1228 +--- a/net/decnet/af_decnet.c
1229 ++++ b/net/decnet/af_decnet.c
1230 +@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
1231 + {
1232 + struct sock *sk;
1233 +
1234 ++ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
1235 ++ return -EINVAL;
1236 ++
1237 + if (!net_eq(net, &init_net))
1238 + return -EAFNOSUPPORT;
1239 +
1240 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
1241 +index 9a173577a790..6cf020ea4f46 100644
1242 +--- a/net/ipv4/af_inet.c
1243 ++++ b/net/ipv4/af_inet.c
1244 +@@ -259,6 +259,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
1245 + int try_loading_module = 0;
1246 + int err;
1247 +
1248 ++ if (protocol < 0 || protocol >= IPPROTO_MAX)
1249 ++ return -EINVAL;
1250 ++
1251 + sock->state = SS_UNCONNECTED;
1252 +
1253 + /* Look for the requested type/protocol pair. */
1254 +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
1255 +index 606c520ffd5a..8ce8e82d1abb 100644
1256 +--- a/net/ipv4/fou.c
1257 ++++ b/net/ipv4/fou.c
1258 +@@ -25,6 +25,7 @@ struct fou {
1259 + u16 port;
1260 + struct udp_offload udp_offloads;
1261 + struct list_head list;
1262 ++ struct rcu_head rcu;
1263 + };
1264 +
1265 + struct fou_cfg {
1266 +@@ -287,7 +288,7 @@ static void fou_release(struct fou *fou)
1267 +
1268 + sock_release(sock);
1269 +
1270 +- kfree(fou);
1271 ++ kfree_rcu(fou, rcu);
1272 + }
1273 +
1274 + static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
1275 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1276 +index 6cd9f696d9c6..5d5390299277 100644
1277 +--- a/net/ipv4/tcp_ipv4.c
1278 ++++ b/net/ipv4/tcp_ipv4.c
1279 +@@ -1553,7 +1553,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1280 + if (likely(sk->sk_rx_dst))
1281 + skb_dst_drop(skb);
1282 + else
1283 +- skb_dst_force(skb);
1284 ++ skb_dst_force_safe(skb);
1285 +
1286 + __skb_queue_tail(&tp->ucopy.prequeue, skb);
1287 + tp->ucopy.memory += skb->truesize;
1288 +@@ -1758,8 +1758,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1289 + {
1290 + struct dst_entry *dst = skb_dst(skb);
1291 +
1292 +- if (dst) {
1293 +- dst_hold(dst);
1294 ++ if (dst && dst_hold_safe(dst)) {
1295 + sk->sk_rx_dst = dst;
1296 + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1297 + }
1298 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
1299 +index 05417c330f4e..ad95905e7a70 100644
1300 +--- a/net/ipv6/af_inet6.c
1301 ++++ b/net/ipv6/af_inet6.c
1302 +@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
1303 + int try_loading_module = 0;
1304 + int err;
1305 +
1306 ++ if (protocol < 0 || protocol >= IPPROTO_MAX)
1307 ++ return -EINVAL;
1308 ++
1309 + /* Look for the requested type/protocol pair. */
1310 + lookup_protocol:
1311 + err = -ESOCKTNOSUPPORT;
1312 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
1313 +index 28d7a245ea34..25cd22c1ddee 100644
1314 +--- a/net/ipv6/ip6_gre.c
1315 ++++ b/net/ipv6/ip6_gre.c
1316 +@@ -1563,13 +1563,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
1317 + return -EEXIST;
1318 + } else {
1319 + t = nt;
1320 +-
1321 +- ip6gre_tunnel_unlink(ign, t);
1322 +- ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
1323 +- ip6gre_tunnel_link(ign, t);
1324 +- netdev_state_change(dev);
1325 + }
1326 +
1327 ++ ip6gre_tunnel_unlink(ign, t);
1328 ++ ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
1329 ++ ip6gre_tunnel_link(ign, t);
1330 + return 0;
1331 + }
1332 +
1333 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1334 +index 26feadd0b763..b5a4ac8ce4b1 100644
1335 +--- a/net/ipv6/tcp_ipv6.c
1336 ++++ b/net/ipv6/tcp_ipv6.c
1337 +@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1338 + {
1339 + struct dst_entry *dst = skb_dst(skb);
1340 +
1341 +- if (dst) {
1342 ++ if (dst && dst_hold_safe(dst)) {
1343 + const struct rt6_info *rt = (const struct rt6_info *)dst;
1344 +
1345 +- dst_hold(dst);
1346 + sk->sk_rx_dst = dst;
1347 + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1348 + if (rt->rt6i_node)
1349 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1350 +index 3f3a6cbdceb7..b3f6ec0df426 100644
1351 +--- a/net/irda/af_irda.c
1352 ++++ b/net/irda/af_irda.c
1353 +@@ -1100,6 +1100,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
1354 +
1355 + IRDA_DEBUG(2, "%s()\n", __func__);
1356 +
1357 ++ if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
1358 ++ return -EINVAL;
1359 ++
1360 + if (net != &init_net)
1361 + return -EAFNOSUPPORT;
1362 +
1363 +diff --git a/net/rds/send.c b/net/rds/send.c
1364 +index 0a64541020b0..0bae8d43b012 100644
1365 +--- a/net/rds/send.c
1366 ++++ b/net/rds/send.c
1367 +@@ -958,11 +958,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
1368 + release_sock(sk);
1369 + }
1370 +
1371 +- /* racing with another thread binding seems ok here */
1372 ++ lock_sock(sk);
1373 + if (daddr == 0 || rs->rs_bound_addr == 0) {
1374 ++ release_sock(sk);
1375 + ret = -ENOTCONN; /* XXX not a great errno */
1376 + goto out;
1377 + }
1378 ++ release_sock(sk);
1379 +
1380 + /* size of rm including all sgs */
1381 + ret = rds_rm_size(msg, payload_len);
1382 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1383 +index 0e4198ee2370..3267a5cbb3e8 100644
1384 +--- a/net/sctp/ipv6.c
1385 ++++ b/net/sctp/ipv6.c
1386 +@@ -634,6 +634,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
1387 + struct sock *newsk;
1388 + struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1389 + struct sctp6_sock *newsctp6sk;
1390 ++ struct ipv6_txoptions *opt;
1391 +
1392 + newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
1393 + if (!newsk)
1394 +@@ -653,6 +654,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
1395 +
1396 + memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1397 +
1398 ++ rcu_read_lock();
1399 ++ opt = rcu_dereference(np->opt);
1400 ++ if (opt)
1401 ++ opt = ipv6_dup_options(newsk, opt);
1402 ++ RCU_INIT_POINTER(newnp->opt, opt);
1403 ++ rcu_read_unlock();
1404 ++
1405 + /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
1406 + * and getpeername().
1407 + */
1408 +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
1409 +index 371a152d9759..642c11570285 100644
1410 +--- a/net/sctp/sm_make_chunk.c
1411 ++++ b/net/sctp/sm_make_chunk.c
1412 +@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1413 +
1414 + /* Set an expiration time for the cookie. */
1415 + cookie->c.expiration = ktime_add(asoc->cookie_life,
1416 +- ktime_get());
1417 ++ ktime_get_real());
1418 +
1419 + /* Copy the peer's init packet. */
1420 + memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
1421 +@@ -1780,7 +1780,7 @@ no_hmac:
1422 + if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
1423 + kt = skb_get_ktime(skb);
1424 + else
1425 +- kt = ktime_get();
1426 ++ kt = ktime_get_real();
1427 +
1428 + if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1429 + /*
1430 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1431 +index fb082aa4d656..4130c1b87dd6 100644
1432 +--- a/net/sctp/socket.c
1433 ++++ b/net/sctp/socket.c
1434 +@@ -7181,6 +7181,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
1435 + newsk->sk_type = sk->sk_type;
1436 + newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1437 + newsk->sk_flags = sk->sk_flags;
1438 ++ newsk->sk_tsflags = sk->sk_tsflags;
1439 + newsk->sk_no_check_tx = sk->sk_no_check_tx;
1440 + newsk->sk_no_check_rx = sk->sk_no_check_rx;
1441 + newsk->sk_reuse = sk->sk_reuse;
1442 +@@ -7213,6 +7214,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
1443 + newinet->mc_ttl = 1;
1444 + newinet->mc_index = 0;
1445 + newinet->mc_list = NULL;
1446 ++
1447 ++ if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1448 ++ net_enable_timestamp();
1449 + }
1450 +
1451 + static inline void sctp_copy_descendant(struct sock *sk_to,
1452 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1453 +index 2ae4a5915aa7..7229794c1419 100644
1454 +--- a/net/unix/af_unix.c
1455 ++++ b/net/unix/af_unix.c
1456 +@@ -316,6 +316,118 @@ found:
1457 + return s;
1458 + }
1459 +
1460 ++/* Support code for asymmetrically connected dgram sockets
1461 ++ *
1462 ++ * If a datagram socket is connected to a socket not itself connected
1463 ++ * to the first socket (eg, /dev/log), clients may only enqueue more
1464 ++ * messages if the present receive queue of the server socket is not
1465 ++ * "too large". This means there's a second writeability condition
1466 ++ * poll and sendmsg need to test. The dgram recv code will do a wake
1467 ++ * up on the peer_wait wait queue of a socket upon reception of a
1468 ++ * datagram which needs to be propagated to sleeping would-be writers
1469 ++ * since these might not have sent anything so far. This can't be
1470 ++ * accomplished via poll_wait because the lifetime of the server
1471 ++ * socket might be less than that of its clients if these break their
1472 ++ * association with it or if the server socket is closed while clients
1473 ++ * are still connected to it and there's no way to inform "a polling
1474 ++ * implementation" that it should let go of a certain wait queue
1475 ++ *
1476 ++ * In order to propagate a wake up, a wait_queue_t of the client
1477 ++ * socket is enqueued on the peer_wait queue of the server socket
1478 ++ * whose wake function does a wake_up on the ordinary client socket
1479 ++ * wait queue. This connection is established whenever a write (or
1480 ++ * poll for write) hit the flow control condition and broken when the
1481 ++ * association to the server socket is dissolved or after a wake up
1482 ++ * was relayed.
1483 ++ */
1484 ++
1485 ++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
1486 ++ void *key)
1487 ++{
1488 ++ struct unix_sock *u;
1489 ++ wait_queue_head_t *u_sleep;
1490 ++
1491 ++ u = container_of(q, struct unix_sock, peer_wake);
1492 ++
1493 ++ __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
1494 ++ q);
1495 ++ u->peer_wake.private = NULL;
1496 ++
1497 ++ /* relaying can only happen while the wq still exists */
1498 ++ u_sleep = sk_sleep(&u->sk);
1499 ++ if (u_sleep)
1500 ++ wake_up_interruptible_poll(u_sleep, key);
1501 ++
1502 ++ return 0;
1503 ++}
1504 ++
1505 ++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
1506 ++{
1507 ++ struct unix_sock *u, *u_other;
1508 ++ int rc;
1509 ++
1510 ++ u = unix_sk(sk);
1511 ++ u_other = unix_sk(other);
1512 ++ rc = 0;
1513 ++ spin_lock(&u_other->peer_wait.lock);
1514 ++
1515 ++ if (!u->peer_wake.private) {
1516 ++ u->peer_wake.private = other;
1517 ++ __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
1518 ++
1519 ++ rc = 1;
1520 ++ }
1521 ++
1522 ++ spin_unlock(&u_other->peer_wait.lock);
1523 ++ return rc;
1524 ++}
1525 ++
1526 ++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
1527 ++ struct sock *other)
1528 ++{
1529 ++ struct unix_sock *u, *u_other;
1530 ++
1531 ++ u = unix_sk(sk);
1532 ++ u_other = unix_sk(other);
1533 ++ spin_lock(&u_other->peer_wait.lock);
1534 ++
1535 ++ if (u->peer_wake.private == other) {
1536 ++ __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
1537 ++ u->peer_wake.private = NULL;
1538 ++ }
1539 ++
1540 ++ spin_unlock(&u_other->peer_wait.lock);
1541 ++}
1542 ++
1543 ++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
1544 ++ struct sock *other)
1545 ++{
1546 ++ unix_dgram_peer_wake_disconnect(sk, other);
1547 ++ wake_up_interruptible_poll(sk_sleep(sk),
1548 ++ POLLOUT |
1549 ++ POLLWRNORM |
1550 ++ POLLWRBAND);
1551 ++}
1552 ++
1553 ++/* preconditions:
1554 ++ * - unix_peer(sk) == other
1555 ++ * - association is stable
1556 ++ */
1557 ++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
1558 ++{
1559 ++ int connected;
1560 ++
1561 ++ connected = unix_dgram_peer_wake_connect(sk, other);
1562 ++
1563 ++ if (unix_recvq_full(other))
1564 ++ return 1;
1565 ++
1566 ++ if (connected)
1567 ++ unix_dgram_peer_wake_disconnect(sk, other);
1568 ++
1569 ++ return 0;
1570 ++}
1571 ++
1572 + static inline int unix_writable(struct sock *sk)
1573 + {
1574 + return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
1575 +@@ -420,6 +532,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
1576 + skpair->sk_state_change(skpair);
1577 + sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
1578 + }
1579 ++
1580 ++ unix_dgram_peer_wake_disconnect(sk, skpair);
1581 + sock_put(skpair); /* It may now die */
1582 + unix_peer(sk) = NULL;
1583 + }
1584 +@@ -653,6 +767,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
1585 + INIT_LIST_HEAD(&u->link);
1586 + mutex_init(&u->readlock); /* single task reading lock */
1587 + init_waitqueue_head(&u->peer_wait);
1588 ++ init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
1589 + unix_insert_socket(unix_sockets_unbound(sk), sk);
1590 + out:
1591 + if (sk == NULL)
1592 +@@ -1020,6 +1135,8 @@ restart:
1593 + if (unix_peer(sk)) {
1594 + struct sock *old_peer = unix_peer(sk);
1595 + unix_peer(sk) = other;
1596 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1597 ++
1598 + unix_state_double_unlock(sk, other);
1599 +
1600 + if (other != old_peer)
1601 +@@ -1459,6 +1576,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1602 + struct scm_cookie tmp_scm;
1603 + int max_level;
1604 + int data_len = 0;
1605 ++ int sk_locked;
1606 +
1607 + if (NULL == siocb->scm)
1608 + siocb->scm = &tmp_scm;
1609 +@@ -1540,12 +1658,14 @@ restart:
1610 + goto out_free;
1611 + }
1612 +
1613 ++ sk_locked = 0;
1614 + unix_state_lock(other);
1615 ++restart_locked:
1616 + err = -EPERM;
1617 + if (!unix_may_send(sk, other))
1618 + goto out_unlock;
1619 +
1620 +- if (sock_flag(other, SOCK_DEAD)) {
1621 ++ if (unlikely(sock_flag(other, SOCK_DEAD))) {
1622 + /*
1623 + * Check with 1003.1g - what should
1624 + * datagram error
1625 +@@ -1553,10 +1673,14 @@ restart:
1626 + unix_state_unlock(other);
1627 + sock_put(other);
1628 +
1629 ++ if (!sk_locked)
1630 ++ unix_state_lock(sk);
1631 ++
1632 + err = 0;
1633 +- unix_state_lock(sk);
1634 + if (unix_peer(sk) == other) {
1635 + unix_peer(sk) = NULL;
1636 ++ unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1637 ++
1638 + unix_state_unlock(sk);
1639 +
1640 + unix_dgram_disconnected(sk, other);
1641 +@@ -1582,21 +1706,38 @@ restart:
1642 + goto out_unlock;
1643 + }
1644 +
1645 +- if (unix_peer(other) != sk && unix_recvq_full(other)) {
1646 +- if (!timeo) {
1647 +- err = -EAGAIN;
1648 +- goto out_unlock;
1649 ++ if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1650 ++ if (timeo) {
1651 ++ timeo = unix_wait_for_peer(other, timeo);
1652 ++
1653 ++ err = sock_intr_errno(timeo);
1654 ++ if (signal_pending(current))
1655 ++ goto out_free;
1656 ++
1657 ++ goto restart;
1658 + }
1659 +
1660 +- timeo = unix_wait_for_peer(other, timeo);
1661 ++ if (!sk_locked) {
1662 ++ unix_state_unlock(other);
1663 ++ unix_state_double_lock(sk, other);
1664 ++ }
1665 +
1666 +- err = sock_intr_errno(timeo);
1667 +- if (signal_pending(current))
1668 +- goto out_free;
1669 ++ if (unix_peer(sk) != other ||
1670 ++ unix_dgram_peer_wake_me(sk, other)) {
1671 ++ err = -EAGAIN;
1672 ++ sk_locked = 1;
1673 ++ goto out_unlock;
1674 ++ }
1675 +
1676 +- goto restart;
1677 ++ if (!sk_locked) {
1678 ++ sk_locked = 1;
1679 ++ goto restart_locked;
1680 ++ }
1681 + }
1682 +
1683 ++ if (unlikely(sk_locked))
1684 ++ unix_state_unlock(sk);
1685 ++
1686 + if (sock_flag(other, SOCK_RCVTSTAMP))
1687 + __net_timestamp(skb);
1688 + maybe_add_creds(skb, sock, other);
1689 +@@ -1610,6 +1751,8 @@ restart:
1690 + return len;
1691 +
1692 + out_unlock:
1693 ++ if (sk_locked)
1694 ++ unix_state_unlock(sk);
1695 + unix_state_unlock(other);
1696 + out_free:
1697 + kfree_skb(skb);
1698 +@@ -1953,14 +2096,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1699 + memset(&tmp_scm, 0, sizeof(tmp_scm));
1700 + }
1701 +
1702 +- err = mutex_lock_interruptible(&u->readlock);
1703 +- if (unlikely(err)) {
1704 +- /* recvmsg() in non blocking mode is supposed to return -EAGAIN
1705 +- * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1706 +- */
1707 +- err = noblock ? -EAGAIN : -ERESTARTSYS;
1708 +- goto out;
1709 +- }
1710 ++ mutex_lock(&u->readlock);
1711 +
1712 + if (flags & MSG_PEEK)
1713 + skip = sk_peek_offset(sk, flags);
1714 +@@ -2001,12 +2137,12 @@ again:
1715 +
1716 + timeo = unix_stream_data_wait(sk, timeo, last);
1717 +
1718 +- if (signal_pending(current)
1719 +- || mutex_lock_interruptible(&u->readlock)) {
1720 ++ if (signal_pending(current)) {
1721 + err = sock_intr_errno(timeo);
1722 + goto out;
1723 + }
1724 +
1725 ++ mutex_lock(&u->readlock);
1726 + continue;
1727 + unlock:
1728 + unix_state_unlock(sk);
1729 +@@ -2269,14 +2405,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
1730 + return mask;
1731 +
1732 + writable = unix_writable(sk);
1733 +- other = unix_peer_get(sk);
1734 +- if (other) {
1735 +- if (unix_peer(other) != sk) {
1736 +- sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
1737 +- if (unix_recvq_full(other))
1738 +- writable = 0;
1739 +- }
1740 +- sock_put(other);
1741 ++ if (writable) {
1742 ++ unix_state_lock(sk);
1743 ++
1744 ++ other = unix_peer(sk);
1745 ++ if (other && unix_peer(other) != sk &&
1746 ++ unix_recvq_full(other) &&
1747 ++ unix_dgram_peer_wake_me(sk, other))
1748 ++ writable = 0;
1749 ++
1750 ++ unix_state_unlock(sk);
1751 + }
1752 +
1753 + if (writable)
1754 +diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
1755 +index 4743d71e4aa6..fee27fe2b30f 100644
1756 +--- a/security/keys/keyctl.c
1757 ++++ b/security/keys/keyctl.c
1758 +@@ -757,16 +757,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
1759 +
1760 + /* the key is probably readable - now try to read it */
1761 + can_read_key:
1762 +- ret = key_validate(key);
1763 +- if (ret == 0) {
1764 +- ret = -EOPNOTSUPP;
1765 +- if (key->type->read) {
1766 +- /* read the data with the semaphore held (since we
1767 +- * might sleep) */
1768 +- down_read(&key->sem);
1769 ++ ret = -EOPNOTSUPP;
1770 ++ if (key->type->read) {
1771 ++ /* Read the data with the semaphore held (since we might sleep)
1772 ++ * to protect against the key being updated or revoked.
1773 ++ */
1774 ++ down_read(&key->sem);
1775 ++ ret = key_validate(key);
1776 ++ if (ret == 0)
1777 + ret = key->type->read(key, buffer, buflen);
1778 +- up_read(&key->sem);
1779 +- }
1780 ++ up_read(&key->sem);
1781 + }
1782 +
1783 + error2:
1784 +diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
1785 +index bd536cb221e2..db91639c81e3 100644
1786 +--- a/security/keys/process_keys.c
1787 ++++ b/security/keys/process_keys.c
1788 +@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
1789 + ret = PTR_ERR(keyring);
1790 + goto error2;
1791 + } else if (keyring == new->session_keyring) {
1792 ++ key_put(keyring);
1793 + ret = 0;
1794 + goto error2;
1795 + }
1796 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1797 +index b422e406a9cb..99e952293498 100644
1798 +--- a/sound/pci/hda/patch_hdmi.c
1799 ++++ b/sound/pci/hda/patch_hdmi.c
1800 +@@ -48,8 +48,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
1801 + #define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
1802 + #define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
1803 + #define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
1804 ++#define is_broxton(codec) ((codec)->vendor_id == 0x8086280a)
1805 + #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
1806 +- || is_skylake(codec))
1807 ++ || is_skylake(codec) || is_broxton(codec))
1808 +
1809 + #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
1810 + #define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
1811
1812 diff --git a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
1813 deleted file mode 100644
1814 index 49020d7..0000000
1815 --- a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
1816 +++ /dev/null
1817 @@ -1,81 +0,0 @@
1818 -From 23567fd052a9abb6d67fe8e7a9ccdd9800a540f2 Mon Sep 17 00:00:00 2001
1819 -From: Yevgeny Pats <yevgeny@××××××××××××××××.io>
1820 -Date: Tue, 19 Jan 2016 22:09:04 +0000
1821 -Subject: KEYS: Fix keyring ref leak in join_session_keyring()
1822 -
1823 -This fixes CVE-2016-0728.
1824 -
1825 -If a thread is asked to join as a session keyring the keyring that's already
1826 -set as its session, we leak a keyring reference.
1827 -
1828 -This can be tested with the following program:
1829 -
1830 - #include <stddef.h>
1831 - #include <stdio.h>
1832 - #include <sys/types.h>
1833 - #include <keyutils.h>
1834 -
1835 - int main(int argc, const char *argv[])
1836 - {
1837 - int i = 0;
1838 - key_serial_t serial;
1839 -
1840 - serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
1841 - "leaked-keyring");
1842 - if (serial < 0) {
1843 - perror("keyctl");
1844 - return -1;
1845 - }
1846 -
1847 - if (keyctl(KEYCTL_SETPERM, serial,
1848 - KEY_POS_ALL | KEY_USR_ALL) < 0) {
1849 - perror("keyctl");
1850 - return -1;
1851 - }
1852 -
1853 - for (i = 0; i < 100; i++) {
1854 - serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
1855 - "leaked-keyring");
1856 - if (serial < 0) {
1857 - perror("keyctl");
1858 - return -1;
1859 - }
1860 - }
1861 -
1862 - return 0;
1863 - }
1864 -
1865 -If, after the program has run, there something like the following line in
1866 -/proc/keys:
1867 -
1868 -3f3d898f I--Q--- 100 perm 3f3f0000 0 0 keyring leaked-keyring: empty
1869 -
1870 -with a usage count of 100 * the number of times the program has been run,
1871 -then the kernel is malfunctioning. If leaked-keyring has zero usages or
1872 -has been garbage collected, then the problem is fixed.
1873 -
1874 -Reported-by: Yevgeny Pats <yevgeny@××××××××××××××××.io>
1875 -Signed-off-by: David Howells <dhowells@××××××.com>
1876 -Acked-by: Don Zickus <dzickus@××××××.com>
1877 -Acked-by: Prarit Bhargava <prarit@××××××.com>
1878 -Acked-by: Jarod Wilson <jarod@××××××.com>
1879 -Signed-off-by: James Morris <james.l.morris@××××××.com>
1880 ----
1881 - security/keys/process_keys.c | 1 +
1882 - 1 file changed, 1 insertion(+)
1883 -
1884 -diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
1885 -index a3f85d2..e6d50172 100644
1886 ---- a/security/keys/process_keys.c
1887 -+++ b/security/keys/process_keys.c
1888 -@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
1889 - ret = PTR_ERR(keyring);
1890 - goto error2;
1891 - } else if (keyring == new->session_keyring) {
1892 -+ key_put(keyring);
1893 - ret = 0;
1894 - goto error2;
1895 - }
1896 ---
1897 -cgit v0.12
1898 -