Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Sat, 29 Dec 2018 18:54:15
Message-Id: 1546109632.e17f9236d0c06738492d9c80accbb911c2558360.mpagano@gentoo
1 commit: e17f9236d0c06738492d9c80accbb911c2558360
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Dec 29 18:53:52 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Dec 29 18:53:52 2018 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e17f9236d0c06738492d9c80accbb911c2558360
7
8 proj/linux-patches: Linux patch 4.14.91
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1090_linux-4.14.91.patch | 1437 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1441 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index dc7f560..ca6677a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -403,6 +403,10 @@ Patch: 1089_4.14.90.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.14.90
23
24 +Patch:  1090_linux-4.14.91.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.14.91
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1090_linux-4.14.91.patch b/1090_linux-4.14.91.patch
33 new file mode 100644
34 index 0000000..90250d3
35 --- /dev/null
36 +++ b/1090_linux-4.14.91.patch
37 @@ -0,0 +1,1437 @@
38 +diff --git a/Makefile b/Makefile
39 +index 280c7193e246..a6fb3b158a19 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 14
46 +-SUBLEVEL = 90
47 ++SUBLEVEL = 91
48 + EXTRAVERSION =
49 + NAME = Petit Gorille
50 +
51 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
52 +index 62c62d3eb0ff..fed3636dce9a 100644
53 +--- a/arch/x86/include/asm/msr-index.h
54 ++++ b/arch/x86/include/asm/msr-index.h
55 +@@ -372,6 +372,7 @@
56 + #define MSR_F15H_NB_PERF_CTR 0xc0010241
57 + #define MSR_F15H_PTSC 0xc0010280
58 + #define MSR_F15H_IC_CFG 0xc0011021
59 ++#define MSR_F15H_EX_CFG 0xc001102c
60 +
61 + /* Fam 10h MSRs */
62 + #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
63 +diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
64 +index 558444b23923..c2987daa6a6b 100644
65 +--- a/arch/x86/kernel/cpu/mtrr/if.c
66 ++++ b/arch/x86/kernel/cpu/mtrr/if.c
67 +@@ -173,6 +173,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
68 + struct mtrr_gentry gentry;
69 + void __user *arg = (void __user *) __arg;
70 +
71 ++ memset(&gentry, 0, sizeof(gentry));
72 ++
73 + switch (cmd) {
74 + case MTRRIOC_ADD_ENTRY:
75 + case MTRRIOC_SET_ENTRY:
76 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
77 +index 4353580b659a..8eec37d37c3d 100644
78 +--- a/arch/x86/kvm/vmx.c
79 ++++ b/arch/x86/kvm/vmx.c
80 +@@ -10447,6 +10447,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
81 + kunmap(vmx->nested.pi_desc_page);
82 + kvm_release_page_dirty(vmx->nested.pi_desc_page);
83 + vmx->nested.pi_desc_page = NULL;
84 ++ vmx->nested.pi_desc = NULL;
85 ++ vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
86 + }
87 + page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
88 + if (is_error_page(page))
89 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
90 +index f24329659bea..ac431fa778aa 100644
91 +--- a/arch/x86/kvm/x86.c
92 ++++ b/arch/x86/kvm/x86.c
93 +@@ -2227,6 +2227,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
94 + case MSR_AMD64_PATCH_LOADER:
95 + case MSR_AMD64_BU_CFG2:
96 + case MSR_AMD64_DC_CFG:
97 ++ case MSR_F15H_EX_CFG:
98 + break;
99 +
100 + case MSR_IA32_UCODE_REV:
101 +@@ -2508,6 +2509,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
102 + case MSR_AMD64_BU_CFG2:
103 + case MSR_IA32_PERF_CTL:
104 + case MSR_AMD64_DC_CFG:
105 ++ case MSR_F15H_EX_CFG:
106 + msr_info->data = 0;
107 + break;
108 + case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
109 +diff --git a/block/blk-lib.c b/block/blk-lib.c
110 +index 2bc544ce3d2e..0bdc77888dc5 100644
111 +--- a/block/blk-lib.c
112 ++++ b/block/blk-lib.c
113 +@@ -59,10 +59,18 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
114 + unsigned int req_sects;
115 + sector_t end_sect, tmp;
116 +
117 +- /* Make sure bi_size doesn't overflow */
118 +- req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
119 ++ /*
120 ++ * Issue in chunks of the user defined max discard setting,
121 ++ * ensuring that bi_size doesn't overflow
122 ++ */
123 ++ req_sects = min_t(sector_t, nr_sects,
124 ++ q->limits.max_discard_sectors);
125 ++ if (!req_sects)
126 ++ goto fail;
127 ++ if (req_sects > UINT_MAX >> 9)
128 ++ req_sects = UINT_MAX >> 9;
129 +
130 +- /**
131 ++ /*
132 + * If splitting a request, and the next starting sector would be
133 + * misaligned, stop the discard at the previous aligned sector.
134 + */
135 +@@ -96,6 +104,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
136 +
137 + *biop = bio;
138 + return 0;
139 ++
140 ++fail:
141 ++ if (bio) {
142 ++ submit_bio_wait(bio);
143 ++ bio_put(bio);
144 ++ }
145 ++ *biop = NULL;
146 ++ return -EOPNOTSUPP;
147 + }
148 + EXPORT_SYMBOL(__blkdev_issue_discard);
149 +
150 +diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
151 +index 05813fbf3daf..647dfbbc4e1c 100644
152 +--- a/drivers/gpio/gpio-max7301.c
153 ++++ b/drivers/gpio/gpio-max7301.c
154 +@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
155 + struct spi_device *spi = to_spi_device(dev);
156 + u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
157 +
158 +- return spi_write(spi, (const u8 *)&word, sizeof(word));
159 ++ return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
160 + }
161 +
162 + /* A read from the MAX7301 means two transfers; here, one message each */
163 +@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
164 + struct spi_device *spi = to_spi_device(dev);
165 +
166 + word = 0x8000 | (reg << 8);
167 +- ret = spi_write(spi, (const u8 *)&word, sizeof(word));
168 +- if (ret)
169 +- return ret;
170 +- /*
171 +- * This relies on the fact, that a transfer with NULL tx_buf shifts out
172 +- * zero bytes (=NOOP for MAX7301)
173 +- */
174 +- ret = spi_read(spi, (u8 *)&word, sizeof(word));
175 ++ ret = spi_write_then_read(spi, &word, sizeof(word), &word,
176 ++ sizeof(word));
177 + if (ret)
178 + return ret;
179 + return word & 0xff;
180 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
181 +index 33d4bd505b5b..57d157e94bd6 100644
182 +--- a/drivers/gpio/gpiolib-acpi.c
183 ++++ b/drivers/gpio/gpiolib-acpi.c
184 +@@ -23,11 +23,28 @@
185 +
186 + #include "gpiolib.h"
187 +
188 ++/**
189 ++ * struct acpi_gpio_event - ACPI GPIO event handler data
190 ++ *
191 ++ * @node: list-entry of the events list of the struct acpi_gpio_chip
192 ++ * @handle: handle of ACPI method to execute when the IRQ triggers
193 ++ * @handler: irq_handler to pass to request_irq when requesting the IRQ
194 ++ * @pin: GPIO pin number on the gpio_chip
195 ++ * @irq: Linux IRQ number for the event, for request_ / free_irq
196 ++ * @irqflags: flags to pass to request_irq when requesting the IRQ
197 ++ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
198 ++ * @is_requested: True if request_irq has been done
199 ++ * @desc: gpio_desc for the GPIO pin for this event
200 ++ */
201 + struct acpi_gpio_event {
202 + struct list_head node;
203 + acpi_handle handle;
204 ++ irq_handler_t handler;
205 + unsigned int pin;
206 + unsigned int irq;
207 ++ unsigned long irqflags;
208 ++ bool irq_is_wake;
209 ++ bool irq_requested;
210 + struct gpio_desc *desc;
211 + };
212 +
213 +@@ -53,10 +70,10 @@ struct acpi_gpio_chip {
214 +
215 + /*
216 + * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
217 +- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
218 ++ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
219 + * late_initcall_sync handler, so that other builtin drivers can register their
220 + * OpRegions before the event handlers can run. This list contains gpiochips
221 +- * for which the acpi_gpiochip_request_interrupts() has been deferred.
222 ++ * for which the acpi_gpiochip_request_irqs() call has been deferred.
223 + */
224 + static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
225 + static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
226 +@@ -194,8 +211,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
227 + }
228 + EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
229 +
230 +-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
231 +- void *context)
232 ++static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
233 ++ struct acpi_gpio_event *event)
234 ++{
235 ++ int ret, value;
236 ++
237 ++ ret = request_threaded_irq(event->irq, NULL, event->handler,
238 ++ event->irqflags, "ACPI:Event", event);
239 ++ if (ret) {
240 ++ dev_err(acpi_gpio->chip->parent,
241 ++ "Failed to setup interrupt handler for %d\n",
242 ++ event->irq);
243 ++ return;
244 ++ }
245 ++
246 ++ if (event->irq_is_wake)
247 ++ enable_irq_wake(event->irq);
248 ++
249 ++ event->irq_requested = true;
250 ++
251 ++ /* Make sure we trigger the initial state of edge-triggered IRQs */
252 ++ value = gpiod_get_raw_value_cansleep(event->desc);
253 ++ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
254 ++ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
255 ++ event->handler(event->irq, event);
256 ++}
257 ++
258 ++static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
259 ++{
260 ++ struct acpi_gpio_event *event;
261 ++
262 ++ list_for_each_entry(event, &acpi_gpio->events, node)
263 ++ acpi_gpiochip_request_irq(acpi_gpio, event);
264 ++}
265 ++
266 ++static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
267 ++ void *context)
268 + {
269 + struct acpi_gpio_chip *acpi_gpio = context;
270 + struct gpio_chip *chip = acpi_gpio->chip;
271 +@@ -204,8 +255,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
272 + struct acpi_gpio_event *event;
273 + irq_handler_t handler = NULL;
274 + struct gpio_desc *desc;
275 +- unsigned long irqflags;
276 +- int ret, pin, irq, value;
277 ++ int ret, pin, irq;
278 +
279 + if (!acpi_gpio_get_irq_resource(ares, &agpio))
280 + return AE_OK;
281 +@@ -240,8 +290,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
282 +
283 + gpiod_direction_input(desc);
284 +
285 +- value = gpiod_get_value_cansleep(desc);
286 +-
287 + ret = gpiochip_lock_as_irq(chip, pin);
288 + if (ret) {
289 + dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
290 +@@ -254,64 +302,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
291 + goto fail_unlock_irq;
292 + }
293 +
294 +- irqflags = IRQF_ONESHOT;
295 ++ event = kzalloc(sizeof(*event), GFP_KERNEL);
296 ++ if (!event)
297 ++ goto fail_unlock_irq;
298 ++
299 ++ event->irqflags = IRQF_ONESHOT;
300 + if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
301 + if (agpio->polarity == ACPI_ACTIVE_HIGH)
302 +- irqflags |= IRQF_TRIGGER_HIGH;
303 ++ event->irqflags |= IRQF_TRIGGER_HIGH;
304 + else
305 +- irqflags |= IRQF_TRIGGER_LOW;
306 ++ event->irqflags |= IRQF_TRIGGER_LOW;
307 + } else {
308 + switch (agpio->polarity) {
309 + case ACPI_ACTIVE_HIGH:
310 +- irqflags |= IRQF_TRIGGER_RISING;
311 ++ event->irqflags |= IRQF_TRIGGER_RISING;
312 + break;
313 + case ACPI_ACTIVE_LOW:
314 +- irqflags |= IRQF_TRIGGER_FALLING;
315 ++ event->irqflags |= IRQF_TRIGGER_FALLING;
316 + break;
317 + default:
318 +- irqflags |= IRQF_TRIGGER_RISING |
319 +- IRQF_TRIGGER_FALLING;
320 ++ event->irqflags |= IRQF_TRIGGER_RISING |
321 ++ IRQF_TRIGGER_FALLING;
322 + break;
323 + }
324 + }
325 +
326 +- event = kzalloc(sizeof(*event), GFP_KERNEL);
327 +- if (!event)
328 +- goto fail_unlock_irq;
329 +-
330 + event->handle = evt_handle;
331 ++ event->handler = handler;
332 + event->irq = irq;
333 ++ event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
334 + event->pin = pin;
335 + event->desc = desc;
336 +
337 +- ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
338 +- "ACPI:Event", event);
339 +- if (ret) {
340 +- dev_err(chip->parent,
341 +- "Failed to setup interrupt handler for %d\n",
342 +- event->irq);
343 +- goto fail_free_event;
344 +- }
345 +-
346 +- if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
347 +- enable_irq_wake(irq);
348 +-
349 + list_add_tail(&event->node, &acpi_gpio->events);
350 +
351 +- /*
352 +- * Make sure we trigger the initial state of the IRQ when using RISING
353 +- * or FALLING. Note we run the handlers on late_init, the AML code
354 +- * may refer to OperationRegions from other (builtin) drivers which
355 +- * may be probed after us.
356 +- */
357 +- if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
358 +- ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
359 +- handler(event->irq, event);
360 +-
361 + return AE_OK;
362 +
363 +-fail_free_event:
364 +- kfree(event);
365 + fail_unlock_irq:
366 + gpiochip_unlock_as_irq(chip, pin);
367 + fail_free_desc:
368 +@@ -348,6 +374,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
369 + if (ACPI_FAILURE(status))
370 + return;
371 +
372 ++ acpi_walk_resources(handle, "_AEI",
373 ++ acpi_gpiochip_alloc_event, acpi_gpio);
374 ++
375 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
376 + defer = !acpi_gpio_deferred_req_irqs_done;
377 + if (defer)
378 +@@ -358,8 +387,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
379 + if (defer)
380 + return;
381 +
382 +- acpi_walk_resources(handle, "_AEI",
383 +- acpi_gpiochip_request_interrupt, acpi_gpio);
384 ++ acpi_gpiochip_request_irqs(acpi_gpio);
385 + }
386 + EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
387 +
388 +@@ -396,10 +424,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
389 + list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
390 + struct gpio_desc *desc;
391 +
392 +- if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
393 +- disable_irq_wake(event->irq);
394 ++ if (event->irq_requested) {
395 ++ if (event->irq_is_wake)
396 ++ disable_irq_wake(event->irq);
397 ++
398 ++ free_irq(event->irq, event);
399 ++ }
400 +
401 +- free_irq(event->irq, event);
402 + desc = event->desc;
403 + if (WARN_ON(IS_ERR(desc)))
404 + continue;
405 +@@ -1253,23 +1284,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
406 + return con_id == NULL;
407 + }
408 +
409 +-/* Run deferred acpi_gpiochip_request_interrupts() */
410 +-static int acpi_gpio_handle_deferred_request_interrupts(void)
411 ++/* Run deferred acpi_gpiochip_request_irqs() */
412 ++static int acpi_gpio_handle_deferred_request_irqs(void)
413 + {
414 + struct acpi_gpio_chip *acpi_gpio, *tmp;
415 +
416 + mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
417 + list_for_each_entry_safe(acpi_gpio, tmp,
418 + &acpi_gpio_deferred_req_irqs_list,
419 +- deferred_req_irqs_list_entry) {
420 +- acpi_handle handle;
421 +-
422 +- handle = ACPI_HANDLE(acpi_gpio->chip->parent);
423 +- acpi_walk_resources(handle, "_AEI",
424 +- acpi_gpiochip_request_interrupt, acpi_gpio);
425 +-
426 +- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
427 +- }
428 ++ deferred_req_irqs_list_entry)
429 ++ acpi_gpiochip_request_irqs(acpi_gpio);
430 +
431 + acpi_gpio_deferred_req_irqs_done = true;
432 + mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
433 +@@ -1277,4 +1301,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
434 + return 0;
435 + }
436 + /* We must use _sync so that this runs after the first deferred_probe run */
437 +-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
438 ++late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
439 +diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
440 +index a9ae6dd2d593..53f319369de5 100644
441 +--- a/drivers/gpu/drm/drm_ioctl.c
442 ++++ b/drivers/gpu/drm/drm_ioctl.c
443 +@@ -37,6 +37,7 @@
444 +
445 + #include <linux/pci.h>
446 + #include <linux/export.h>
447 ++#include <linux/nospec.h>
448 +
449 + /**
450 + * DOC: getunique and setversion story
451 +@@ -778,13 +779,17 @@ long drm_ioctl(struct file *filp,
452 +
453 + if (is_driver_ioctl) {
454 + /* driver ioctl */
455 +- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
456 ++ unsigned int index = nr - DRM_COMMAND_BASE;
457 ++
458 ++ if (index >= dev->driver->num_ioctls)
459 + goto err_i1;
460 +- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
461 ++ index = array_index_nospec(index, dev->driver->num_ioctls);
462 ++ ioctl = &dev->driver->ioctls[index];
463 + } else {
464 + /* core ioctl */
465 + if (nr >= DRM_CORE_IOCTL_COUNT)
466 + goto err_i1;
467 ++ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
468 + ioctl = &drm_ioctls[nr];
469 + }
470 +
471 +@@ -866,6 +871,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
472 +
473 + if (nr >= DRM_CORE_IOCTL_COUNT)
474 + return false;
475 ++ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
476 +
477 + *flags = drm_ioctls[nr].flags;
478 + return true;
479 +diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
480 +index 2cd134dd94d2..4218a616f1d3 100644
481 +--- a/drivers/hv/vmbus_drv.c
482 ++++ b/drivers/hv/vmbus_drv.c
483 +@@ -300,6 +300,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
484 +
485 + if (!hv_dev->channel)
486 + return -ENODEV;
487 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
488 ++ return -EINVAL;
489 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
490 + return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
491 + }
492 +@@ -313,6 +315,8 @@ static ssize_t out_read_index_show(struct device *dev,
493 +
494 + if (!hv_dev->channel)
495 + return -ENODEV;
496 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
497 ++ return -EINVAL;
498 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
499 + return sprintf(buf, "%d\n", outbound.current_read_index);
500 + }
501 +@@ -327,6 +331,8 @@ static ssize_t out_write_index_show(struct device *dev,
502 +
503 + if (!hv_dev->channel)
504 + return -ENODEV;
505 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
506 ++ return -EINVAL;
507 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
508 + return sprintf(buf, "%d\n", outbound.current_write_index);
509 + }
510 +@@ -341,6 +347,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
511 +
512 + if (!hv_dev->channel)
513 + return -ENODEV;
514 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
515 ++ return -EINVAL;
516 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
517 + return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
518 + }
519 +@@ -355,6 +363,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
520 +
521 + if (!hv_dev->channel)
522 + return -ENODEV;
523 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
524 ++ return -EINVAL;
525 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
526 + return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
527 + }
528 +@@ -368,6 +378,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
529 +
530 + if (!hv_dev->channel)
531 + return -ENODEV;
532 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
533 ++ return -EINVAL;
534 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
535 + return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
536 + }
537 +@@ -381,6 +393,8 @@ static ssize_t in_read_index_show(struct device *dev,
538 +
539 + if (!hv_dev->channel)
540 + return -ENODEV;
541 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
542 ++ return -EINVAL;
543 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
544 + return sprintf(buf, "%d\n", inbound.current_read_index);
545 + }
546 +@@ -394,6 +408,8 @@ static ssize_t in_write_index_show(struct device *dev,
547 +
548 + if (!hv_dev->channel)
549 + return -ENODEV;
550 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
551 ++ return -EINVAL;
552 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
553 + return sprintf(buf, "%d\n", inbound.current_write_index);
554 + }
555 +@@ -408,6 +424,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
556 +
557 + if (!hv_dev->channel)
558 + return -ENODEV;
559 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
560 ++ return -EINVAL;
561 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
562 + return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
563 + }
564 +@@ -422,6 +440,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
565 +
566 + if (!hv_dev->channel)
567 + return -ENODEV;
568 ++ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
569 ++ return -EINVAL;
570 + hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
571 + return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
572 + }
573 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
574 +index 60105ba77889..47f3f562d86f 100644
575 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
576 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
577 +@@ -1775,8 +1775,8 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
578 +
579 + list_for_each_entry(ch, &sdev->rch_list, list) {
580 + if (srpt_disconnect_ch(ch) >= 0)
581 +- pr_info("Closing channel %s-%d because target %s has been disabled\n",
582 +- ch->sess_name, ch->qp->qp_num,
583 ++ pr_info("Closing channel %s because target %s has been disabled\n",
584 ++ ch->sess_name,
585 + sdev->device->name);
586 + srpt_close_ch(ch);
587 + }
588 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
589 +index bad5c1bf4ed9..814a04e8fdd7 100644
590 +--- a/drivers/mmc/core/mmc.c
591 ++++ b/drivers/mmc/core/mmc.c
592 +@@ -30,6 +30,7 @@
593 + #include "pwrseq.h"
594 +
595 + #define DEFAULT_CMD6_TIMEOUT_MS 500
596 ++#define MIN_CACHE_EN_TIMEOUT_MS 1600
597 +
598 + static const unsigned int tran_exp[] = {
599 + 10000, 100000, 1000000, 10000000,
600 +@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
601 + card->cid.year += 16;
602 +
603 + /* check whether the eMMC card supports BKOPS */
604 +- if (!mmc_card_broken_hpi(card) &&
605 +- ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
606 ++ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
607 + card->ext_csd.bkops = 1;
608 + card->ext_csd.man_bkops_en =
609 + (ext_csd[EXT_CSD_BKOPS_EN] &
610 +@@ -1755,20 +1755,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
611 + if (err) {
612 + pr_warn("%s: Enabling HPI failed\n",
613 + mmc_hostname(card->host));
614 ++ card->ext_csd.hpi_en = 0;
615 + err = 0;
616 +- } else
617 ++ } else {
618 + card->ext_csd.hpi_en = 1;
619 ++ }
620 + }
621 +
622 + /*
623 +- * If cache size is higher than 0, this indicates
624 +- * the existence of cache and it can be turned on.
625 ++ * If cache size is higher than 0, this indicates the existence of cache
626 ++ * and it can be turned on. Note that some eMMCs from Micron has been
627 ++ * reported to need ~800 ms timeout, while enabling the cache after
628 ++ * sudden power failure tests. Let's extend the timeout to a minimum of
629 + * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
630 + */
631 +- if (!mmc_card_broken_hpi(card) &&
632 +- card->ext_csd.cache_size > 0) {
633 ++ if (card->ext_csd.cache_size > 0) {
634 ++ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
635 ++
636 ++ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
637 + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
638 +- EXT_CSD_CACHE_CTRL, 1,
639 +- card->ext_csd.generic_cmd6_time);
640 ++ EXT_CSD_CACHE_CTRL, 1, timeout_ms);
641 + if (err && err != -EBADMSG)
642 + goto free_card;
643 +
644 +diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
645 +index 9e03fada16dc..3f3ff7530b76 100644
646 +--- a/drivers/mmc/host/omap_hsmmc.c
647 ++++ b/drivers/mmc/host/omap_hsmmc.c
648 +@@ -2083,7 +2083,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
649 + mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
650 + mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
651 + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
652 +- mmc->max_seg_size = mmc->max_req_size;
653 +
654 + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
655 + MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
656 +@@ -2113,6 +2112,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
657 + goto err_irq;
658 + }
659 +
660 ++ /*
661 ++ * Limit the maximum segment size to the lower of the request size
662 ++ * and the DMA engine device segment size limits. In reality, with
663 ++ * 32-bit transfers, the DMA engine can do longer segments than this
664 ++ * but there is no way to represent that in the DMA model - if we
665 ++ * increase this figure here, we get warnings from the DMA API debug.
666 ++ */
667 ++ mmc->max_seg_size = min3(mmc->max_req_size,
668 ++ dma_get_max_seg_size(host->rx_chan->device->dev),
669 ++ dma_get_max_seg_size(host->tx_chan->device->dev));
670 ++
671 + /* Request IRQ for MMC operations */
672 + ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
673 + mmc_hostname(mmc), host);
674 +diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
675 +index d7a3379ea668..18a0952f68a8 100644
676 +--- a/drivers/net/usb/hso.c
677 ++++ b/drivers/net/usb/hso.c
678 +@@ -2806,6 +2806,12 @@ static int hso_get_config_data(struct usb_interface *interface)
679 + return -EIO;
680 + }
681 +
682 ++ /* check if we have a valid interface */
683 ++ if (if_num > 16) {
684 ++ kfree(config_data);
685 ++ return -EINVAL;
686 ++ }
687 ++
688 + switch (config_data[if_num]) {
689 + case 0x0:
690 + result = 0;
691 +@@ -2876,10 +2882,18 @@ static int hso_probe(struct usb_interface *interface,
692 +
693 + /* Get the interface/port specification from either driver_info or from
694 + * the device itself */
695 +- if (id->driver_info)
696 ++ if (id->driver_info) {
697 ++ /* if_num is controlled by the device, driver_info is a 0 terminated
698 ++ * array. Make sure, the access is in bounds! */
699 ++ for (i = 0; i <= if_num; ++i)
700 ++ if (((u32 *)(id->driver_info))[i] == 0)
701 ++ goto exit;
702 + port_spec = ((u32 *)(id->driver_info))[if_num];
703 +- else
704 ++ } else {
705 + port_spec = hso_get_config_data(interface);
706 ++ if (port_spec < 0)
707 ++ goto exit;
708 ++ }
709 +
710 + /* Check if we need to switch to alt interfaces prior to port
711 + * configuration */
712 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
713 +index cebf0ce76d27..e9e466cae322 100644
714 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
715 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
716 +@@ -952,6 +952,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
717 + int ret, i, j;
718 + u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
719 +
720 ++ /*
721 ++ * This command is not supported on earlier firmware versions.
722 ++ * Unfortunately, we don't have a TLV API flag to rely on, so
723 ++ * rely on the major version which is in the first byte of
724 ++ * ucode_ver.
725 ++ */
726 ++ if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
727 ++ return 0;
728 ++
729 + ret = iwl_mvm_sar_get_wgds_table(mvm);
730 + if (ret < 0) {
731 + IWL_DEBUG_RADIO(mvm,
732 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
733 +index 4cbc6cb8bf89..0ff247326d6c 100644
734 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
735 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
736 +@@ -517,6 +517,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
737 + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
738 +
739 + /* 9000 Series */
740 ++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
741 ++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
742 ++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
743 ++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
744 ++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
745 ++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
746 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
747 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
748 ++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
749 ++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
750 ++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
751 ++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
752 ++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
753 ++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
754 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
755 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
756 ++ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
757 ++ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
758 ++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
759 ++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
760 ++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
761 ++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
762 ++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
763 ++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
764 ++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
765 ++ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
766 ++ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
767 ++ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
768 ++ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
769 ++ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
770 ++ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
771 ++ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
772 ++ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
773 ++ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
774 ++ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
775 ++ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
776 ++ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
777 ++ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
778 ++ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
779 ++ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
780 ++ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
781 ++ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
782 ++ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
783 ++ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
784 ++ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
785 ++ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
786 ++ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
787 ++ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
788 ++ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
789 ++ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
790 + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
791 + {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
792 + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
793 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
794 +index c7b284587365..39754cc90043 100644
795 +--- a/drivers/scsi/sd.c
796 ++++ b/drivers/scsi/sd.c
797 +@@ -133,6 +133,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
798 +
799 + static struct kmem_cache *sd_cdb_cache;
800 + static mempool_t *sd_cdb_pool;
801 ++static mempool_t *sd_page_pool;
802 +
803 + static const char *sd_cache_types[] = {
804 + "write through", "none", "write back",
805 +@@ -759,9 +760,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
806 + unsigned int data_len = 24;
807 + char *buf;
808 +
809 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
810 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
811 + if (!rq->special_vec.bv_page)
812 + return BLKPREP_DEFER;
813 ++ clear_highpage(rq->special_vec.bv_page);
814 + rq->special_vec.bv_offset = 0;
815 + rq->special_vec.bv_len = data_len;
816 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
817 +@@ -792,9 +794,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
818 + u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
819 + u32 data_len = sdp->sector_size;
820 +
821 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
822 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
823 + if (!rq->special_vec.bv_page)
824 + return BLKPREP_DEFER;
825 ++ clear_highpage(rq->special_vec.bv_page);
826 + rq->special_vec.bv_offset = 0;
827 + rq->special_vec.bv_len = data_len;
828 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
829 +@@ -822,9 +825,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
830 + u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
831 + u32 data_len = sdp->sector_size;
832 +
833 +- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
834 ++ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
835 + if (!rq->special_vec.bv_page)
836 + return BLKPREP_DEFER;
837 ++ clear_highpage(rq->special_vec.bv_page);
838 + rq->special_vec.bv_offset = 0;
839 + rq->special_vec.bv_len = data_len;
840 + rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
841 +@@ -1299,7 +1303,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
842 + sd_zbc_write_unlock_zone(SCpnt);
843 +
844 + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
845 +- __free_page(rq->special_vec.bv_page);
846 ++ mempool_free(rq->special_vec.bv_page, sd_page_pool);
847 +
848 + if (SCpnt->cmnd != scsi_req(rq)->cmd) {
849 + cmnd = SCpnt->cmnd;
850 +@@ -3655,6 +3659,13 @@ static int __init init_sd(void)
851 + goto err_out_cache;
852 + }
853 +
854 ++ sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
855 ++ if (!sd_page_pool) {
856 ++ printk(KERN_ERR "sd: can't init discard page pool\n");
857 ++ err = -ENOMEM;
858 ++ goto err_out_ppool;
859 ++ }
860 ++
861 + err = scsi_register_driver(&sd_template.gendrv);
862 + if (err)
863 + goto err_out_driver;
864 +@@ -3662,6 +3673,9 @@ static int __init init_sd(void)
865 + return 0;
866 +
867 + err_out_driver:
868 ++ mempool_destroy(sd_page_pool);
869 ++
870 ++err_out_ppool:
871 + mempool_destroy(sd_cdb_pool);
872 +
873 + err_out_cache:
874 +@@ -3688,6 +3702,7 @@ static void __exit exit_sd(void)
875 +
876 + scsi_unregister_driver(&sd_template.gendrv);
877 + mempool_destroy(sd_cdb_pool);
878 ++ mempool_destroy(sd_page_pool);
879 + kmem_cache_destroy(sd_cdb_cache);
880 +
881 + class_unregister(&sd_disk_class);
882 +diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
883 +index d51ca243a028..df18d07d544d 100644
884 +--- a/drivers/spi/spi-imx.c
885 ++++ b/drivers/spi/spi-imx.c
886 +@@ -72,6 +72,7 @@ struct spi_imx_data;
887 +
888 + struct spi_imx_devtype_data {
889 + void (*intctrl)(struct spi_imx_data *, int);
890 ++ int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
891 + int (*config)(struct spi_device *);
892 + void (*trigger)(struct spi_imx_data *);
893 + int (*rx_available)(struct spi_imx_data *);
894 +@@ -439,11 +440,12 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
895 + writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
896 + }
897 +
898 +-static int mx51_ecspi_config(struct spi_device *spi)
899 ++static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
900 ++ struct spi_message *msg)
901 + {
902 +- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
903 ++ struct spi_device *spi = msg->spi;
904 + u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
905 +- u32 clk = spi_imx->speed_hz, delay, reg;
906 ++ u32 testreg;
907 + u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
908 +
909 + /*
910 +@@ -461,14 +463,21 @@ static int mx51_ecspi_config(struct spi_device *spi)
911 + if (spi->mode & SPI_READY)
912 + ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
913 +
914 +- /* set clock speed */
915 +- ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
916 +- spi_imx->spi_bus_clk = clk;
917 +-
918 + /* set chip select to use */
919 + ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
920 +
921 +- ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
922 ++ /*
923 ++ * The ctrl register must be written first, with the EN bit set other
924 ++ * registers must not be written to.
925 ++ */
926 ++ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
927 ++
928 ++ testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
929 ++ if (spi->mode & SPI_LOOP)
930 ++ testreg |= MX51_ECSPI_TESTREG_LBC;
931 ++ else
932 ++ testreg &= ~MX51_ECSPI_TESTREG_LBC;
933 ++ writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
934 +
935 + cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
936 +
937 +@@ -484,26 +493,38 @@ static int mx51_ecspi_config(struct spi_device *spi)
938 + cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
939 + cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
940 + }
941 ++
942 + if (spi->mode & SPI_CS_HIGH)
943 + cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
944 + else
945 + cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
946 +
947 ++ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
948 ++
949 ++ return 0;
950 ++}
951 ++
952 ++static int mx51_ecspi_config(struct spi_device *spi)
953 ++{
954 ++ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
955 ++ u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
956 ++ u32 clk = spi_imx->speed_hz, delay;
957 ++
958 ++ /* Clear BL field and set the right value */
959 ++ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
960 ++ ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
961 ++
962 ++ /* set clock speed */
963 ++ ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
964 ++ 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
965 ++ ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
966 ++ spi_imx->spi_bus_clk = clk;
967 ++
968 + if (spi_imx->usedma)
969 + ctrl |= MX51_ECSPI_CTRL_SMC;
970 +
971 +- /* CTRL register always go first to bring out controller from reset */
972 + writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
973 +
974 +- reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
975 +- if (spi->mode & SPI_LOOP)
976 +- reg |= MX51_ECSPI_TESTREG_LBC;
977 +- else
978 +- reg &= ~MX51_ECSPI_TESTREG_LBC;
979 +- writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);
980 +-
981 +- writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
982 +-
983 + /*
984 + * Wait until the changes in the configuration register CONFIGREG
985 + * propagate into the hardware. It takes exactly one tick of the
986 +@@ -525,7 +546,6 @@ static int mx51_ecspi_config(struct spi_device *spi)
987 + * Configure the DMA register: setup the watermark
988 + * and enable DMA request.
989 + */
990 +-
991 + writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
992 + MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
993 + MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
994 +@@ -599,6 +619,12 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
995 + writel(reg, spi_imx->base + MXC_CSPICTRL);
996 + }
997 +
998 ++static int mx31_prepare_message(struct spi_imx_data *spi_imx,
999 ++ struct spi_message *msg)
1000 ++{
1001 ++ return 0;
1002 ++}
1003 ++
1004 + static int mx31_config(struct spi_device *spi)
1005 + {
1006 + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1007 +@@ -695,6 +721,12 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
1008 + writel(reg, spi_imx->base + MXC_CSPICTRL);
1009 + }
1010 +
1011 ++static int mx21_prepare_message(struct spi_imx_data *spi_imx,
1012 ++ struct spi_message *msg)
1013 ++{
1014 ++ return 0;
1015 ++}
1016 ++
1017 + static int mx21_config(struct spi_device *spi)
1018 + {
1019 + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1020 +@@ -764,6 +796,12 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
1021 + writel(reg, spi_imx->base + MXC_CSPICTRL);
1022 + }
1023 +
1024 ++static int mx1_prepare_message(struct spi_imx_data *spi_imx,
1025 ++ struct spi_message *msg)
1026 ++{
1027 ++ return 0;
1028 ++}
1029 ++
1030 + static int mx1_config(struct spi_device *spi)
1031 + {
1032 + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1033 +@@ -798,6 +836,7 @@ static void mx1_reset(struct spi_imx_data *spi_imx)
1034 +
1035 + static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
1036 + .intctrl = mx1_intctrl,
1037 ++ .prepare_message = mx1_prepare_message,
1038 + .config = mx1_config,
1039 + .trigger = mx1_trigger,
1040 + .rx_available = mx1_rx_available,
1041 +@@ -810,6 +849,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
1042 +
1043 + static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
1044 + .intctrl = mx21_intctrl,
1045 ++ .prepare_message = mx21_prepare_message,
1046 + .config = mx21_config,
1047 + .trigger = mx21_trigger,
1048 + .rx_available = mx21_rx_available,
1049 +@@ -823,6 +863,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
1050 + static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
1051 + /* i.mx27 cspi shares the functions with i.mx21 one */
1052 + .intctrl = mx21_intctrl,
1053 ++ .prepare_message = mx21_prepare_message,
1054 + .config = mx21_config,
1055 + .trigger = mx21_trigger,
1056 + .rx_available = mx21_rx_available,
1057 +@@ -835,6 +876,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
1058 +
1059 + static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
1060 + .intctrl = mx31_intctrl,
1061 ++ .prepare_message = mx31_prepare_message,
1062 + .config = mx31_config,
1063 + .trigger = mx31_trigger,
1064 + .rx_available = mx31_rx_available,
1065 +@@ -848,6 +890,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
1066 + static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
1067 + /* i.mx35 and later cspi shares the functions with i.mx31 one */
1068 + .intctrl = mx31_intctrl,
1069 ++ .prepare_message = mx31_prepare_message,
1070 + .config = mx31_config,
1071 + .trigger = mx31_trigger,
1072 + .rx_available = mx31_rx_available,
1073 +@@ -860,6 +903,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
1074 +
1075 + static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
1076 + .intctrl = mx51_ecspi_intctrl,
1077 ++ .prepare_message = mx51_ecspi_prepare_message,
1078 + .config = mx51_ecspi_config,
1079 + .trigger = mx51_ecspi_trigger,
1080 + .rx_available = mx51_ecspi_rx_available,
1081 +@@ -872,6 +916,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
1082 +
1083 + static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
1084 + .intctrl = mx51_ecspi_intctrl,
1085 ++ .prepare_message = mx51_ecspi_prepare_message,
1086 + .config = mx51_ecspi_config,
1087 + .trigger = mx51_ecspi_trigger,
1088 + .rx_available = mx51_ecspi_rx_available,
1089 +@@ -1310,7 +1355,13 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
1090 + return ret;
1091 + }
1092 +
1093 +- return 0;
1094 ++ ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
1095 ++ if (ret) {
1096 ++ clk_disable(spi_imx->clk_ipg);
1097 ++ clk_disable(spi_imx->clk_per);
1098 ++ }
1099 ++
1100 ++ return ret;
1101 + }
1102 +
1103 + static int
1104 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1105 +index 6b2f6c41e2a9..997ff183c9cb 100644
1106 +--- a/drivers/usb/host/xhci-hub.c
1107 ++++ b/drivers/usb/host/xhci-hub.c
1108 +@@ -1512,7 +1512,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1109 + portsc_buf[port_index] = 0;
1110 +
1111 + /* Bail out if a USB3 port has a new device in link training */
1112 +- if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1113 ++ if ((hcd->speed >= HCD_USB3) &&
1114 ++ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1115 + bus_state->bus_suspended = 0;
1116 + spin_unlock_irqrestore(&xhci->lock, flags);
1117 + xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
1118 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1119 +index 1ccff2d9dee9..cbc91536e512 100644
1120 +--- a/drivers/usb/host/xhci.h
1121 ++++ b/drivers/usb/host/xhci.h
1122 +@@ -1859,6 +1859,8 @@ struct xhci_hcd {
1123 + unsigned sw_lpm_support:1;
1124 + /* support xHCI 1.0 spec USB2 hardware LPM */
1125 + unsigned hw_lpm_support:1;
1126 ++ /* Broken Suspend flag for SNPS Suspend resume issue */
1127 ++ unsigned broken_suspend:1;
1128 + /* cached usb2 extened protocol capabilites */
1129 + u32 *ext_caps;
1130 + unsigned int num_ext_caps;
1131 +@@ -1871,8 +1873,6 @@ struct xhci_hcd {
1132 +
1133 + /* platform-specific data -- must come last */
1134 + unsigned long priv[0] __aligned(sizeof(s64));
1135 +- /* Broken Suspend flag for SNPS Suspend resume issue */
1136 +- u8 broken_suspend;
1137 + };
1138 +
1139 + /* Platform specific overrides to generic XHCI hc_driver ops */
1140 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1141 +index 392fddc80c44..988be9ca2b4f 100644
1142 +--- a/drivers/usb/serial/option.c
1143 ++++ b/drivers/usb/serial/option.c
1144 +@@ -1167,6 +1167,10 @@ static const struct usb_device_id option_ids[] = {
1145 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
1146 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
1147 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
1148 ++ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
1149 ++ .driver_info = NCTRL(0) | RSVD(1) },
1150 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
1151 ++ .driver_info = NCTRL(0) },
1152 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
1153 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
1154 + .driver_info = RSVD(1) },
1155 +@@ -1331,6 +1335,7 @@ static const struct usb_device_id option_ids[] = {
1156 + .driver_info = RSVD(4) },
1157 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1158 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1159 ++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
1160 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1161 + .driver_info = RSVD(4) },
1162 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
1163 +@@ -1534,6 +1539,7 @@ static const struct usb_device_id option_ids[] = {
1164 + .driver_info = RSVD(2) },
1165 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1166 + .driver_info = RSVD(2) },
1167 ++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
1168 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1169 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1170 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
1171 +@@ -1761,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
1172 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1173 + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1174 + .driver_info = RSVD(5) | RSVD(6) },
1175 ++ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
1176 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1177 + .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
1178 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
1179 +@@ -1942,7 +1949,14 @@ static const struct usb_device_id option_ids[] = {
1180 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
1181 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
1182 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
1183 +- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1184 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
1185 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
1186 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
1187 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
1188 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
1189 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
1190 ++ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
1191 ++ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
1192 + { } /* Terminating entry */
1193 + };
1194 + MODULE_DEVICE_TABLE(usb, option_ids);
1195 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1196 +index 69309538ffb8..1581e8668b09 100644
1197 +--- a/fs/cifs/smb2pdu.c
1198 ++++ b/fs/cifs/smb2pdu.c
1199 +@@ -2020,14 +2020,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1200 + /* We check for obvious errors in the output buffer length and offset */
1201 + if (*plen == 0)
1202 + goto ioctl_exit; /* server returned no data */
1203 +- else if (*plen > 0xFF00) {
1204 ++ else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
1205 + cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
1206 + *plen = 0;
1207 + rc = -EIO;
1208 + goto ioctl_exit;
1209 + }
1210 +
1211 +- if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
1212 ++ if (get_rfc1002_length(rsp) - *plen < le32_to_cpu(rsp->OutputOffset)) {
1213 + cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
1214 + le32_to_cpu(rsp->OutputOffset));
1215 + *plen = 0;
1216 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
1217 +index 82ac5f682b73..f69c545f5868 100644
1218 +--- a/fs/proc/proc_sysctl.c
1219 ++++ b/fs/proc/proc_sysctl.c
1220 +@@ -464,7 +464,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1221 +
1222 + inode = new_inode(sb);
1223 + if (!inode)
1224 +- goto out;
1225 ++ return ERR_PTR(-ENOMEM);
1226 +
1227 + inode->i_ino = get_next_ino();
1228 +
1229 +@@ -474,8 +474,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1230 + if (unlikely(head->unregistering)) {
1231 + spin_unlock(&sysctl_lock);
1232 + iput(inode);
1233 +- inode = NULL;
1234 +- goto out;
1235 ++ return ERR_PTR(-ENOENT);
1236 + }
1237 + ei->sysctl = head;
1238 + ei->sysctl_entry = table;
1239 +@@ -500,7 +499,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
1240 + if (root->set_ownership)
1241 + root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
1242 +
1243 +-out:
1244 + return inode;
1245 + }
1246 +
1247 +@@ -549,10 +547,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
1248 + goto out;
1249 + }
1250 +
1251 +- err = ERR_PTR(-ENOMEM);
1252 + inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
1253 +- if (!inode)
1254 ++ if (IS_ERR(inode)) {
1255 ++ err = ERR_CAST(inode);
1256 + goto out;
1257 ++ }
1258 +
1259 + err = NULL;
1260 + d_set_d_op(dentry, &proc_sys_dentry_operations);
1261 +@@ -685,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file,
1262 + return false;
1263 + if (d_in_lookup(child)) {
1264 + inode = proc_sys_make_inode(dir->d_sb, head, table);
1265 +- if (!inode) {
1266 ++ if (IS_ERR(inode)) {
1267 + d_lookup_done(child);
1268 + dput(child);
1269 + return false;
1270 +diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
1271 +index ef820f803176..4e6e32c0c08a 100644
1272 +--- a/fs/ubifs/dir.c
1273 ++++ b/fs/ubifs/dir.c
1274 +@@ -1147,8 +1147,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
1275 + struct ubifs_inode *ui;
1276 + struct ubifs_inode *dir_ui = ubifs_inode(dir);
1277 + struct ubifs_info *c = dir->i_sb->s_fs_info;
1278 +- int err, len = strlen(symname);
1279 +- int sz_change = CALC_DENT_SIZE(len);
1280 ++ int err, sz_change, len = strlen(symname);
1281 + struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
1282 + struct fscrypt_symlink_data *sd = NULL;
1283 + struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
1284 +@@ -1189,6 +1188,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
1285 + if (err)
1286 + goto out_budg;
1287 +
1288 ++ sz_change = CALC_DENT_SIZE(fname_len(&nm));
1289 ++
1290 + inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
1291 + if (IS_ERR(inode)) {
1292 + err = PTR_ERR(inode);
1293 +diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
1294 +index ae5c02f22f3e..d998fbf7de30 100644
1295 +--- a/fs/ubifs/replay.c
1296 ++++ b/fs/ubifs/replay.c
1297 +@@ -209,6 +209,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
1298 + return ubifs_tnc_remove_range(c, &min_key, &max_key);
1299 + }
1300 +
1301 ++/**
1302 ++ * inode_still_linked - check whether inode in question will be re-linked.
1303 ++ * @c: UBIFS file-system description object
1304 ++ * @rino: replay entry to test
1305 ++ *
1306 ++ * O_TMPFILE files can be re-linked, this means link count goes from 0 to 1.
1307 ++ * This case needs special care, otherwise all references to the inode will
1308 ++ * be removed upon the first replay entry of an inode with link count 0
1309 ++ * is found.
1310 ++ */
1311 ++static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
1312 ++{
1313 ++ struct replay_entry *r;
1314 ++
1315 ++ ubifs_assert(rino->deletion);
1316 ++ ubifs_assert(key_type(c, &rino->key) == UBIFS_INO_KEY);
1317 ++
1318 ++ /*
1319 ++ * Find the most recent entry for the inode behind @rino and check
1320 ++ * whether it is a deletion.
1321 ++ */
1322 ++ list_for_each_entry_reverse(r, &c->replay_list, list) {
1323 ++ ubifs_assert(r->sqnum >= rino->sqnum);
1324 ++ if (key_inum(c, &r->key) == key_inum(c, &rino->key))
1325 ++ return r->deletion == 0;
1326 ++
1327 ++ }
1328 ++
1329 ++ ubifs_assert(0);
1330 ++ return false;
1331 ++}
1332 ++
1333 + /**
1334 + * apply_replay_entry - apply a replay entry to the TNC.
1335 + * @c: UBIFS file-system description object
1336 +@@ -239,6 +271,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
1337 + {
1338 + ino_t inum = key_inum(c, &r->key);
1339 +
1340 ++ if (inode_still_linked(c, r)) {
1341 ++ err = 0;
1342 ++ break;
1343 ++ }
1344 ++
1345 + err = ubifs_tnc_remove_ino(c, inum);
1346 + break;
1347 + }
1348 +diff --git a/include/linux/math64.h b/include/linux/math64.h
1349 +index 082de345b73c..3a7a14062668 100644
1350 +--- a/include/linux/math64.h
1351 ++++ b/include/linux/math64.h
1352 +@@ -254,4 +254,7 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
1353 + }
1354 + #endif /* mul_u64_u32_div */
1355 +
1356 ++#define DIV64_U64_ROUND_UP(ll, d) \
1357 ++ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
1358 ++
1359 + #endif /* _LINUX_MATH64_H */
1360 +diff --git a/kernel/panic.c b/kernel/panic.c
1361 +index bdd18afa19a4..32ff6fd30201 100644
1362 +--- a/kernel/panic.c
1363 ++++ b/kernel/panic.c
1364 +@@ -14,6 +14,7 @@
1365 + #include <linux/kmsg_dump.h>
1366 + #include <linux/kallsyms.h>
1367 + #include <linux/notifier.h>
1368 ++#include <linux/vt_kern.h>
1369 + #include <linux/module.h>
1370 + #include <linux/random.h>
1371 + #include <linux/ftrace.h>
1372 +@@ -230,7 +231,10 @@ void panic(const char *fmt, ...)
1373 + if (_crash_kexec_post_notifiers)
1374 + __crash_kexec(NULL);
1375 +
1376 +- bust_spinlocks(0);
1377 ++#ifdef CONFIG_VT
1378 ++ unblank_screen();
1379 ++#endif
1380 ++ console_unblank();
1381 +
1382 + /*
1383 + * We may have ended up stopping the CPU holding the lock (in
1384 +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
1385 +index 55d45fe2cc17..d7e478a430e9 100644
1386 +--- a/kernel/time/posix-timers.c
1387 ++++ b/kernel/time/posix-timers.c
1388 +@@ -298,9 +298,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
1389 + {
1390 + struct hrtimer *timer = &timr->it.real.timer;
1391 +
1392 +- if (!timr->it_interval)
1393 +- return;
1394 +-
1395 + timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
1396 + timr->it_interval);
1397 + hrtimer_restart(timer);
1398 +@@ -326,7 +323,7 @@ void posixtimer_rearm(struct siginfo *info)
1399 + if (!timr)
1400 + return;
1401 +
1402 +- if (timr->it_requeue_pending == info->si_sys_private) {
1403 ++ if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
1404 + timr->kclock->timer_rearm(timr);
1405 +
1406 + timr->it_active = 1;
1407 +diff --git a/mm/vmscan.c b/mm/vmscan.c
1408 +index be56e2e1931e..9734e62654fa 100644
1409 +--- a/mm/vmscan.c
1410 ++++ b/mm/vmscan.c
1411 +@@ -2367,9 +2367,11 @@ out:
1412 + /*
1413 + * Scan types proportional to swappiness and
1414 + * their relative recent reclaim efficiency.
1415 ++ * Make sure we don't miss the last page
1416 ++ * because of a round-off error.
1417 + */
1418 +- scan = div64_u64(scan * fraction[file],
1419 +- denominator);
1420 ++ scan = DIV64_U64_ROUND_UP(scan * fraction[file],
1421 ++ denominator);
1422 + break;
1423 + case SCAN_FILE:
1424 + case SCAN_ANON:
1425 +diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
1426 +index 5b888476d9ff..b728140c79a9 100644
1427 +--- a/sound/soc/codecs/sta32x.c
1428 ++++ b/sound/soc/codecs/sta32x.c
1429 +@@ -879,6 +879,9 @@ static int sta32x_probe(struct snd_soc_codec *codec)
1430 + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
1431 + struct sta32x_platform_data *pdata = sta32x->pdata;
1432 + int i, ret = 0, thermal = 0;
1433 ++
1434 ++ sta32x->codec = codec;
1435 ++
1436 + ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
1437 + sta32x->supplies);
1438 + if (ret != 0) {
1439 +diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
1440 +index b205c1340456..5e53cafe6cf9 100644
1441 +--- a/tools/perf/builtin-record.c
1442 ++++ b/tools/perf/builtin-record.c
1443 +@@ -800,13 +800,10 @@ static int record__synthesize(struct record *rec, bool tail)
1444 + return 0;
1445 +
1446 + if (file->is_pipe) {
1447 +- err = perf_event__synthesize_features(
1448 +- tool, session, rec->evlist, process_synthesized_event);
1449 +- if (err < 0) {
1450 +- pr_err("Couldn't synthesize features.\n");
1451 +- return err;
1452 +- }
1453 +-
1454 ++ /*
1455 ++ * We need to synthesize events first, because some
1456 ++ * features works on top of them (on report side).
1457 ++ */
1458 + err = perf_event__synthesize_attrs(tool, session,
1459 + process_synthesized_event);
1460 + if (err < 0) {
1461 +@@ -814,6 +811,13 @@ static int record__synthesize(struct record *rec, bool tail)
1462 + goto out;
1463 + }
1464 +
1465 ++ err = perf_event__synthesize_features(tool, session, rec->evlist,
1466 ++ process_synthesized_event);
1467 ++ if (err < 0) {
1468 ++ pr_err("Couldn't synthesize features.\n");
1469 ++ return err;
1470 ++ }
1471 ++
1472 + if (have_tracepoints(&rec->evlist->entries)) {
1473 + /*
1474 + * FIXME err <= 0 here actually means that