From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.15 commit in: /
Date: Fri, 27 Jun 2014 11:30:33
Message-Id: 1403868611.869cfedf5b4ae47bbc00a95d2936b1efd797b6d9.mpagano@gentoo
1 commit: 869cfedf5b4ae47bbc00a95d2936b1efd797b6d9
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Jun 27 11:30:11 2014 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Jun 27 11:30:11 2014 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=869cfedf
7
8 Linux patch 3.15.2
9
10 ---
11 0000_README             |    4 +
12 1001_linux-3.15.2.patch | 2455 +++++++++++++++++++++++++++++++++++++++++++++++
13 2 files changed, 2459 insertions(+)
14
15 diff --git a/0000_README b/0000_README
16 index 019dbd2..58bb467 100644
17 --- a/0000_README
18 +++ b/0000_README
19 @@ -47,6 +47,10 @@ Patch: 1000_linux-3.15.1.patch
20 From: http://www.kernel.org
21 Desc: Linux 3.15.1
22
23 +Patch: 1001_linux-3.15.2.patch
24 +From: http://www.kernel.org
25 +Desc: Linux 3.15.2
26 +
27 Patch: 1700_enable-thinkpad-micled.patch
28 From: https://bugs.gentoo.org/show_bug.cgi?id=449248
29 Desc: Enable mic mute led in thinkpads
30
31 diff --git a/1001_linux-3.15.2.patch b/1001_linux-3.15.2.patch
32 new file mode 100644
33 index 0000000..3be738a
34 --- /dev/null
35 +++ b/1001_linux-3.15.2.patch
36 @@ -0,0 +1,2455 @@
37 +diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
38 +index f1c5cc9d17a8..4c3efe434806 100644
39 +--- a/Documentation/ABI/testing/ima_policy
40 ++++ b/Documentation/ABI/testing/ima_policy
41 +@@ -23,7 +23,7 @@ Description:
42 + [fowner]]
43 + lsm: [[subj_user=] [subj_role=] [subj_type=]
44 + [obj_user=] [obj_role=] [obj_type=]]
45 +- option: [[appraise_type=]]
46 ++ option: [[appraise_type=]] [permit_directio]
47 +
48 + base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
49 + mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
50 +diff --git a/Makefile b/Makefile
51 +index e2846acd2841..475e0853a2f4 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,6 +1,6 @@
55 + VERSION = 3
56 + PATCHLEVEL = 15
57 +-SUBLEVEL = 1
58 ++SUBLEVEL = 2
59 + EXTRAVERSION =
60 + NAME = Shuffling Zombie Juror
61 +
62 +diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c
63 +index 2ba694f9626b..f8bc3511a8c8 100644
64 +--- a/arch/arm/mach-at91/sysirq_mask.c
65 ++++ b/arch/arm/mach-at91/sysirq_mask.c
66 +@@ -25,24 +25,28 @@
67 +
68 + #include "generic.h"
69 +
70 +-#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
71 +-#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
72 ++#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
73 ++#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
74 ++#define AT91_RTC_IRQ_MASK 0x1f /* Available IRQs mask */
75 +
76 + void __init at91_sysirq_mask_rtc(u32 rtc_base)
77 + {
78 + void __iomem *base;
79 +- u32 mask;
80 +
81 + base = ioremap(rtc_base, 64);
82 + if (!base)
83 + return;
84 +
85 +- mask = readl_relaxed(base + AT91_RTC_IMR);
86 +- if (mask) {
87 +- pr_info("AT91: Disabling rtc irq\n");
88 +- writel_relaxed(mask, base + AT91_RTC_IDR);
89 +- (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
90 +- }
91 ++ /*
92 ++ * sam9x5 SoCs have the following errata:
93 ++ * "RTC: Interrupt Mask Register cannot be used
94 ++ * Interrupt Mask Register read always returns 0."
95 ++ *
96 ++ * Hence we're not relying on IMR values to disable
97 ++ * interrupts.
98 ++ */
99 ++ writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
100 ++ (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */
101 +
102 + iounmap(base);
103 + }
104 +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
105 +index da5186fbd77a..5efce56f0df0 100644
106 +--- a/arch/mips/kvm/kvm_mips.c
107 ++++ b/arch/mips/kvm/kvm_mips.c
108 +@@ -304,7 +304,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
109 + if (cpu_has_veic || cpu_has_vint) {
110 + size = 0x200 + VECTORSPACING * 64;
111 + } else {
112 +- size = 0x200;
113 ++ size = 0x4000;
114 + }
115 +
116 + /* Save Linux EBASE */
117 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
118 +index 200a8f9390b6..0c734baea2d4 100644
119 +--- a/arch/s390/kvm/interrupt.c
120 ++++ b/arch/s390/kvm/interrupt.c
121 +@@ -900,7 +900,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
122 + return 0;
123 + }
124 +
125 +-static void clear_floating_interrupts(struct kvm *kvm)
126 ++void kvm_s390_clear_float_irqs(struct kvm *kvm)
127 + {
128 + struct kvm_s390_float_interrupt *fi;
129 + struct kvm_s390_interrupt_info *n, *inti = NULL;
130 +@@ -1246,7 +1246,7 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
131 + break;
132 + case KVM_DEV_FLIC_CLEAR_IRQS:
133 + r = 0;
134 +- clear_floating_interrupts(dev->kvm);
135 ++ kvm_s390_clear_float_irqs(dev->kvm);
136 + break;
137 + case KVM_DEV_FLIC_APF_ENABLE:
138 + dev->kvm->arch.gmap->pfault_enabled = 1;
139 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
140 +index 9ae6664ff08c..6c3699ec998e 100644
141 +--- a/arch/s390/kvm/kvm-s390.c
142 ++++ b/arch/s390/kvm/kvm-s390.c
143 +@@ -322,6 +322,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
144 + {
145 + VCPU_EVENT(vcpu, 3, "%s", "free cpu");
146 + trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
147 ++ kvm_s390_clear_local_irqs(vcpu);
148 + kvm_clear_async_pf_completion_queue(vcpu);
149 + if (!kvm_is_ucontrol(vcpu->kvm)) {
150 + clear_bit(63 - vcpu->vcpu_id,
151 +@@ -372,6 +373,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
152 + if (!kvm_is_ucontrol(kvm))
153 + gmap_free(kvm->arch.gmap);
154 + kvm_s390_destroy_adapters(kvm);
155 ++ kvm_s390_clear_float_irqs(kvm);
156 + }
157 +
158 + /* Section: vcpu related */
159 +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
160 +index 3c1e2274d9ea..604872125309 100644
161 +--- a/arch/s390/kvm/kvm-s390.h
162 ++++ b/arch/s390/kvm/kvm-s390.h
163 +@@ -130,6 +130,7 @@ void kvm_s390_tasklet(unsigned long parm);
164 + void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
165 + void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
166 + void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
167 ++void kvm_s390_clear_float_irqs(struct kvm *kvm);
168 + int __must_check kvm_s390_inject_vm(struct kvm *kvm,
169 + struct kvm_s390_interrupt *s390int);
170 + int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
171 +diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
172 +index a82c6b2a9780..49cee4af16f4 100644
173 +--- a/arch/sparc/net/bpf_jit_comp.c
174 ++++ b/arch/sparc/net/bpf_jit_comp.c
175 +@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
176 + #define BNE (F2(0, 2) | CONDNE)
177 +
178 + #ifdef CONFIG_SPARC64
179 +-#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
180 ++#define BE_PTR (F2(0, 1) | CONDE | (2 << 20))
181 + #else
182 +-#define BNE_PTR BNE
183 ++#define BE_PTR BE
184 + #endif
185 +
186 + #define SETHI(K, REG) \
187 +@@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
188 + case BPF_S_ANC_IFINDEX:
189 + emit_skb_loadptr(dev, r_A);
190 + emit_cmpi(r_A, 0);
191 +- emit_branch(BNE_PTR, cleanup_addr + 4);
192 ++ emit_branch(BE_PTR, cleanup_addr + 4);
193 + emit_nop();
194 + emit_load32(r_A, struct net_device, ifindex, r_A);
195 + break;
196 +@@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
197 + case BPF_S_ANC_HATYPE:
198 + emit_skb_loadptr(dev, r_A);
199 + emit_cmpi(r_A, 0);
200 +- emit_branch(BNE_PTR, cleanup_addr + 4);
201 ++ emit_branch(BE_PTR, cleanup_addr + 4);
202 + emit_nop();
203 + emit_load16(r_A, struct net_device, type, r_A);
204 + break;
205 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
206 +index 9736529ade08..006911858174 100644
207 +--- a/arch/x86/kvm/lapic.c
208 ++++ b/arch/x86/kvm/lapic.c
209 +@@ -360,6 +360,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
210 +
211 + static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
212 + {
213 ++ /* Note that we never get here with APIC virtualization enabled. */
214 ++
215 + if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
216 + ++apic->isr_count;
217 + BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
218 +@@ -371,12 +373,48 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
219 + apic->highest_isr_cache = vec;
220 + }
221 +
222 ++static inline int apic_find_highest_isr(struct kvm_lapic *apic)
223 ++{
224 ++ int result;
225 ++
226 ++ /*
227 ++ * Note that isr_count is always 1, and highest_isr_cache
228 ++ * is always -1, with APIC virtualization enabled.
229 ++ */
230 ++ if (!apic->isr_count)
231 ++ return -1;
232 ++ if (likely(apic->highest_isr_cache != -1))
233 ++ return apic->highest_isr_cache;
234 ++
235 ++ result = find_highest_vector(apic->regs + APIC_ISR);
236 ++ ASSERT(result == -1 || result >= 16);
237 ++
238 ++ return result;
239 ++}
240 ++
241 + static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
242 + {
243 +- if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
244 ++ struct kvm_vcpu *vcpu;
245 ++ if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
246 ++ return;
247 ++
248 ++ vcpu = apic->vcpu;
249 ++
250 ++ /*
251 ++ * We do get here for APIC virtualization enabled if the guest
252 ++ * uses the Hyper-V APIC enlightenment. In this case we may need
253 ++ * to trigger a new interrupt delivery by writing the SVI field;
254 ++ * on the other hand isr_count and highest_isr_cache are unused
255 ++ * and must be left alone.
256 ++ */
257 ++ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
258 ++ kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
259 ++ apic_find_highest_isr(apic));
260 ++ else {
261 + --apic->isr_count;
262 +- BUG_ON(apic->isr_count < 0);
263 +- apic->highest_isr_cache = -1;
264 ++ BUG_ON(apic->isr_count < 0);
265 ++ apic->highest_isr_cache = -1;
266 ++ }
267 + }
268 +
269 + int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
270 +@@ -456,22 +494,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
271 + __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
272 + }
273 +
274 +-static inline int apic_find_highest_isr(struct kvm_lapic *apic)
275 +-{
276 +- int result;
277 +-
278 +- /* Note that isr_count is always 1 with vid enabled */
279 +- if (!apic->isr_count)
280 +- return -1;
281 +- if (likely(apic->highest_isr_cache != -1))
282 +- return apic->highest_isr_cache;
283 +-
284 +- result = find_highest_vector(apic->regs + APIC_ISR);
285 +- ASSERT(result == -1 || result >= 16);
286 +-
287 +- return result;
288 +-}
289 +-
290 + void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
291 + {
292 + struct kvm_lapic *apic = vcpu->arch.apic;
293 +@@ -1605,6 +1627,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
294 + int vector = kvm_apic_has_interrupt(vcpu);
295 + struct kvm_lapic *apic = vcpu->arch.apic;
296 +
297 ++ /* Note that we never get here with APIC virtualization enabled. */
298 ++
299 + if (vector == -1)
300 + return -1;
301 +
302 +diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
303 +index 2e7801af466e..05827eccc53a 100644
304 +--- a/drivers/hv/connection.c
305 ++++ b/drivers/hv/connection.c
306 +@@ -224,8 +224,8 @@ cleanup:
307 + vmbus_connection.int_page = NULL;
308 + }
309 +
310 +- free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1);
311 +- free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1);
312 ++ free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
313 ++ free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
314 + vmbus_connection.monitor_pages[0] = NULL;
315 + vmbus_connection.monitor_pages[1] = NULL;
316 +
317 +diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
318 +index 7e6d78dc9437..5e90c5d771a7 100644
319 +--- a/drivers/hv/hv_balloon.c
320 ++++ b/drivers/hv/hv_balloon.c
321 +@@ -19,6 +19,7 @@
322 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
323 +
324 + #include <linux/kernel.h>
325 ++#include <linux/jiffies.h>
326 + #include <linux/mman.h>
327 + #include <linux/delay.h>
328 + #include <linux/init.h>
329 +@@ -459,6 +460,11 @@ static bool do_hot_add;
330 + */
331 + static uint pressure_report_delay = 45;
332 +
333 ++/*
334 ++ * The last time we posted a pressure report to host.
335 ++ */
336 ++static unsigned long last_post_time;
337 ++
338 + module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
339 + MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
340 +
341 +@@ -542,6 +548,7 @@ struct hv_dynmem_device {
342 +
343 + static struct hv_dynmem_device dm_device;
344 +
345 ++static void post_status(struct hv_dynmem_device *dm);
346 + #ifdef CONFIG_MEMORY_HOTPLUG
347 +
348 + static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
349 +@@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
350 + * have not been "onlined" within the allowed time.
351 + */
352 + wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
353 +-
354 ++ post_status(&dm_device);
355 + }
356 +
357 + return;
358 +@@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
359 + {
360 + struct dm_status status;
361 + struct sysinfo val;
362 ++ unsigned long now = jiffies;
363 ++ unsigned long last_post = last_post_time;
364 +
365 + if (pressure_report_delay > 0) {
366 + --pressure_report_delay;
367 + return;
368 + }
369 ++
370 ++ if (!time_after(now, (last_post_time + HZ)))
371 ++ return;
372 ++
373 + si_meminfo(&val);
374 + memset(&status, 0, sizeof(struct dm_status));
375 + status.hdr.type = DM_STATUS_REPORT;
376 +@@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
377 + if (status.hdr.trans_id != atomic_read(&trans_id))
378 + return;
379 +
380 ++ /*
381 ++ * If the last post time that we sampled has changed,
382 ++ * we have raced, don't post the status.
383 ++ */
384 ++ if (last_post != last_post_time)
385 ++ return;
386 ++
387 ++ last_post_time = jiffies;
388 + vmbus_sendpacket(dm->dev->channel, &status,
389 + sizeof(struct dm_status),
390 + (unsigned long)NULL,
391 +@@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
392 +
393 + if (ret == -EAGAIN)
394 + msleep(20);
395 +-
396 ++ post_status(&dm_device);
397 + } while (ret == -EAGAIN);
398 +
399 + if (ret) {
400 +@@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
401 + struct dm_unballoon_response resp;
402 + int i;
403 +
404 +- for (i = 0; i < range_count; i++)
405 ++ for (i = 0; i < range_count; i++) {
406 + free_balloon_pages(dm, &range_array[i]);
407 ++ post_status(&dm_device);
408 ++ }
409 +
410 + if (req->more_pages == 1)
411 + return;
412 +diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
413 +index 89777ed9abd8..be0b2accf895 100644
414 +--- a/drivers/iio/adc/at91_adc.c
415 ++++ b/drivers/iio/adc/at91_adc.c
416 +@@ -322,12 +322,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
417 + return idev->num_channels;
418 + }
419 +
420 +-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
421 ++static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
422 + struct at91_adc_trigger *triggers,
423 + const char *trigger_name)
424 + {
425 + struct at91_adc_state *st = iio_priv(idev);
426 +- u8 value = 0;
427 + int i;
428 +
429 + for (i = 0; i < st->trigger_number; i++) {
430 +@@ -340,15 +339,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
431 + return -ENOMEM;
432 +
433 + if (strcmp(trigger_name, name) == 0) {
434 +- value = triggers[i].value;
435 + kfree(name);
436 +- break;
437 ++ if (triggers[i].value == 0)
438 ++ return -EINVAL;
439 ++ return triggers[i].value;
440 + }
441 +
442 + kfree(name);
443 + }
444 +
445 +- return value;
446 ++ return -EINVAL;
447 + }
448 +
449 + static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
450 +@@ -358,14 +358,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
451 + struct iio_buffer *buffer = idev->buffer;
452 + struct at91_adc_reg_desc *reg = st->registers;
453 + u32 status = at91_adc_readl(st, reg->trigger_register);
454 +- u8 value;
455 ++ int value;
456 + u8 bit;
457 +
458 + value = at91_adc_get_trigger_value_by_name(idev,
459 + st->trigger_list,
460 + idev->trig->name);
461 +- if (value == 0)
462 +- return -EINVAL;
463 ++ if (value < 0)
464 ++ return value;
465 +
466 + if (state) {
467 + st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
468 +diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
469 +index 9cf3229a7272..1b3b74be5c20 100644
470 +--- a/drivers/iio/adc/max1363.c
471 ++++ b/drivers/iio/adc/max1363.c
472 +@@ -1252,8 +1252,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
473 + .num_modes = ARRAY_SIZE(max1238_mode_list),
474 + .default_mode = s0to11,
475 + .info = &max1238_info,
476 +- .channels = max1238_channels,
477 +- .num_channels = ARRAY_SIZE(max1238_channels),
478 ++ .channels = max1038_channels,
479 ++ .num_channels = ARRAY_SIZE(max1038_channels),
480 + },
481 + [max11605] = {
482 + .bits = 8,
483 +@@ -1262,8 +1262,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
484 + .num_modes = ARRAY_SIZE(max1238_mode_list),
485 + .default_mode = s0to11,
486 + .info = &max1238_info,
487 +- .channels = max1238_channels,
488 +- .num_channels = ARRAY_SIZE(max1238_channels),
489 ++ .channels = max1038_channels,
490 ++ .num_channels = ARRAY_SIZE(max1038_channels),
491 + },
492 + [max11606] = {
493 + .bits = 10,
494 +@@ -1312,8 +1312,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
495 + .num_modes = ARRAY_SIZE(max1238_mode_list),
496 + .default_mode = s0to11,
497 + .info = &max1238_info,
498 +- .channels = max1238_channels,
499 +- .num_channels = ARRAY_SIZE(max1238_channels),
500 ++ .channels = max1138_channels,
501 ++ .num_channels = ARRAY_SIZE(max1138_channels),
502 + },
503 + [max11611] = {
504 + .bits = 10,
505 +@@ -1322,8 +1322,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
506 + .num_modes = ARRAY_SIZE(max1238_mode_list),
507 + .default_mode = s0to11,
508 + .info = &max1238_info,
509 +- .channels = max1238_channels,
510 +- .num_channels = ARRAY_SIZE(max1238_channels),
511 ++ .channels = max1138_channels,
512 ++ .num_channels = ARRAY_SIZE(max1138_channels),
513 + },
514 + [max11612] = {
515 + .bits = 12,
516 +diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
517 +index 6989c16aec2b..b58d6302521f 100644
518 +--- a/drivers/iio/adc/men_z188_adc.c
519 ++++ b/drivers/iio/adc/men_z188_adc.c
520 +@@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
521 + indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
522 +
523 + mem = mcb_request_mem(dev, "z188-adc");
524 +- if (!mem)
525 +- return -ENOMEM;
526 ++ if (IS_ERR(mem))
527 ++ return PTR_ERR(mem);
528 +
529 + adc->base = ioremap(mem->start, resource_size(mem));
530 + if (adc->base == NULL)
531 +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
532 +index 74866d1efd1b..2a524acabec8 100644
533 +--- a/drivers/iio/magnetometer/ak8975.c
534 ++++ b/drivers/iio/magnetometer/ak8975.c
535 +@@ -352,8 +352,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
536 + {
537 + struct ak8975_data *data = iio_priv(indio_dev);
538 + struct i2c_client *client = data->client;
539 +- u16 meas_reg;
540 +- s16 raw;
541 + int ret;
542 +
543 + mutex_lock(&data->lock);
544 +@@ -401,16 +399,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
545 + dev_err(&client->dev, "Read axis data fails\n");
546 + goto exit;
547 + }
548 +- meas_reg = ret;
549 +
550 + mutex_unlock(&data->lock);
551 +
552 +- /* Endian conversion of the measured values. */
553 +- raw = (s16) (le16_to_cpu(meas_reg));
554 +-
555 + /* Clamp to valid range. */
556 +- raw = clamp_t(s16, raw, -4096, 4095);
557 +- *val = raw;
558 ++ *val = clamp_t(s16, ret, -4096, 4095);
559 + return IIO_VAL_INT;
560 +
561 + exit:
562 +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
563 +index ba6d0c520e63..01b2e0b18878 100644
564 +--- a/drivers/iio/pressure/mpl3115.c
565 ++++ b/drivers/iio/pressure/mpl3115.c
566 +@@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
567 + mutex_unlock(&data->lock);
568 + if (ret < 0)
569 + return ret;
570 +- *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
571 ++ *val = be32_to_cpu(tmp) >> 12;
572 + return IIO_VAL_INT;
573 + case IIO_TEMP: /* in 0.0625 celsius / LSB */
574 + mutex_lock(&data->lock);
575 +@@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
576 + mutex_unlock(&data->lock);
577 + if (ret < 0)
578 + return ret;
579 +- *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
580 ++ *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
581 + return IIO_VAL_INT;
582 + default:
583 + return -EINVAL;
584 +@@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
585 + BIT(IIO_CHAN_INFO_SCALE),
586 + .scan_index = 0,
587 + .scan_type = {
588 +- .sign = 's',
589 ++ .sign = 'u',
590 + .realbits = 20,
591 + .storagebits = 32,
592 + .shift = 12,
593 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
594 +index 6a9509ccd33b..08ed9a30c3a7 100644
595 +--- a/drivers/net/ethernet/renesas/sh_eth.c
596 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
597 +@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
598 + };
599 +
600 + static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
601 ++ [EDMR] = 0x0000,
602 ++ [EDTRR] = 0x0004,
603 ++ [EDRRR] = 0x0008,
604 ++ [TDLAR] = 0x000c,
605 ++ [RDLAR] = 0x0010,
606 ++ [EESR] = 0x0014,
607 ++ [EESIPR] = 0x0018,
608 ++ [TRSCER] = 0x001c,
609 ++ [RMFCR] = 0x0020,
610 ++ [TFTR] = 0x0024,
611 ++ [FDR] = 0x0028,
612 ++ [RMCR] = 0x002c,
613 ++ [EDOCR] = 0x0030,
614 ++ [FCFTR] = 0x0034,
615 ++ [RPADIR] = 0x0038,
616 ++ [TRIMD] = 0x003c,
617 ++ [RBWAR] = 0x0040,
618 ++ [RDFAR] = 0x0044,
619 ++ [TBRAR] = 0x004c,
620 ++ [TDFAR] = 0x0050,
621 ++
622 + [ECMR] = 0x0160,
623 + [ECSR] = 0x0164,
624 + [ECSIPR] = 0x0168,
625 +@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
626 + .register_type = SH_ETH_REG_FAST_SH4,
627 +
628 + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
629 +- .rmcr_value = RMCR_RNC,
630 +
631 + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
632 + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
633 +@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
634 + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
635 + EESR_TDE | EESR_ECI,
636 + .fdr_value = 0x0000072f,
637 +- .rmcr_value = RMCR_RNC,
638 +
639 + .irq_flags = IRQF_SHARED,
640 + .apr = 1,
641 +@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
642 + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
643 + EESR_TDE | EESR_ECI,
644 + .fdr_value = 0x0000070f,
645 +- .rmcr_value = RMCR_RNC,
646 +
647 + .apr = 1,
648 + .mpr = 1,
649 +@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
650 + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
651 + EESR_TDE | EESR_ECI,
652 + .fdr_value = 0x0000070f,
653 +- .rmcr_value = RMCR_RNC,
654 +
655 + .no_psr = 1,
656 + .apr = 1,
657 +@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
658 + if (!cd->fdr_value)
659 + cd->fdr_value = DEFAULT_FDR_INIT;
660 +
661 +- if (!cd->rmcr_value)
662 +- cd->rmcr_value = DEFAULT_RMCR_VALUE;
663 +-
664 + if (!cd->tx_check)
665 + cd->tx_check = DEFAULT_TX_CHECK;
666 +
667 +@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
668 + sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
669 + sh_eth_write(ndev, 0, TFTR);
670 +
671 +- /* Frame recv control */
672 +- sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
673 ++ /* Frame recv control (enable multiple-packets per rx irq) */
674 ++ sh_eth_write(ndev, RMCR_RNC, RMCR);
675 +
676 + sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
677 +
678 +diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
679 +index d55e37cd5fec..b37c427144ee 100644
680 +--- a/drivers/net/ethernet/renesas/sh_eth.h
681 ++++ b/drivers/net/ethernet/renesas/sh_eth.h
682 +@@ -319,7 +319,6 @@ enum TD_STS_BIT {
683 + enum RMCR_BIT {
684 + RMCR_RNC = 0x00000001,
685 + };
686 +-#define DEFAULT_RMCR_VALUE 0x00000000
687 +
688 + /* ECMR */
689 + enum FELIC_MODE_BIT {
690 +@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
691 + unsigned long fdr_value;
692 + unsigned long fcftr_value;
693 + unsigned long rpadir_value;
694 +- unsigned long rmcr_value;
695 +
696 + /* interrupt checking mask */
697 + unsigned long tx_check;
698 +diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
699 +index 4d3f119b67b3..afb94aa2c15e 100644
700 +--- a/drivers/net/ethernet/sfc/io.h
701 ++++ b/drivers/net/ethernet/sfc/io.h
702 +@@ -66,10 +66,17 @@
703 + #define EFX_USE_QWORD_IO 1
704 + #endif
705 +
706 ++/* Hardware issue requires that only 64-bit naturally aligned writes
707 ++ * are seen by hardware. Its not strictly necessary to restrict to
708 ++ * x86_64 arch, but done for safety since unusual write combining behaviour
709 ++ * can break PIO.
710 ++ */
711 ++#ifdef CONFIG_X86_64
712 + /* PIO is a win only if write-combining is possible */
713 + #ifdef ARCH_HAS_IOREMAP_WC
714 + #define EFX_USE_PIO 1
715 + #endif
716 ++#endif
717 +
718 + #ifdef EFX_USE_QWORD_IO
719 + static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
720 +diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
721 +index fa9475300411..ede8dcca0ff3 100644
722 +--- a/drivers/net/ethernet/sfc/tx.c
723 ++++ b/drivers/net/ethernet/sfc/tx.c
724 +@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
725 + u8 buf[L1_CACHE_BYTES];
726 + };
727 +
728 ++/* Copy in explicit 64-bit writes. */
729 ++static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
730 ++{
731 ++ u64 *src64 = src;
732 ++ u64 __iomem *dest64 = dest;
733 ++ size_t l64 = len / 8;
734 ++ size_t i;
735 ++
736 ++ for (i = 0; i < l64; i++)
737 ++ writeq(src64[i], &dest64[i]);
738 ++}
739 ++
740 + /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
741 + * Advances piobuf pointer. Leaves additional data in the copy buffer.
742 + */
743 +@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
744 + {
745 + int block_len = len & ~(sizeof(copy_buf->buf) - 1);
746 +
747 +- memcpy_toio(*piobuf, data, block_len);
748 ++ efx_memcpy_64(*piobuf, data, block_len);
749 + *piobuf += block_len;
750 + len -= block_len;
751 +
752 +@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
753 + if (copy_buf->used < sizeof(copy_buf->buf))
754 + return;
755 +
756 +- memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
757 ++ efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
758 + *piobuf += sizeof(copy_buf->buf);
759 + data += copy_to_buf;
760 + len -= copy_to_buf;
761 +@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
762 + {
763 + /* if there's anything in it, write the whole buffer, including junk */
764 + if (copy_buf->used)
765 +- memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
766 ++ efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
767 + }
768 +
769 + /* Traverse skb structure and copy fragments in to PIO buffer.
770 +@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
771 + */
772 + BUILD_BUG_ON(L1_CACHE_BYTES >
773 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
774 +- memcpy_toio(tx_queue->piobuf, skb->data,
775 +- ALIGN(skb->len, L1_CACHE_BYTES));
776 ++ efx_memcpy_64(tx_queue->piobuf, skb->data,
777 ++ ALIGN(skb->len, L1_CACHE_BYTES));
778 + }
779 +
780 + EFX_POPULATE_QWORD_5(buffer->option,
781 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
782 +index d53e299ae1d9..7eec598c5cb6 100644
783 +--- a/drivers/net/macvlan.c
784 ++++ b/drivers/net/macvlan.c
785 +@@ -1036,7 +1036,6 @@ static int macvlan_device_event(struct notifier_block *unused,
786 + list_for_each_entry_safe(vlan, next, &port->vlans, list)
787 + vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
788 + unregister_netdevice_many(&list_kill);
789 +- list_del(&list_kill);
790 + break;
791 + case NETDEV_PRE_TYPE_CHANGE:
792 + /* Forbid underlaying device to change its type. */
793 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
794 +index dc4bf06948c7..cf62d7e8329f 100644
795 +--- a/drivers/net/usb/qmi_wwan.c
796 ++++ b/drivers/net/usb/qmi_wwan.c
797 +@@ -763,7 +763,12 @@ static const struct usb_device_id products[] = {
798 + {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
799 + {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
800 + {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
801 +- {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
802 ++ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
803 ++ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
804 ++ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
805 ++ {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */
806 ++ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
807 ++ {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */
808 + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
809 + {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
810 + {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
811 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
812 +index 4dbb2ed85b97..77dcf92ea350 100644
813 +--- a/drivers/net/vxlan.c
814 ++++ b/drivers/net/vxlan.c
815 +@@ -2275,9 +2275,9 @@ static void vxlan_setup(struct net_device *dev)
816 + eth_hw_addr_random(dev);
817 + ether_setup(dev);
818 + if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
819 +- dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
820 ++ dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
821 + else
822 +- dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
823 ++ dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
824 +
825 + dev->netdev_ops = &vxlan_netdev_ops;
826 + dev->destructor = free_netdev;
827 +@@ -2660,8 +2660,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
828 + if (!tb[IFLA_MTU])
829 + dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
830 +
831 +- /* update header length based on lower device */
832 +- dev->hard_header_len = lowerdev->hard_header_len +
833 ++ dev->needed_headroom = lowerdev->hard_header_len +
834 + (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
835 + } else if (use_ipv6)
836 + vxlan->flags |= VXLAN_F_IPV6;
837 +diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
838 +index 3281c90691c3..44fe83ee9bee 100644
839 +--- a/drivers/rtc/rtc-at91rm9200.c
840 ++++ b/drivers/rtc/rtc-at91rm9200.c
841 +@@ -48,6 +48,7 @@ struct at91_rtc_config {
842 +
843 + static const struct at91_rtc_config *at91_rtc_config;
844 + static DECLARE_COMPLETION(at91_rtc_updated);
845 ++static DECLARE_COMPLETION(at91_rtc_upd_rdy);
846 + static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
847 + static void __iomem *at91_rtc_regs;
848 + static int irq;
849 +@@ -161,6 +162,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
850 + 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
851 + tm->tm_hour, tm->tm_min, tm->tm_sec);
852 +
853 ++ wait_for_completion(&at91_rtc_upd_rdy);
854 ++
855 + /* Stop Time/Calendar from counting */
856 + cr = at91_rtc_read(AT91_RTC_CR);
857 + at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
858 +@@ -183,7 +186,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
859 +
860 + /* Restart Time/Calendar */
861 + cr = at91_rtc_read(AT91_RTC_CR);
862 ++ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
863 + at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
864 ++ at91_rtc_write_ier(AT91_RTC_SECEV);
865 +
866 + return 0;
867 + }
868 +@@ -290,8 +295,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
869 + if (rtsr) { /* this interrupt is shared! Is it ours? */
870 + if (rtsr & AT91_RTC_ALARM)
871 + events |= (RTC_AF | RTC_IRQF);
872 +- if (rtsr & AT91_RTC_SECEV)
873 +- events |= (RTC_UF | RTC_IRQF);
874 ++ if (rtsr & AT91_RTC_SECEV) {
875 ++ complete(&at91_rtc_upd_rdy);
876 ++ at91_rtc_write_idr(AT91_RTC_SECEV);
877 ++ }
878 + if (rtsr & AT91_RTC_ACKUPD)
879 + complete(&at91_rtc_updated);
880 +
881 +@@ -413,6 +420,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
882 + return PTR_ERR(rtc);
883 + platform_set_drvdata(pdev, rtc);
884 +
885 ++ /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
886 ++ * completion.
887 ++ */
888 ++ at91_rtc_write_ier(AT91_RTC_SECEV);
889 ++
890 + dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
891 + return 0;
892 + }
893 +diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
894 +index dae8d1a9038e..52d7517b342e 100644
895 +--- a/drivers/staging/iio/adc/mxs-lradc.c
896 ++++ b/drivers/staging/iio/adc/mxs-lradc.c
897 +@@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
898 + LRADC_CTRL1);
899 + mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
900 +
901 ++ /* Enable / disable the divider per requirement */
902 ++ if (test_bit(chan, &lradc->is_divided))
903 ++ mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
904 ++ LRADC_CTRL2);
905 ++ else
906 ++ mxs_lradc_reg_clear(lradc,
907 ++ 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
908 ++
909 + /* Clean the slot's previous content, then set new one. */
910 + mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
911 + LRADC_CTRL4);
912 +@@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
913 + if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
914 + val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
915 + /* divider by two disabled */
916 +- writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
917 +- lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
918 + clear_bit(chan->channel, &lradc->is_divided);
919 + ret = 0;
920 + } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
921 + val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
922 + /* divider by two enabled */
923 +- writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
924 +- lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
925 + set_bit(chan->channel, &lradc->is_divided);
926 + ret = 0;
927 + }
928 +diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
929 +index 9e0f2a9c73ae..ab338e3ddd05 100644
930 +--- a/drivers/staging/iio/light/tsl2x7x_core.c
931 ++++ b/drivers/staging/iio/light/tsl2x7x_core.c
932 +@@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
933 + chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
934 + chip->tsl2x7x_settings.prox_pulse_count;
935 + chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
936 +- chip->tsl2x7x_settings.prox_thres_low;
937 ++ (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
938 ++ chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
939 ++ (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
940 + chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
941 +- chip->tsl2x7x_settings.prox_thres_high;
942 ++ (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
943 ++ chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
944 ++ (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
945 +
946 + /* and make sure we're not already on */
947 + if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
948 +diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
949 +index de77d9aa22c6..6689de6c5591 100644
950 +--- a/drivers/target/iscsi/iscsi_target_auth.c
951 ++++ b/drivers/target/iscsi/iscsi_target_auth.c
952 +@@ -314,6 +314,16 @@ static int chap_server_compute_md5(
953 + goto out;
954 + }
955 + /*
956 ++ * During mutual authentication, the CHAP_C generated by the
957 ++ * initiator must not match the original CHAP_C generated by
958 ++ * the target.
959 ++ */
960 ++ if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
961 ++ pr_err("initiator CHAP_C matches target CHAP_C, failing"
962 ++ " login attempt\n");
963 ++ goto out;
964 ++ }
965 ++ /*
966 + * Generate CHAP_N and CHAP_R for mutual authentication.
967 + */
968 + tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
969 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
970 +index 789aa9eb0a1e..a51dd4efc23b 100644
971 +--- a/drivers/target/target_core_transport.c
972 ++++ b/drivers/target/target_core_transport.c
973 +@@ -2407,6 +2407,10 @@ static void target_release_cmd_kref(struct kref *kref)
974 + */
975 + int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
976 + {
977 ++ if (!se_sess) {
978 ++ se_cmd->se_tfo->release_cmd(se_cmd);
979 ++ return 1;
980 ++ }
981 + return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
982 + &se_sess->sess_cmd_lock);
983 + }
984 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
985 +index 904efb6035b0..6bbd203f1861 100644
986 +--- a/drivers/usb/class/cdc-acm.c
987 ++++ b/drivers/usb/class/cdc-acm.c
988 +@@ -122,13 +122,23 @@ static void acm_release_minor(struct acm *acm)
989 + static int acm_ctrl_msg(struct acm *acm, int request, int value,
990 + void *buf, int len)
991 + {
992 +- int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
993 ++ int retval;
994 ++
995 ++ retval = usb_autopm_get_interface(acm->control);
996 ++ if (retval)
997 ++ return retval;
998 ++
999 ++ retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
1000 + request, USB_RT_ACM, value,
1001 + acm->control->altsetting[0].desc.bInterfaceNumber,
1002 + buf, len, 5000);
1003 ++
1004 + dev_dbg(&acm->control->dev,
1005 + "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
1006 + __func__, request, value, len, retval);
1007 ++
1008 ++ usb_autopm_put_interface(acm->control);
1009 ++
1010 + return retval < 0 ? retval : 0;
1011 + }
1012 +
1013 +@@ -496,6 +506,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
1014 + {
1015 + struct acm *acm = container_of(port, struct acm, port);
1016 + int retval = -ENODEV;
1017 ++ int i;
1018 +
1019 + dev_dbg(&acm->control->dev, "%s\n", __func__);
1020 +
1021 +@@ -515,21 +526,17 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
1022 + acm->control->needs_remote_wakeup = 1;
1023 +
1024 + acm->ctrlurb->dev = acm->dev;
1025 +- if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
1026 ++ retval = usb_submit_urb(acm->ctrlurb, GFP_KERNEL);
1027 ++ if (retval) {
1028 + dev_err(&acm->control->dev,
1029 + "%s - usb_submit_urb(ctrl irq) failed\n", __func__);
1030 +- usb_autopm_put_interface(acm->control);
1031 + goto error_submit_urb;
1032 + }
1033 +
1034 + acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
1035 +- if (acm_set_control(acm, acm->ctrlout) < 0 &&
1036 +- (acm->ctrl_caps & USB_CDC_CAP_LINE)) {
1037 +- usb_autopm_put_interface(acm->control);
1038 ++ retval = acm_set_control(acm, acm->ctrlout);
1039 ++ if (retval < 0 && (acm->ctrl_caps & USB_CDC_CAP_LINE))
1040 + goto error_set_control;
1041 +- }
1042 +-
1043 +- usb_autopm_put_interface(acm->control);
1044 +
1045 + /*
1046 + * Unthrottle device in case the TTY was closed while throttled.
1047 +@@ -539,23 +546,30 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
1048 + acm->throttle_req = 0;
1049 + spin_unlock_irq(&acm->read_lock);
1050 +
1051 +- if (acm_submit_read_urbs(acm, GFP_KERNEL))
1052 ++ retval = acm_submit_read_urbs(acm, GFP_KERNEL);
1053 ++ if (retval)
1054 + goto error_submit_read_urbs;
1055 +
1056 ++ usb_autopm_put_interface(acm->control);
1057 ++
1058 + mutex_unlock(&acm->mutex);
1059 +
1060 + return 0;
1061 +
1062 + error_submit_read_urbs:
1063 ++ for (i = 0; i < acm->rx_buflimit; i++)
1064 ++ usb_kill_urb(acm->read_urbs[i]);
1065 + acm->ctrlout = 0;
1066 + acm_set_control(acm, acm->ctrlout);
1067 + error_set_control:
1068 + usb_kill_urb(acm->ctrlurb);
1069 + error_submit_urb:
1070 ++ usb_autopm_put_interface(acm->control);
1071 + error_get_interface:
1072 + disconnected:
1073 + mutex_unlock(&acm->mutex);
1074 +- return retval;
1075 ++
1076 ++ return usb_translate_errors(retval);
1077 + }
1078 +
1079 + static void acm_port_destruct(struct tty_port *port)
1080 +@@ -573,21 +587,35 @@ static void acm_port_destruct(struct tty_port *port)
1081 + static void acm_port_shutdown(struct tty_port *port)
1082 + {
1083 + struct acm *acm = container_of(port, struct acm, port);
1084 ++ struct urb *urb;
1085 ++ struct acm_wb *wb;
1086 + int i;
1087 ++ int pm_err;
1088 +
1089 + dev_dbg(&acm->control->dev, "%s\n", __func__);
1090 +
1091 + mutex_lock(&acm->mutex);
1092 + if (!acm->disconnected) {
1093 +- usb_autopm_get_interface(acm->control);
1094 ++ pm_err = usb_autopm_get_interface(acm->control);
1095 + acm_set_control(acm, acm->ctrlout = 0);
1096 ++
1097 ++ for (;;) {
1098 ++ urb = usb_get_from_anchor(&acm->delayed);
1099 ++ if (!urb)
1100 ++ break;
1101 ++ wb = urb->context;
1102 ++ wb->use = 0;
1103 ++ usb_autopm_put_interface_async(acm->control);
1104 ++ }
1105 ++
1106 + usb_kill_urb(acm->ctrlurb);
1107 + for (i = 0; i < ACM_NW; i++)
1108 + usb_kill_urb(acm->wb[i].urb);
1109 + for (i = 0; i < acm->rx_buflimit; i++)
1110 + usb_kill_urb(acm->read_urbs[i]);
1111 + acm->control->needs_remote_wakeup = 0;
1112 +- usb_autopm_put_interface(acm->control);
1113 ++ if (!pm_err)
1114 ++ usb_autopm_put_interface(acm->control);
1115 + }
1116 + mutex_unlock(&acm->mutex);
1117 + }
1118 +@@ -646,14 +674,17 @@ static int acm_tty_write(struct tty_struct *tty,
1119 + memcpy(wb->buf, buf, count);
1120 + wb->len = count;
1121 +
1122 +- usb_autopm_get_interface_async(acm->control);
1123 ++ stat = usb_autopm_get_interface_async(acm->control);
1124 ++ if (stat) {
1125 ++ wb->use = 0;
1126 ++ spin_unlock_irqrestore(&acm->write_lock, flags);
1127 ++ return stat;
1128 ++ }
1129 ++
1130 + if (acm->susp_count) {
1131 +- if (!acm->delayed_wb)
1132 +- acm->delayed_wb = wb;
1133 +- else
1134 +- usb_autopm_put_interface_async(acm->control);
1135 ++ usb_anchor_urb(wb->urb, &acm->delayed);
1136 + spin_unlock_irqrestore(&acm->write_lock, flags);
1137 +- return count; /* A white lie */
1138 ++ return count;
1139 + }
1140 + usb_mark_last_busy(acm->dev);
1141 +
1142 +@@ -1269,6 +1300,7 @@ made_compressed_probe:
1143 + acm->bInterval = epread->bInterval;
1144 + tty_port_init(&acm->port);
1145 + acm->port.ops = &acm_port_ops;
1146 ++ init_usb_anchor(&acm->delayed);
1147 +
1148 + buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
1149 + if (!buf) {
1150 +@@ -1514,18 +1546,15 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1151 + struct acm *acm = usb_get_intfdata(intf);
1152 + int cnt;
1153 +
1154 ++ spin_lock_irq(&acm->read_lock);
1155 ++ spin_lock(&acm->write_lock);
1156 + if (PMSG_IS_AUTO(message)) {
1157 +- int b;
1158 +-
1159 +- spin_lock_irq(&acm->write_lock);
1160 +- b = acm->transmitting;
1161 +- spin_unlock_irq(&acm->write_lock);
1162 +- if (b)
1163 ++ if (acm->transmitting) {
1164 ++ spin_unlock(&acm->write_lock);
1165 ++ spin_unlock_irq(&acm->read_lock);
1166 + return -EBUSY;
1167 ++ }
1168 + }
1169 +-
1170 +- spin_lock_irq(&acm->read_lock);
1171 +- spin_lock(&acm->write_lock);
1172 + cnt = acm->susp_count++;
1173 + spin_unlock(&acm->write_lock);
1174 + spin_unlock_irq(&acm->read_lock);
1175 +@@ -1533,8 +1562,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1176 + if (cnt)
1177 + return 0;
1178 +
1179 +- if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
1180 +- stop_data_traffic(acm);
1181 ++ stop_data_traffic(acm);
1182 +
1183 + return 0;
1184 + }
1185 +@@ -1542,29 +1570,24 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
1186 + static int acm_resume(struct usb_interface *intf)
1187 + {
1188 + struct acm *acm = usb_get_intfdata(intf);
1189 +- struct acm_wb *wb;
1190 ++ struct urb *urb;
1191 + int rv = 0;
1192 +- int cnt;
1193 +
1194 + spin_lock_irq(&acm->read_lock);
1195 +- acm->susp_count -= 1;
1196 +- cnt = acm->susp_count;
1197 +- spin_unlock_irq(&acm->read_lock);
1198 ++ spin_lock(&acm->write_lock);
1199 +
1200 +- if (cnt)
1201 +- return 0;
1202 ++ if (--acm->susp_count)
1203 ++ goto out;
1204 +
1205 + if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
1206 +- rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
1207 +-
1208 +- spin_lock_irq(&acm->write_lock);
1209 +- if (acm->delayed_wb) {
1210 +- wb = acm->delayed_wb;
1211 +- acm->delayed_wb = NULL;
1212 +- spin_unlock_irq(&acm->write_lock);
1213 +- acm_start_wb(acm, wb);
1214 +- } else {
1215 +- spin_unlock_irq(&acm->write_lock);
1216 ++ rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
1217 ++
1218 ++ for (;;) {
1219 ++ urb = usb_get_from_anchor(&acm->delayed);
1220 ++ if (!urb)
1221 ++ break;
1222 ++
1223 ++ acm_start_wb(acm, urb->context);
1224 + }
1225 +
1226 + /*
1227 +@@ -1572,12 +1595,14 @@ static int acm_resume(struct usb_interface *intf)
1228 + * do the write path at all cost
1229 + */
1230 + if (rv < 0)
1231 +- goto err_out;
1232 ++ goto out;
1233 +
1234 +- rv = acm_submit_read_urbs(acm, GFP_NOIO);
1235 ++ rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
1236 + }
1237 ++out:
1238 ++ spin_unlock(&acm->write_lock);
1239 ++ spin_unlock_irq(&acm->read_lock);
1240 +
1241 +-err_out:
1242 + return rv;
1243 + }
1244 +
1245 +diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
1246 +index e38dc785808f..80826f843e04 100644
1247 +--- a/drivers/usb/class/cdc-acm.h
1248 ++++ b/drivers/usb/class/cdc-acm.h
1249 +@@ -120,7 +120,7 @@ struct acm {
1250 + unsigned int throttled:1; /* actually throttled */
1251 + unsigned int throttle_req:1; /* throttle requested */
1252 + u8 bInterval;
1253 +- struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
1254 ++ struct usb_anchor delayed; /* writes queued for a device about to be woken */
1255 + };
1256 +
1257 + #define CDC_DATA_INTERFACE_TYPE 0x0a
1258 +diff --git a/include/sound/core.h b/include/sound/core.h
1259 +index d3f5f818e0b9..88c9fbb7ed90 100644
1260 +--- a/include/sound/core.h
1261 ++++ b/include/sound/core.h
1262 +@@ -116,6 +116,8 @@ struct snd_card {
1263 + int user_ctl_count; /* count of all user controls */
1264 + struct list_head controls; /* all controls for this card */
1265 + struct list_head ctl_files; /* active control files */
1266 ++ struct mutex user_ctl_lock; /* protects user controls against
1267 ++ concurrent access */
1268 +
1269 + struct snd_info_entry *proc_root; /* root for soundcard specific files */
1270 + struct snd_info_entry *proc_id; /* the card id */
1271 +diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
1272 +index 5759810e1c1b..21eed488783f 100644
1273 +--- a/include/uapi/sound/compress_offload.h
1274 ++++ b/include/uapi/sound/compress_offload.h
1275 +@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
1276 + struct snd_compr_avail {
1277 + __u64 avail;
1278 + struct snd_compr_tstamp tstamp;
1279 +-};
1280 ++} __attribute__((packed));
1281 +
1282 + enum snd_compr_direction {
1283 + SND_COMPRESS_PLAYBACK = 0,
1284 +diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
1285 +index df6839e3ce08..99a03acb7d47 100644
1286 +--- a/lib/lz4/lz4_decompress.c
1287 ++++ b/lib/lz4/lz4_decompress.c
1288 +@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
1289 + len = *ip++;
1290 + for (; len == 255; length += 255)
1291 + len = *ip++;
1292 ++ if (unlikely(length > (size_t)(length + len)))
1293 ++ goto _output_error;
1294 + length += len;
1295 + }
1296 +
1297 +diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
1298 +index 569985d522d5..8563081e8da3 100644
1299 +--- a/lib/lzo/lzo1x_decompress_safe.c
1300 ++++ b/lib/lzo/lzo1x_decompress_safe.c
1301 +@@ -19,11 +19,31 @@
1302 + #include <linux/lzo.h>
1303 + #include "lzodefs.h"
1304 +
1305 +-#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
1306 +-#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
1307 +-#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
1308 +-#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
1309 +-#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
1310 ++#define HAVE_IP(t, x) \
1311 ++ (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
1312 ++ (((t + x) >= t) && ((t + x) >= x)))
1313 ++
1314 ++#define HAVE_OP(t, x) \
1315 ++ (((size_t)(op_end - op) >= (size_t)(t + x)) && \
1316 ++ (((t + x) >= t) && ((t + x) >= x)))
1317 ++
1318 ++#define NEED_IP(t, x) \
1319 ++ do { \
1320 ++ if (!HAVE_IP(t, x)) \
1321 ++ goto input_overrun; \
1322 ++ } while (0)
1323 ++
1324 ++#define NEED_OP(t, x) \
1325 ++ do { \
1326 ++ if (!HAVE_OP(t, x)) \
1327 ++ goto output_overrun; \
1328 ++ } while (0)
1329 ++
1330 ++#define TEST_LB(m_pos) \
1331 ++ do { \
1332 ++ if ((m_pos) < out) \
1333 ++ goto lookbehind_overrun; \
1334 ++ } while (0)
1335 +
1336 + int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1337 + unsigned char *out, size_t *out_len)
1338 +@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
1339 + while (unlikely(*ip == 0)) {
1340 + t += 255;
1341 + ip++;
1342 +- NEED_IP(1);
1343 ++ NEED_IP(1, 0);
1344 + }
1345 + t += 15 + *ip++;
1346 + }
1347 + t += 3;
1348 + copy_literal_run:
1349 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1350 +- if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
1351 ++ if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
1352 + const unsigned char *ie = ip + t;
1353 + unsigned char *oe = op + t;
1354 + do {
1355 +@@ -81,8 +101,8 @@ copy_literal_run:
1356 + } else
1357 + #endif
1358 + {
1359 +- NEED_OP(t);
1360 +- NEED_IP(t + 3);
1361 ++ NEED_OP(t, 0);
1362 ++ NEED_IP(t, 3);
1363 + do {
1364 + *op++ = *ip++;
1365 + } while (--t > 0);
1366 +@@ -95,7 +115,7 @@ copy_literal_run:
1367 + m_pos -= t >> 2;
1368 + m_pos -= *ip++ << 2;
1369 + TEST_LB(m_pos);
1370 +- NEED_OP(2);
1371 ++ NEED_OP(2, 0);
1372 + op[0] = m_pos[0];
1373 + op[1] = m_pos[1];
1374 + op += 2;
1375 +@@ -119,10 +139,10 @@ copy_literal_run:
1376 + while (unlikely(*ip == 0)) {
1377 + t += 255;
1378 + ip++;
1379 +- NEED_IP(1);
1380 ++ NEED_IP(1, 0);
1381 + }
1382 + t += 31 + *ip++;
1383 +- NEED_IP(2);
1384 ++ NEED_IP(2, 0);
1385 + }
1386 + m_pos = op - 1;
1387 + next = get_unaligned_le16(ip);
1388 +@@ -137,10 +157,10 @@ copy_literal_run:
1389 + while (unlikely(*ip == 0)) {
1390 + t += 255;
1391 + ip++;
1392 +- NEED_IP(1);
1393 ++ NEED_IP(1, 0);
1394 + }
1395 + t += 7 + *ip++;
1396 +- NEED_IP(2);
1397 ++ NEED_IP(2, 0);
1398 + }
1399 + next = get_unaligned_le16(ip);
1400 + ip += 2;
1401 +@@ -154,7 +174,7 @@ copy_literal_run:
1402 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1403 + if (op - m_pos >= 8) {
1404 + unsigned char *oe = op + t;
1405 +- if (likely(HAVE_OP(t + 15))) {
1406 ++ if (likely(HAVE_OP(t, 15))) {
1407 + do {
1408 + COPY8(op, m_pos);
1409 + op += 8;
1410 +@@ -164,7 +184,7 @@ copy_literal_run:
1411 + m_pos += 8;
1412 + } while (op < oe);
1413 + op = oe;
1414 +- if (HAVE_IP(6)) {
1415 ++ if (HAVE_IP(6, 0)) {
1416 + state = next;
1417 + COPY4(op, ip);
1418 + op += next;
1419 +@@ -172,7 +192,7 @@ copy_literal_run:
1420 + continue;
1421 + }
1422 + } else {
1423 +- NEED_OP(t);
1424 ++ NEED_OP(t, 0);
1425 + do {
1426 + *op++ = *m_pos++;
1427 + } while (op < oe);
1428 +@@ -181,7 +201,7 @@ copy_literal_run:
1429 + #endif
1430 + {
1431 + unsigned char *oe = op + t;
1432 +- NEED_OP(t);
1433 ++ NEED_OP(t, 0);
1434 + op[0] = m_pos[0];
1435 + op[1] = m_pos[1];
1436 + op += 2;
1437 +@@ -194,15 +214,15 @@ match_next:
1438 + state = next;
1439 + t = next;
1440 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
1441 +- if (likely(HAVE_IP(6) && HAVE_OP(4))) {
1442 ++ if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
1443 + COPY4(op, ip);
1444 + op += t;
1445 + ip += t;
1446 + } else
1447 + #endif
1448 + {
1449 +- NEED_IP(t + 3);
1450 +- NEED_OP(t);
1451 ++ NEED_IP(t, 3);
1452 ++ NEED_OP(t, 0);
1453 + while (t > 0) {
1454 + *op++ = *ip++;
1455 + t--;
1456 +diff --git a/mm/shmem.c b/mm/shmem.c
1457 +index 9f70e02111c6..a2801ba8ae2d 100644
1458 +--- a/mm/shmem.c
1459 ++++ b/mm/shmem.c
1460 +@@ -1728,6 +1728,9 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1461 + pgoff_t start, index, end;
1462 + int error;
1463 +
1464 ++ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1465 ++ return -EOPNOTSUPP;
1466 ++
1467 + mutex_lock(&inode->i_mutex);
1468 +
1469 + if (mode & FALLOC_FL_PUNCH_HOLE) {
1470 +diff --git a/mm/slab.c b/mm/slab.c
1471 +index 19d92181ce24..9432556ab912 100644
1472 +--- a/mm/slab.c
1473 ++++ b/mm/slab.c
1474 +@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1475 +
1476 + #endif
1477 +
1478 ++#define OBJECT_FREE (0)
1479 ++#define OBJECT_ACTIVE (1)
1480 ++
1481 ++#ifdef CONFIG_DEBUG_SLAB_LEAK
1482 ++
1483 ++static void set_obj_status(struct page *page, int idx, int val)
1484 ++{
1485 ++ int freelist_size;
1486 ++ char *status;
1487 ++ struct kmem_cache *cachep = page->slab_cache;
1488 ++
1489 ++ freelist_size = cachep->num * sizeof(freelist_idx_t);
1490 ++ status = (char *)page->freelist + freelist_size;
1491 ++ status[idx] = val;
1492 ++}
1493 ++
1494 ++static inline unsigned int get_obj_status(struct page *page, int idx)
1495 ++{
1496 ++ int freelist_size;
1497 ++ char *status;
1498 ++ struct kmem_cache *cachep = page->slab_cache;
1499 ++
1500 ++ freelist_size = cachep->num * sizeof(freelist_idx_t);
1501 ++ status = (char *)page->freelist + freelist_size;
1502 ++
1503 ++ return status[idx];
1504 ++}
1505 ++
1506 ++#else
1507 ++static inline void set_obj_status(struct page *page, int idx, int val) {}
1508 ++
1509 ++#endif
1510 ++
1511 + /*
1512 + * Do not go above this order unless 0 objects fit into the slab or
1513 + * overridden on the command line.
1514 +@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1515 + return cachep->array[smp_processor_id()];
1516 + }
1517 +
1518 ++static size_t calculate_freelist_size(int nr_objs, size_t align)
1519 ++{
1520 ++ size_t freelist_size;
1521 ++
1522 ++ freelist_size = nr_objs * sizeof(freelist_idx_t);
1523 ++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1524 ++ freelist_size += nr_objs * sizeof(char);
1525 ++
1526 ++ if (align)
1527 ++ freelist_size = ALIGN(freelist_size, align);
1528 ++
1529 ++ return freelist_size;
1530 ++}
1531 ++
1532 + static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
1533 + size_t idx_size, size_t align)
1534 + {
1535 + int nr_objs;
1536 ++ size_t remained_size;
1537 + size_t freelist_size;
1538 ++ int extra_space = 0;
1539 +
1540 ++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1541 ++ extra_space = sizeof(char);
1542 + /*
1543 + * Ignore padding for the initial guess. The padding
1544 + * is at most @align-1 bytes, and @buffer_size is at
1545 +@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
1546 + * into the memory allocation when taking the padding
1547 + * into account.
1548 + */
1549 +- nr_objs = slab_size / (buffer_size + idx_size);
1550 ++ nr_objs = slab_size / (buffer_size + idx_size + extra_space);
1551 +
1552 + /*
1553 + * This calculated number will be either the right
1554 + * amount, or one greater than what we want.
1555 + */
1556 +- freelist_size = slab_size - nr_objs * buffer_size;
1557 +- if (freelist_size < ALIGN(nr_objs * idx_size, align))
1558 ++ remained_size = slab_size - nr_objs * buffer_size;
1559 ++ freelist_size = calculate_freelist_size(nr_objs, align);
1560 ++ if (remained_size < freelist_size)
1561 + nr_objs--;
1562 +
1563 + return nr_objs;
1564 +@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
1565 + } else {
1566 + nr_objs = calculate_nr_objs(slab_size, buffer_size,
1567 + sizeof(freelist_idx_t), align);
1568 +- mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
1569 ++ mgmt_size = calculate_freelist_size(nr_objs, align);
1570 + }
1571 + *num = nr_objs;
1572 + *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
1573 +@@ -2032,13 +2084,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
1574 + break;
1575 +
1576 + if (flags & CFLGS_OFF_SLAB) {
1577 ++ size_t freelist_size_per_obj = sizeof(freelist_idx_t);
1578 + /*
1579 + * Max number of objs-per-slab for caches which
1580 + * use off-slab slabs. Needed to avoid a possible
1581 + * looping condition in cache_grow().
1582 + */
1583 ++ if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
1584 ++ freelist_size_per_obj += sizeof(char);
1585 + offslab_limit = size;
1586 +- offslab_limit /= sizeof(freelist_idx_t);
1587 ++ offslab_limit /= freelist_size_per_obj;
1588 +
1589 + if (num > offslab_limit)
1590 + break;
1591 +@@ -2285,8 +2340,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
1592 + if (!cachep->num)
1593 + return -E2BIG;
1594 +
1595 +- freelist_size =
1596 +- ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
1597 ++ freelist_size = calculate_freelist_size(cachep->num, cachep->align);
1598 +
1599 + /*
1600 + * If the slab has been placed off-slab, and we have enough space then
1601 +@@ -2299,7 +2353,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
1602 +
1603 + if (flags & CFLGS_OFF_SLAB) {
1604 + /* really off slab. No need for manual alignment */
1605 +- freelist_size = cachep->num * sizeof(freelist_idx_t);
1606 ++ freelist_size = calculate_freelist_size(cachep->num, 0);
1607 +
1608 + #ifdef CONFIG_PAGE_POISONING
1609 + /* If we're going to use the generic kernel_map_pages()
1610 +@@ -2625,6 +2679,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
1611 + if (cachep->ctor)
1612 + cachep->ctor(objp);
1613 + #endif
1614 ++ set_obj_status(page, i, OBJECT_FREE);
1615 + set_free_obj(page, i, i);
1616 + }
1617 + }
1618 +@@ -2833,6 +2888,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
1619 + BUG_ON(objnr >= cachep->num);
1620 + BUG_ON(objp != index_to_obj(cachep, page, objnr));
1621 +
1622 ++ set_obj_status(page, objnr, OBJECT_FREE);
1623 + if (cachep->flags & SLAB_POISON) {
1624 + #ifdef CONFIG_DEBUG_PAGEALLOC
1625 + if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
1626 +@@ -2966,6 +3022,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
1627 + static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
1628 + gfp_t flags, void *objp, unsigned long caller)
1629 + {
1630 ++ struct page *page;
1631 ++
1632 + if (!objp)
1633 + return objp;
1634 + if (cachep->flags & SLAB_POISON) {
1635 +@@ -2996,6 +3054,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
1636 + *dbg_redzone1(cachep, objp) = RED_ACTIVE;
1637 + *dbg_redzone2(cachep, objp) = RED_ACTIVE;
1638 + }
1639 ++
1640 ++ page = virt_to_head_page(objp);
1641 ++ set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
1642 + objp += obj_offset(cachep);
1643 + if (cachep->ctor && cachep->flags & SLAB_POISON)
1644 + cachep->ctor(objp);
1645 +@@ -4232,21 +4293,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
1646 + struct page *page)
1647 + {
1648 + void *p;
1649 +- int i, j;
1650 ++ int i;
1651 +
1652 + if (n[0] == n[1])
1653 + return;
1654 + for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
1655 +- bool active = true;
1656 +-
1657 +- for (j = page->active; j < c->num; j++) {
1658 +- /* Skip freed item */
1659 +- if (get_free_obj(page, j) == i) {
1660 +- active = false;
1661 +- break;
1662 +- }
1663 +- }
1664 +- if (!active)
1665 ++ if (get_obj_status(page, i) != OBJECT_ACTIVE)
1666 + continue;
1667 +
1668 + if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
1669 +diff --git a/net/core/dev.c b/net/core/dev.c
1670 +index fb8b0546485b..a30bef1882f5 100644
1671 +--- a/net/core/dev.c
1672 ++++ b/net/core/dev.c
1673 +@@ -6613,6 +6613,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
1674 + /**
1675 + * unregister_netdevice_many - unregister many devices
1676 + * @head: list of devices
1677 ++ *
1678 ++ * Note: As most callers use a stack allocated list_head,
1679 ++ * we force a list_del() to make sure the stack won't be corrupted later.
1680 + */
1681 + void unregister_netdevice_many(struct list_head *head)
1682 + {
1683 +@@ -6622,6 +6625,7 @@ void unregister_netdevice_many(struct list_head *head)
1684 + rollback_registered_many(head);
1685 + list_for_each_entry(dev, head, unreg_list)
1686 + net_set_todo(dev);
1687 ++ list_del(head);
1688 + }
1689 + }
1690 + EXPORT_SYMBOL(unregister_netdevice_many);
1691 +@@ -7077,7 +7081,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
1692 + }
1693 + }
1694 + unregister_netdevice_many(&dev_kill_list);
1695 +- list_del(&dev_kill_list);
1696 + rtnl_unlock();
1697 + }
1698 +
1699 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1700 +index 2d8d8fcfa060..1999ed832267 100644
1701 +--- a/net/core/rtnetlink.c
1702 ++++ b/net/core/rtnetlink.c
1703 +@@ -1234,6 +1234,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1704 + struct nlattr *tb[IFLA_MAX+1];
1705 + u32 ext_filter_mask = 0;
1706 + int err;
1707 ++ int hdrlen;
1708 +
1709 + s_h = cb->args[0];
1710 + s_idx = cb->args[1];
1711 +@@ -1241,8 +1242,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1712 + rcu_read_lock();
1713 + cb->seq = net->dev_base_seq;
1714 +
1715 +- if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1716 +- ifla_policy) >= 0) {
1717 ++ /* A hack to preserve kernel<->userspace interface.
1718 ++ * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1719 ++ * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1720 ++ * what iproute2 < v3.9.0 used.
1721 ++ * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1722 ++ * attribute, its netlink message is shorter than struct ifinfomsg.
1723 ++ */
1724 ++ hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
1725 ++ sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1726 ++
1727 ++ if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1728 +
1729 + if (tb[IFLA_EXT_MASK])
1730 + ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1731 +@@ -1744,7 +1754,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
1732 +
1733 + ops->dellink(dev, &list_kill);
1734 + unregister_netdevice_many(&list_kill);
1735 +- list_del(&list_kill);
1736 + return 0;
1737 + }
1738 +
1739 +@@ -2095,9 +2104,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
1740 + struct nlattr *tb[IFLA_MAX+1];
1741 + u32 ext_filter_mask = 0;
1742 + u16 min_ifinfo_dump_size = 0;
1743 ++ int hdrlen;
1744 ++
1745 ++ /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
1746 ++ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
1747 ++ sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
1748 +
1749 +- if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
1750 +- ifla_policy) >= 0) {
1751 ++ if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
1752 + if (tb[IFLA_EXT_MASK])
1753 + ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
1754 + }
1755 +diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
1756 +index 8b5134c582f1..a3095fdefbed 100644
1757 +--- a/net/ipv4/datagram.c
1758 ++++ b/net/ipv4/datagram.c
1759 +@@ -86,18 +86,26 @@ out:
1760 + }
1761 + EXPORT_SYMBOL(ip4_datagram_connect);
1762 +
1763 ++/* Because the UDP xmit path can manipulate sk_dst_cache without holding
1764 ++ * the socket lock, we need to use sk_dst_set() here,
1765 ++ * even if we own the socket lock.
1766 ++ */
1767 + void ip4_datagram_release_cb(struct sock *sk)
1768 + {
1769 + const struct inet_sock *inet = inet_sk(sk);
1770 + const struct ip_options_rcu *inet_opt;
1771 + __be32 daddr = inet->inet_daddr;
1772 ++ struct dst_entry *dst;
1773 + struct flowi4 fl4;
1774 + struct rtable *rt;
1775 +
1776 +- if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
1777 +- return;
1778 +-
1779 + rcu_read_lock();
1780 ++
1781 ++ dst = __sk_dst_get(sk);
1782 ++ if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
1783 ++ rcu_read_unlock();
1784 ++ return;
1785 ++ }
1786 + inet_opt = rcu_dereference(inet->inet_opt);
1787 + if (inet_opt && inet_opt->opt.srr)
1788 + daddr = inet_opt->opt.faddr;
1789 +@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
1790 + inet->inet_saddr, inet->inet_dport,
1791 + inet->inet_sport, sk->sk_protocol,
1792 + RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1793 +- if (!IS_ERR(rt))
1794 +- __sk_dst_set(sk, &rt->dst);
1795 ++
1796 ++ dst = !IS_ERR(rt) ? &rt->dst : NULL;
1797 ++ sk_dst_set(sk, dst);
1798 ++
1799 + rcu_read_unlock();
1800 + }
1801 + EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
1802 +diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
1803 +index 812b18351462..62eaa005e146 100644
1804 +--- a/net/ipv4/ipip.c
1805 ++++ b/net/ipv4/ipip.c
1806 +@@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
1807 +
1808 + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1809 + ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1810 +- t->dev->ifindex, 0, IPPROTO_IPIP, 0);
1811 ++ t->parms.link, 0, IPPROTO_IPIP, 0);
1812 + err = 0;
1813 + goto out;
1814 + }
1815 +
1816 + if (type == ICMP_REDIRECT) {
1817 +- ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1818 ++ ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1819 + IPPROTO_IPIP, 0);
1820 + err = 0;
1821 + goto out;
1822 +@@ -486,4 +486,5 @@ static void __exit ipip_fini(void)
1823 + module_init(ipip_init);
1824 + module_exit(ipip_fini);
1825 + MODULE_LICENSE("GPL");
1826 ++MODULE_ALIAS_RTNL_LINK("ipip");
1827 + MODULE_ALIAS_NETDEV("tunl0");
1828 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1829 +index 4468e1adc094..54a5fe92de5a 100644
1830 +--- a/net/ipv4/udp.c
1831 ++++ b/net/ipv4/udp.c
1832 +@@ -1834,6 +1834,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
1833 + unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
1834 + struct udp_hslot *hslot = &udp_table.hash[slot];
1835 +
1836 ++ /* Do not bother scanning a too big list */
1837 ++ if (hslot->count > 10)
1838 ++ return NULL;
1839 ++
1840 + rcu_read_lock();
1841 + begin:
1842 + count = 0;
1843 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1844 +index f6a66bb4114d..afa082458360 100644
1845 +--- a/net/ipv6/ip6_tunnel.c
1846 ++++ b/net/ipv6/ip6_tunnel.c
1847 +@@ -61,6 +61,7 @@
1848 + MODULE_AUTHOR("Ville Nuorvala");
1849 + MODULE_DESCRIPTION("IPv6 tunneling device");
1850 + MODULE_LICENSE("GPL");
1851 ++MODULE_ALIAS_RTNL_LINK("ip6tnl");
1852 + MODULE_ALIAS_NETDEV("ip6tnl0");
1853 +
1854 + #ifdef IP6_TNL_DEBUG
1855 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1856 +index e5a453ca302e..4f408176dc64 100644
1857 +--- a/net/ipv6/sit.c
1858 ++++ b/net/ipv6/sit.c
1859 +@@ -560,12 +560,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
1860 +
1861 + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1862 + ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1863 +- t->dev->ifindex, 0, IPPROTO_IPV6, 0);
1864 ++ t->parms.link, 0, IPPROTO_IPV6, 0);
1865 + err = 0;
1866 + goto out;
1867 + }
1868 + if (type == ICMP_REDIRECT) {
1869 +- ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
1870 ++ ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1871 + IPPROTO_IPV6, 0);
1872 + err = 0;
1873 + goto out;
1874 +@@ -1828,4 +1828,5 @@ xfrm_tunnel_failed:
1875 + module_init(sit_init);
1876 + module_exit(sit_cleanup);
1877 + MODULE_LICENSE("GPL");
1878 ++MODULE_ALIAS_RTNL_LINK("sit");
1879 + MODULE_ALIAS_NETDEV("sit0");
1880 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1881 +index 1e586d92260e..20b63d2ab70f 100644
1882 +--- a/net/ipv6/udp.c
1883 ++++ b/net/ipv6/udp.c
1884 +@@ -716,15 +716,15 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
1885 + if (inet->inet_dport != rmt_port)
1886 + continue;
1887 + }
1888 +- if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
1889 +- !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
1890 ++ if (!ipv6_addr_any(&s->sk_v6_daddr) &&
1891 ++ !ipv6_addr_equal(&s->sk_v6_daddr, rmt_addr))
1892 + continue;
1893 +
1894 + if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
1895 + continue;
1896 +
1897 +- if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
1898 +- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
1899 ++ if (!ipv6_addr_any(&s->sk_v6_rcv_saddr)) {
1900 ++ if (!ipv6_addr_equal(&s->sk_v6_rcv_saddr, loc_addr))
1901 + continue;
1902 + }
1903 + if (!inet6_mc_check(s, loc_addr, rmt_addr))
1904 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
1905 +index b8d331e7d883..34799e06ee01 100644
1906 +--- a/net/mac80211/iface.c
1907 ++++ b/net/mac80211/iface.c
1908 +@@ -1758,7 +1758,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1909 + }
1910 + mutex_unlock(&local->iflist_mtx);
1911 + unregister_netdevice_many(&unreg_list);
1912 +- list_del(&unreg_list);
1913 +
1914 + list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
1915 + list_del(&sdata->list);
1916 +diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1917 +index 39579c3e0d14..0b999987b658 100644
1918 +--- a/net/sctp/associola.c
1919 ++++ b/net/sctp/associola.c
1920 +@@ -330,7 +330,7 @@ void sctp_association_free(struct sctp_association *asoc)
1921 + /* Only real associations count against the endpoint, so
1922 + * don't bother for if this is a temporary association.
1923 + */
1924 +- if (!asoc->temp) {
1925 ++ if (!list_empty(&asoc->asocs)) {
1926 + list_del(&asoc->asocs);
1927 +
1928 + /* Decrement the backlog value for a TCP-style listening
1929 +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
1930 +index 6e0bd933b6a9..3b312ed51618 100644
1931 +--- a/security/integrity/evm/evm_main.c
1932 ++++ b/security/integrity/evm/evm_main.c
1933 +@@ -287,12 +287,20 @@ out:
1934 + * @xattr_value: pointer to the new extended attribute value
1935 + * @xattr_value_len: pointer to the new extended attribute value length
1936 + *
1937 +- * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
1938 +- * the current value is valid.
1939 ++ * Before allowing the 'security.evm' protected xattr to be updated,
1940 ++ * verify the existing value is valid. As only the kernel should have
1941 ++ * access to the EVM encrypted key needed to calculate the HMAC, prevent
1942 ++ * userspace from writing the HMAC value. Writing 'security.evm'
1943 ++ * requires CAP_SYS_ADMIN privileges.
1944 + */
1945 + int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
1946 + const void *xattr_value, size_t xattr_value_len)
1947 + {
1948 ++ const struct evm_ima_xattr_data *xattr_data = xattr_value;
1949 ++
1950 ++ if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
1951 ++ && (xattr_data->type == EVM_XATTR_HMAC))
1952 ++ return -EPERM;
1953 + return evm_protect_xattr(dentry, xattr_name, xattr_value,
1954 + xattr_value_len);
1955 + }
1956 +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
1957 +index ba9e4d792dd5..d9cd5ce14d2b 100644
1958 +--- a/security/integrity/ima/ima_api.c
1959 ++++ b/security/integrity/ima/ima_api.c
1960 +@@ -199,6 +199,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1961 + struct evm_ima_xattr_data **xattr_value,
1962 + int *xattr_len)
1963 + {
1964 ++ const char *audit_cause = "failed";
1965 + struct inode *inode = file_inode(file);
1966 + const char *filename = file->f_dentry->d_name.name;
1967 + int result = 0;
1968 +@@ -213,6 +214,12 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1969 + if (!(iint->flags & IMA_COLLECTED)) {
1970 + u64 i_version = file_inode(file)->i_version;
1971 +
1972 ++ if (file->f_flags & O_DIRECT) {
1973 ++ audit_cause = "failed(directio)";
1974 ++ result = -EACCES;
1975 ++ goto out;
1976 ++ }
1977 ++
1978 + /* use default hash algorithm */
1979 + hash.hdr.algo = ima_hash_algo;
1980 +
1981 +@@ -233,9 +240,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
1982 + result = -ENOMEM;
1983 + }
1984 + }
1985 ++out:
1986 + if (result)
1987 + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
1988 +- filename, "collect_data", "failed",
1989 ++ filename, "collect_data", audit_cause,
1990 + result, 0);
1991 + return result;
1992 + }
1993 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
1994 +index 1bde8e627766..ccd0ac8fa9a0 100644
1995 +--- a/security/integrity/ima/ima_crypto.c
1996 ++++ b/security/integrity/ima/ima_crypto.c
1997 +@@ -27,6 +27,36 @@
1998 +
1999 + static struct crypto_shash *ima_shash_tfm;
2000 +
2001 ++/**
2002 ++ * ima_kernel_read - read file content
2003 ++ *
2004 ++ * This is a function for reading file content instead of kernel_read().
2005 ++ * It does not perform locking checks to ensure it cannot be blocked.
2006 ++ * It does not perform security checks because it is irrelevant for IMA.
2007 ++ *
2008 ++ */
2009 ++static int ima_kernel_read(struct file *file, loff_t offset,
2010 ++ char *addr, unsigned long count)
2011 ++{
2012 ++ mm_segment_t old_fs;
2013 ++ char __user *buf = addr;
2014 ++ ssize_t ret;
2015 ++
2016 ++ if (!(file->f_mode & FMODE_READ))
2017 ++ return -EBADF;
2018 ++ if (!file->f_op->read && !file->f_op->aio_read)
2019 ++ return -EINVAL;
2020 ++
2021 ++ old_fs = get_fs();
2022 ++ set_fs(get_ds());
2023 ++ if (file->f_op->read)
2024 ++ ret = file->f_op->read(file, buf, count, &offset);
2025 ++ else
2026 ++ ret = do_sync_read(file, buf, count, &offset);
2027 ++ set_fs(old_fs);
2028 ++ return ret;
2029 ++}
2030 ++
2031 + int ima_init_crypto(void)
2032 + {
2033 + long rc;
2034 +@@ -104,7 +134,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
2035 + while (offset < i_size) {
2036 + int rbuf_len;
2037 +
2038 +- rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
2039 ++ rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
2040 + if (rbuf_len < 0) {
2041 + rc = rbuf_len;
2042 + break;
2043 +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
2044 +index 52ac6cf41f88..dcc98cf542d8 100644
2045 +--- a/security/integrity/ima/ima_main.c
2046 ++++ b/security/integrity/ima/ima_main.c
2047 +@@ -214,8 +214,11 @@ static int process_measurement(struct file *file, const char *filename,
2048 + xattr_ptr = &xattr_value;
2049 +
2050 + rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
2051 +- if (rc != 0)
2052 ++ if (rc != 0) {
2053 ++ if (file->f_flags & O_DIRECT)
2054 ++ rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
2055 + goto out_digsig;
2056 ++ }
2057 +
2058 + pathname = filename ?: ima_d_path(&file->f_path, &pathbuf);
2059 +
2060 +diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
2061 +index 93873a450ff7..40a7488f6721 100644
2062 +--- a/security/integrity/ima/ima_policy.c
2063 ++++ b/security/integrity/ima/ima_policy.c
2064 +@@ -353,7 +353,7 @@ enum {
2065 + Opt_obj_user, Opt_obj_role, Opt_obj_type,
2066 + Opt_subj_user, Opt_subj_role, Opt_subj_type,
2067 + Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
2068 +- Opt_appraise_type, Opt_fsuuid
2069 ++ Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
2070 + };
2071 +
2072 + static match_table_t policy_tokens = {
2073 +@@ -375,6 +375,7 @@ static match_table_t policy_tokens = {
2074 + {Opt_uid, "uid=%s"},
2075 + {Opt_fowner, "fowner=%s"},
2076 + {Opt_appraise_type, "appraise_type=%s"},
2077 ++ {Opt_permit_directio, "permit_directio"},
2078 + {Opt_err, NULL}
2079 + };
2080 +
2081 +@@ -622,6 +623,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
2082 + else
2083 + result = -EINVAL;
2084 + break;
2085 ++ case Opt_permit_directio:
2086 ++ entry->flags |= IMA_PERMIT_DIRECTIO;
2087 ++ break;
2088 + case Opt_err:
2089 + ima_log_string(ab, "UNKNOWN", p);
2090 + result = -EINVAL;
2091 +diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
2092 +index 2fb5e53e927f..33c0a70f6b15 100644
2093 +--- a/security/integrity/integrity.h
2094 ++++ b/security/integrity/integrity.h
2095 +@@ -30,6 +30,7 @@
2096 + #define IMA_ACTION_FLAGS 0xff000000
2097 + #define IMA_DIGSIG 0x01000000
2098 + #define IMA_DIGSIG_REQUIRED 0x02000000
2099 ++#define IMA_PERMIT_DIRECTIO 0x04000000
2100 +
2101 + #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
2102 + IMA_APPRAISE_SUBMASK)
2103 +diff --git a/sound/core/control.c b/sound/core/control.c
2104 +index f038f5afafe2..f0b0e14497a5 100644
2105 +--- a/sound/core/control.c
2106 ++++ b/sound/core/control.c
2107 +@@ -288,6 +288,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
2108 + {
2109 + struct snd_kcontrol *kctl;
2110 +
2111 ++ /* Make sure that the ids assigned to the control do not wrap around */
2112 ++ if (card->last_numid >= UINT_MAX - count)
2113 ++ card->last_numid = 0;
2114 ++
2115 + list_for_each_entry(kctl, &card->controls, list) {
2116 + if (kctl->id.numid < card->last_numid + 1 + count &&
2117 + kctl->id.numid + kctl->count > card->last_numid + 1) {
2118 +@@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2119 + {
2120 + struct snd_ctl_elem_id id;
2121 + unsigned int idx;
2122 ++ unsigned int count;
2123 + int err = -EINVAL;
2124 +
2125 + if (! kcontrol)
2126 +@@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2127 + if (snd_BUG_ON(!card || !kcontrol->info))
2128 + goto error;
2129 + id = kcontrol->id;
2130 ++ if (id.index > UINT_MAX - kcontrol->count)
2131 ++ goto error;
2132 ++
2133 + down_write(&card->controls_rwsem);
2134 + if (snd_ctl_find_id(card, &id)) {
2135 + up_write(&card->controls_rwsem);
2136 +@@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
2137 + card->controls_count += kcontrol->count;
2138 + kcontrol->id.numid = card->last_numid + 1;
2139 + card->last_numid += kcontrol->count;
2140 ++ count = kcontrol->count;
2141 + up_write(&card->controls_rwsem);
2142 +- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2143 ++ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2144 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2145 + return 0;
2146 +
2147 +@@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
2148 + bool add_on_replace)
2149 + {
2150 + struct snd_ctl_elem_id id;
2151 ++ unsigned int count;
2152 + unsigned int idx;
2153 + struct snd_kcontrol *old;
2154 + int ret;
2155 +@@ -423,8 +433,9 @@ add:
2156 + card->controls_count += kcontrol->count;
2157 + kcontrol->id.numid = card->last_numid + 1;
2158 + card->last_numid += kcontrol->count;
2159 ++ count = kcontrol->count;
2160 + up_write(&card->controls_rwsem);
2161 +- for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
2162 ++ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
2163 + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
2164 + return 0;
2165 +
2166 +@@ -897,9 +908,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
2167 + result = kctl->put(kctl, control);
2168 + }
2169 + if (result > 0) {
2170 ++ struct snd_ctl_elem_id id = control->id;
2171 + up_read(&card->controls_rwsem);
2172 +- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
2173 +- &control->id);
2174 ++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
2175 + return 0;
2176 + }
2177 + }
2178 +@@ -991,6 +1002,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
2179 +
2180 + struct user_element {
2181 + struct snd_ctl_elem_info info;
2182 ++ struct snd_card *card;
2183 + void *elem_data; /* element data */
2184 + unsigned long elem_data_size; /* size of element data in bytes */
2185 + void *tlv_data; /* TLV data */
2186 +@@ -1034,7 +1046,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
2187 + {
2188 + struct user_element *ue = kcontrol->private_data;
2189 +
2190 ++ mutex_lock(&ue->card->user_ctl_lock);
2191 + memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
2192 ++ mutex_unlock(&ue->card->user_ctl_lock);
2193 + return 0;
2194 + }
2195 +
2196 +@@ -1043,10 +1057,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
2197 + {
2198 + int change;
2199 + struct user_element *ue = kcontrol->private_data;
2200 +-
2201 ++
2202 ++ mutex_lock(&ue->card->user_ctl_lock);
2203 + change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
2204 + if (change)
2205 + memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
2206 ++ mutex_unlock(&ue->card->user_ctl_lock);
2207 + return change;
2208 + }
2209 +
2210 +@@ -1066,19 +1082,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
2211 + new_data = memdup_user(tlv, size);
2212 + if (IS_ERR(new_data))
2213 + return PTR_ERR(new_data);
2214 ++ mutex_lock(&ue->card->user_ctl_lock);
2215 + change = ue->tlv_data_size != size;
2216 + if (!change)
2217 + change = memcmp(ue->tlv_data, new_data, size);
2218 + kfree(ue->tlv_data);
2219 + ue->tlv_data = new_data;
2220 + ue->tlv_data_size = size;
2221 ++ mutex_unlock(&ue->card->user_ctl_lock);
2222 + } else {
2223 +- if (! ue->tlv_data_size || ! ue->tlv_data)
2224 +- return -ENXIO;
2225 +- if (size < ue->tlv_data_size)
2226 +- return -ENOSPC;
2227 ++ int ret = 0;
2228 ++
2229 ++ mutex_lock(&ue->card->user_ctl_lock);
2230 ++ if (!ue->tlv_data_size || !ue->tlv_data) {
2231 ++ ret = -ENXIO;
2232 ++ goto err_unlock;
2233 ++ }
2234 ++ if (size < ue->tlv_data_size) {
2235 ++ ret = -ENOSPC;
2236 ++ goto err_unlock;
2237 ++ }
2238 + if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
2239 +- return -EFAULT;
2240 ++ ret = -EFAULT;
2241 ++err_unlock:
2242 ++ mutex_unlock(&ue->card->user_ctl_lock);
2243 ++ if (ret)
2244 ++ return ret;
2245 + }
2246 + return change;
2247 + }
2248 +@@ -1136,8 +1165,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2249 + struct user_element *ue;
2250 + int idx, err;
2251 +
2252 +- if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
2253 +- return -ENOMEM;
2254 + if (info->count < 1)
2255 + return -EINVAL;
2256 + access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
2257 +@@ -1146,21 +1173,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2258 + SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
2259 + info->id.numid = 0;
2260 + memset(&kctl, 0, sizeof(kctl));
2261 +- down_write(&card->controls_rwsem);
2262 +- _kctl = snd_ctl_find_id(card, &info->id);
2263 +- err = 0;
2264 +- if (_kctl) {
2265 +- if (replace)
2266 +- err = snd_ctl_remove(card, _kctl);
2267 +- else
2268 +- err = -EBUSY;
2269 +- } else {
2270 +- if (replace)
2271 +- err = -ENOENT;
2272 ++
2273 ++ if (replace) {
2274 ++ err = snd_ctl_remove_user_ctl(file, &info->id);
2275 ++ if (err)
2276 ++ return err;
2277 + }
2278 +- up_write(&card->controls_rwsem);
2279 +- if (err < 0)
2280 +- return err;
2281 ++
2282 ++ if (card->user_ctl_count >= MAX_USER_CONTROLS)
2283 ++ return -ENOMEM;
2284 ++
2285 + memcpy(&kctl.id, &info->id, sizeof(info->id));
2286 + kctl.count = info->owner ? info->owner : 1;
2287 + access |= SNDRV_CTL_ELEM_ACCESS_USER;
2288 +@@ -1210,6 +1232,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
2289 + ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
2290 + if (ue == NULL)
2291 + return -ENOMEM;
2292 ++ ue->card = card;
2293 + ue->info = *info;
2294 + ue->info.access = 0;
2295 + ue->elem_data = (char *)ue + sizeof(*ue);
2296 +@@ -1321,8 +1344,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
2297 + }
2298 + err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
2299 + if (err > 0) {
2300 ++ struct snd_ctl_elem_id id = kctl->id;
2301 + up_read(&card->controls_rwsem);
2302 +- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
2303 ++ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
2304 + return 0;
2305 + }
2306 + } else {
2307 +diff --git a/sound/core/init.c b/sound/core/init.c
2308 +index 5ee83845c5de..7bdfd19e24a8 100644
2309 +--- a/sound/core/init.c
2310 ++++ b/sound/core/init.c
2311 +@@ -232,6 +232,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
2312 + INIT_LIST_HEAD(&card->devices);
2313 + init_rwsem(&card->controls_rwsem);
2314 + rwlock_init(&card->ctl_files_rwlock);
2315 ++ mutex_init(&card->user_ctl_lock);
2316 + INIT_LIST_HEAD(&card->controls);
2317 + INIT_LIST_HEAD(&card->ctl_files);
2318 + spin_lock_init(&card->files_lock);
2319 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2320 +index b4218a19df22..8867ab3a71d4 100644
2321 +--- a/sound/pci/hda/patch_hdmi.c
2322 ++++ b/sound/pci/hda/patch_hdmi.c
2323 +@@ -1598,10 +1598,18 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
2324 + * Re-setup pin and infoframe. This is needed e.g. when
2325 + * - sink is first plugged-in (infoframe is not set up if !monitor_present)
2326 + * - transcoder can change during stream playback on Haswell
2327 ++ * and this can make HW reset converter selection on a pin.
2328 + */
2329 +- if (eld->eld_valid && !old_eld_valid && per_pin->setup)
2330 ++ if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
2331 ++ if (is_haswell_plus(codec) || is_valleyview(codec)) {
2332 ++ intel_verify_pin_cvt_connect(codec, per_pin);
2333 ++ intel_not_share_assigned_cvt(codec, pin_nid,
2334 ++ per_pin->mux_idx);
2335 ++ }
2336 ++
2337 + hdmi_setup_audio_infoframe(codec, per_pin,
2338 + per_pin->non_pcm);
2339 ++ }
2340 + }
2341 +
2342 + if (eld_changed)
2343 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2344 +index d943508a7f48..10014ed541cf 100644
2345 +--- a/sound/pci/hda/patch_realtek.c
2346 ++++ b/sound/pci/hda/patch_realtek.c
2347 +@@ -4114,6 +4114,7 @@ enum {
2348 + ALC269_FIXUP_HEADSET_MIC,
2349 + ALC269_FIXUP_QUANTA_MUTE,
2350 + ALC269_FIXUP_LIFEBOOK,
2351 ++ ALC269_FIXUP_LIFEBOOK_EXTMIC,
2352 + ALC269_FIXUP_AMIC,
2353 + ALC269_FIXUP_DMIC,
2354 + ALC269VB_FIXUP_AMIC,
2355 +@@ -4243,6 +4244,13 @@ static const struct hda_fixup alc269_fixups[] = {
2356 + .chained = true,
2357 + .chain_id = ALC269_FIXUP_QUANTA_MUTE
2358 + },
2359 ++ [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
2360 ++ .type = HDA_FIXUP_PINS,
2361 ++ .v.pins = (const struct hda_pintbl[]) {
2362 ++ { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
2363 ++ { }
2364 ++ },
2365 ++ },
2366 + [ALC269_FIXUP_AMIC] = {
2367 + .type = HDA_FIXUP_PINS,
2368 + .v.pins = (const struct hda_pintbl[]) {
2369 +@@ -4633,14 +4641,24 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2370 + SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2371 + SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
2372 + /* ALC282 */
2373 ++ SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2374 ++ SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2375 + SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2376 ++ SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2377 ++ SND_PCI_QUIRK(0x103c, 0x2211, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2378 ++ SND_PCI_QUIRK(0x103c, 0x2212, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2379 + SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2380 ++ SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2381 + SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2382 + SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2383 + SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2384 + SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2385 + SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2386 + SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2387 ++ SND_PCI_QUIRK(0x103c, 0x226c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2388 ++ SND_PCI_QUIRK(0x103c, 0x226d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2389 ++ SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2390 ++ SND_PCI_QUIRK(0x103c, 0x226f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2391 + SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2392 + SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2393 + SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2394 +@@ -4680,6 +4698,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2395 + SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2396 + SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2397 + SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2398 ++ SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2399 ++ SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2400 ++ SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2401 ++ SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
2402 + SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
2403 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
2404 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2405 +@@ -4702,6 +4724,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2406 + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
2407 + SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
2408 + SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
2409 ++ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2410 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
2411 + SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
2412 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
2413 +@@ -5809,6 +5832,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
2414 + { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
2415 + { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
2416 + { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
2417 ++ { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
2418 + { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
2419 + { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
2420 + { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
2421 +diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
2422 +index f7b0b37aa858..0757e655bfe3 100644
2423 +--- a/sound/soc/codecs/max98090.c
2424 ++++ b/sound/soc/codecs/max98090.c
2425 +@@ -255,6 +255,7 @@ static struct reg_default max98090_reg[] = {
2426 + static bool max98090_volatile_register(struct device *dev, unsigned int reg)
2427 + {
2428 + switch (reg) {
2429 ++ case M98090_REG_SOFTWARE_RESET:
2430 + case M98090_REG_DEVICE_STATUS:
2431 + case M98090_REG_JACK_STATUS:
2432 + case M98090_REG_REVISION_ID:
2433 +@@ -2373,6 +2374,8 @@ static int max98090_runtime_resume(struct device *dev)
2434 +
2435 + regcache_cache_only(max98090->regmap, false);
2436 +
2437 ++ max98090_reset(max98090);
2438 ++
2439 + regcache_sync(max98090->regmap);
2440 +
2441 + return 0;
2442 +diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
2443 +index d7349bc89ad3..e12fafbb1e09 100644
2444 +--- a/sound/soc/codecs/tlv320aic3x.c
2445 ++++ b/sound/soc/codecs/tlv320aic3x.c
2446 +@@ -169,7 +169,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
2447 + mask <<= shift;
2448 + val <<= shift;
2449 +
2450 +- change = snd_soc_test_bits(codec, val, mask, reg);
2451 ++ change = snd_soc_test_bits(codec, reg, mask, val);
2452 + if (change) {
2453 + update.kcontrol = kcontrol;
2454 + update.reg = reg;
2455 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2456 +index 6d6ceee447d5..ebb03a886593 100644
2457 +--- a/sound/soc/soc-dapm.c
2458 ++++ b/sound/soc/soc-dapm.c
2459 +@@ -2857,22 +2857,19 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
2460 + mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
2461 +
2462 + change = dapm_kcontrol_set_value(kcontrol, val);
2463 +-
2464 +- if (reg != SND_SOC_NOPM) {
2465 +- mask = mask << shift;
2466 +- val = val << shift;
2467 +-
2468 +- change = snd_soc_test_bits(codec, reg, mask, val);
2469 +- }
2470 +-
2471 + if (change) {
2472 + if (reg != SND_SOC_NOPM) {
2473 +- update.kcontrol = kcontrol;
2474 +- update.reg = reg;
2475 +- update.mask = mask;
2476 +- update.val = val;
2477 ++ mask = mask << shift;
2478 ++ val = val << shift;
2479 ++
2480 ++ if (snd_soc_test_bits(codec, reg, mask, val)) {
2481 ++ update.kcontrol = kcontrol;
2482 ++ update.reg = reg;
2483 ++ update.mask = mask;
2484 ++ update.val = val;
2485 ++ card->update = &update;
2486 ++ }
2487 +
2488 +- card->update = &update;
2489 + }
2490 +
2491 + ret = soc_dapm_mixer_update_power(card, kcontrol, connect);