From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Fri, 06 Sep 2019 17:17:42
Message-Id: 1567790234.d37141da92767871b7abd6a808d02756f5d6438e.mpagano@gentoo
1 commit: d37141da92767871b7abd6a808d02756f5d6438e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Sep 6 17:17:14 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Sep 6 17:17:14 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d37141da
7
8 Linux patch 4.4.191
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1190_linux-4.4.191.patch | 4273 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 4277 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e429de7..af92bf6 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -803,6 +803,10 @@ Patch: 1189_linux-4.4.190.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.190
23
24 +Patch: 1190_linux-4.4.191.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.191
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1190_linux-4.4.191.patch b/1190_linux-4.4.191.patch
33 new file mode 100644
34 index 0000000..eccb96a
35 --- /dev/null
36 +++ b/1190_linux-4.4.191.patch
37 @@ -0,0 +1,4273 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index 7a9fd54a0186..5b94c0bfba85 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -3415,6 +3415,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
43 + Run specified binary instead of /init from the ramdisk,
44 + used for early userspace startup. See initrd.
45 +
46 ++ rdrand= [X86]
47 ++ force - Override the decision by the kernel to hide the
48 ++ advertisement of RDRAND support (this affects
49 ++ certain AMD processors because of buggy BIOS
50 ++ support, specifically around the suspend/resume
51 ++ path).
52 ++
53 + reboot= [KNL]
54 + Format (x86 or x86_64):
55 + [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
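
As a usage note for the new parameter documented above: an affected machine re-enables the RDRAND advertisement by booting with rdrand=force appended to the kernel command line (for example via GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub, then regenerating grub.cfg); the setting takes effect on the next boot.
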
56 +diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
57 +new file mode 100644
58 +index 000000000000..908d348ff777
59 +--- /dev/null
60 ++++ b/Documentation/siphash.txt
61 +@@ -0,0 +1,175 @@
62 ++ SipHash - a short input PRF
63 ++-----------------------------------------------
64 ++Written by Jason A. Donenfeld <jason@×××××.com>
65 ++
66 ++SipHash is a cryptographically secure PRF -- a keyed hash function -- that
67 ++performs very well for short inputs, hence the name. It was designed by
68 ++cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
69 ++as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`,
70 ++and so forth.
71 ++
72 ++SipHash takes a secret key filled with randomly generated numbers and either
73 ++an input buffer or several input integers. It spits out an integer that is
74 ++indistinguishable from random. You may then use that integer as part of secure
75 ++sequence numbers, secure cookies, or mask it off for use in a hash table.
76 ++
77 ++1. Generating a key
78 ++
79 ++Keys should always be generated from a cryptographically secure source of
80 ++random numbers, either using get_random_bytes or get_random_once:
81 ++
82 ++siphash_key_t key;
83 ++get_random_bytes(&key, sizeof(key));
84 ++
85 ++If you're not deriving your key from here, you're doing it wrong.
86 ++
87 ++2. Using the functions
88 ++
89 ++There are two variants of the function, one that takes a list of integers, and
90 ++one that takes a buffer:
91 ++
92 ++u64 siphash(const void *data, size_t len, const siphash_key_t *key);
93 ++
94 ++And:
95 ++
96 ++u64 siphash_1u64(u64, const siphash_key_t *key);
97 ++u64 siphash_2u64(u64, u64, const siphash_key_t *key);
98 ++u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
99 ++u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
100 ++u64 siphash_1u32(u32, const siphash_key_t *key);
101 ++u64 siphash_2u32(u32, u32, const siphash_key_t *key);
102 ++u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
103 ++u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
104 ++
105 ++If you pass the generic siphash function something of a constant length, it
106 ++will constant fold at compile-time and automatically choose one of the
107 ++optimized functions.
108 ++
109 ++3. Hashtable key function usage:
110 ++
111 ++struct some_hashtable {
112 ++ DECLARE_HASHTABLE(hashtable, 8);
113 ++ siphash_key_t key;
114 ++};
115 ++
116 ++void init_hashtable(struct some_hashtable *table)
117 ++{
118 ++ get_random_bytes(&table->key, sizeof(table->key));
119 ++}
120 ++
121 ++static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
122 ++{
123 ++ return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
124 ++}
125 ++
126 ++You may then iterate like usual over the returned hash bucket.
127 ++
128 ++4. Security
129 ++
130 ++SipHash has a very high security margin, with its 128-bit key. So long as the
131 ++key is kept secret, it is impossible for an attacker to guess the outputs of
132 ++the function, even if it is able to observe many outputs, since the 2^128
133 ++keyspace is significant.
134 ++
135 ++Linux implements the "2-4" variant of SipHash.
136 ++
137 ++5. Struct-passing Pitfalls
138 ++
139 ++Oftentimes the XuY functions will not be large enough, and instead you'll
140 ++want to pass a pre-filled struct to siphash. When doing this, it's important
141 ++to always ensure the struct has no padding holes. The easiest way to do this
142 ++is to simply arrange the members of the struct in descending order of size,
143 ++and to use offsetofend() instead of sizeof() for getting the size. For
144 ++performance reasons, if possible, it's probably a good thing to align the
145 ++struct to the right boundary. Here's an example:
146 ++
147 ++const struct {
148 ++ struct in6_addr saddr;
149 ++ u32 counter;
150 ++ u16 dport;
151 ++} __aligned(SIPHASH_ALIGNMENT) combined = {
152 ++ .saddr = *(struct in6_addr *)saddr,
153 ++ .counter = counter,
154 ++ .dport = dport
155 ++};
156 ++u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
157 ++
158 ++6. Resources
159 ++
160 ++Read the SipHash paper if you're interested in learning more:
161 ++https://131002.net/siphash/siphash.pdf
162 ++
163 ++
164 ++~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
165 ++
166 ++HalfSipHash - SipHash's insecure younger cousin
167 ++-----------------------------------------------
168 ++Written by Jason A. Donenfeld <jason@×××××.com>
169 ++
170 ++On the off-chance that SipHash is not fast enough for your needs, you might be
171 ++able to justify using HalfSipHash, a terrifying but potentially useful
172 ++possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
173 ++even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
174 ++instead of SipHash's 128-bit key. However, this may appeal to some
175 ++high-performance `jhash` users.
176 ++
177 ++Danger!
178 ++
179 ++Do not ever use HalfSipHash except as a hashtable key function, and only
180 ++then when you can be absolutely certain that the outputs will never be
181 ++transmitted out of the kernel. This is only remotely useful over `jhash` as a
182 ++means of mitigating hashtable flooding denial of service attacks.
183 ++
184 ++1. Generating a key
185 ++
186 ++Keys should always be generated from a cryptographically secure source of
187 ++random numbers, either using get_random_bytes or get_random_once:
188 ++
189 ++hsiphash_key_t key;
190 ++get_random_bytes(&key, sizeof(key));
191 ++
192 ++If you're not deriving your key from here, you're doing it wrong.
193 ++
194 ++2. Using the functions
195 ++
196 ++There are two variants of the function, one that takes a list of integers, and
197 ++one that takes a buffer:
198 ++
199 ++u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
200 ++
201 ++And:
202 ++
203 ++u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
204 ++u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
205 ++u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
206 ++u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
207 ++
208 ++If you pass the generic hsiphash function something of a constant length, it
209 ++will constant fold at compile-time and automatically choose one of the
210 ++optimized functions.
211 ++
212 ++3. Hashtable key function usage:
213 ++
214 ++struct some_hashtable {
215 ++ DECLARE_HASHTABLE(hashtable, 8);
216 ++ hsiphash_key_t key;
217 ++};
218 ++
219 ++void init_hashtable(struct some_hashtable *table)
220 ++{
221 ++ get_random_bytes(&table->key, sizeof(table->key));
222 ++}
223 ++
224 ++static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
225 ++{
226 ++ return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
227 ++}
228 ++
229 ++You may then iterate like usual over the returned hash bucket.
230 ++
231 ++4. Performance
232 ++
233 ++HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
234 ++this will not be a problem, as the hashtable lookup isn't the bottleneck. And
235 ++in general, this is probably a good sacrifice to make for the security and DoS
236 ++resistance of HalfSipHash.
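
As a worked illustration tying together the key-generation, struct-passing, and hashing sections above (a minimal sketch only, in which flow_key, flow_secret, and flow_hash are hypothetical names and not part of this patch):

#include <linux/siphash.h>
#include <linux/random.h>

static siphash_key_t flow_secret __read_mostly;	/* 128-bit key, per section 1 */

struct flow_key {		/* members in descending size: no padding holes */
	u64 remote;
	u32 counter;
	u16 dport;
} __aligned(SIPHASH_ALIGNMENT);

static void flow_hash_init(void)
{
	get_random_bytes(&flow_secret, sizeof(flow_secret));
}

static u64 flow_hash(const struct flow_key *k)
{
	/* offsetofend() stops at the last member, as section 5 advises */
	return siphash(k, offsetofend(struct flow_key, dport), &flow_secret);
}

The HalfSipHash variant is used the same way via hsiphash() and hsiphash_key_t, where its weaker guarantees are acceptable.
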
237 +diff --git a/MAINTAINERS b/MAINTAINERS
238 +index f4d4a5544dc1..20a31b357929 100644
239 +--- a/MAINTAINERS
240 ++++ b/MAINTAINERS
241 +@@ -9749,6 +9749,13 @@ F: arch/arm/mach-s3c24xx/mach-bast.c
242 + F: arch/arm/mach-s3c24xx/bast-ide.c
243 + F: arch/arm/mach-s3c24xx/bast-irq.c
244 +
245 ++SIPHASH PRF ROUTINES
246 ++M: Jason A. Donenfeld <Jason@×××××.com>
247 ++S: Maintained
248 ++F: lib/siphash.c
249 ++F: lib/test_siphash.c
250 ++F: include/linux/siphash.h
251 ++
252 + TI DAVINCI MACHINE SUPPORT
253 + M: Sekhar Nori <nsekhar@××.com>
254 + M: Kevin Hilman <khilman@×××××××××××××××.com>
255 +diff --git a/Makefile b/Makefile
256 +index 83acf2d6c55e..266c3d7e0120 100644
257 +--- a/Makefile
258 ++++ b/Makefile
259 +@@ -1,6 +1,6 @@
260 + VERSION = 4
261 + PATCHLEVEL = 4
262 +-SUBLEVEL = 190
263 ++SUBLEVEL = 191
264 + EXTRAVERSION =
265 + NAME = Blurry Fish Butt
266 +
267 +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
268 +index c5bc344fc745..73039746ae36 100644
269 +--- a/arch/mips/kernel/i8253.c
270 ++++ b/arch/mips/kernel/i8253.c
271 +@@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
272 +
273 + static int __init init_pit_clocksource(void)
274 + {
275 +- if (num_possible_cpus() > 1) /* PIT does not scale! */
276 ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
277 ++ !clockevent_state_periodic(&i8253_clockevent))
278 + return 0;
279 +
280 + return clocksource_i8253_init();
281 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
282 +index 4a8cb8d7cbd5..0232b5a2a2d9 100644
283 +--- a/arch/x86/include/asm/bootparam_utils.h
284 ++++ b/arch/x86/include/asm/bootparam_utils.h
285 +@@ -17,6 +17,20 @@
286 + * Note: efi_info is commonly left uninitialized, but that field has a
287 + * private magic, so it is better to leave it unchanged.
288 + */
289 ++
290 ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
291 ++
292 ++#define BOOT_PARAM_PRESERVE(struct_member) \
293 ++ { \
294 ++ .start = offsetof(struct boot_params, struct_member), \
295 ++ .len = sizeof_mbr(struct boot_params, struct_member), \
296 ++ }
297 ++
298 ++struct boot_params_to_save {
299 ++ unsigned int start;
300 ++ unsigned int len;
301 ++};
302 ++
303 + static void sanitize_boot_params(struct boot_params *boot_params)
304 + {
305 + /*
306 +@@ -35,19 +49,39 @@ static void sanitize_boot_params(struct boot_params *boot_params)
307 + */
308 + if (boot_params->sentinel) {
309 + /* fields in boot_params are left uninitialized, clear them */
310 +- memset(&boot_params->ext_ramdisk_image, 0,
311 +- (char *)&boot_params->efi_info -
312 +- (char *)&boot_params->ext_ramdisk_image);
313 +- memset(&boot_params->kbd_status, 0,
314 +- (char *)&boot_params->hdr -
315 +- (char *)&boot_params->kbd_status);
316 +- memset(&boot_params->_pad7[0], 0,
317 +- (char *)&boot_params->edd_mbr_sig_buffer[0] -
318 +- (char *)&boot_params->_pad7[0]);
319 +- memset(&boot_params->_pad8[0], 0,
320 +- (char *)&boot_params->eddbuf[0] -
321 +- (char *)&boot_params->_pad8[0]);
322 +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
323 ++ static struct boot_params scratch;
324 ++ char *bp_base = (char *)boot_params;
325 ++ char *save_base = (char *)&scratch;
326 ++ int i;
327 ++
328 ++ const struct boot_params_to_save to_save[] = {
329 ++ BOOT_PARAM_PRESERVE(screen_info),
330 ++ BOOT_PARAM_PRESERVE(apm_bios_info),
331 ++ BOOT_PARAM_PRESERVE(tboot_addr),
332 ++ BOOT_PARAM_PRESERVE(ist_info),
333 ++ BOOT_PARAM_PRESERVE(hd0_info),
334 ++ BOOT_PARAM_PRESERVE(hd1_info),
335 ++ BOOT_PARAM_PRESERVE(sys_desc_table),
336 ++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
337 ++ BOOT_PARAM_PRESERVE(efi_info),
338 ++ BOOT_PARAM_PRESERVE(alt_mem_k),
339 ++ BOOT_PARAM_PRESERVE(scratch),
340 ++ BOOT_PARAM_PRESERVE(e820_entries),
341 ++ BOOT_PARAM_PRESERVE(eddbuf_entries),
342 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
343 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
344 ++ BOOT_PARAM_PRESERVE(hdr),
345 ++ BOOT_PARAM_PRESERVE(eddbuf),
346 ++ };
347 ++
348 ++ memset(&scratch, 0, sizeof(scratch));
349 ++
350 ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
351 ++ memcpy(save_base + to_save[i].start,
352 ++ bp_base + to_save[i].start, to_save[i].len);
353 ++ }
354 ++
355 ++ memcpy(boot_params, save_base, sizeof(*boot_params));
356 + }
357 + }
358 +
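
The sizeof_mbr() macro added above uses the classic null-pointer member trick (sizeof is an unevaluated context, so no dereference ever occurs). A self-contained userspace sketch of the same idiom, with illustrative names rather than kernel code:

#include <stdio.h>
#include <stddef.h>

struct boot_like {
	char pad[16];
	unsigned int hdr[4];
};

/* sizeof() of a member without an instance, as sizeof_mbr() does */
#define sizeof_member(type, member) (sizeof(((type *)0)->member))

struct span { size_t start, len; };

int main(void)
{
	/* mirrors BOOT_PARAM_PRESERVE(): record where a field lives and how big it is */
	struct span s = { offsetof(struct boot_like, hdr),
			  sizeof_member(struct boot_like, hdr) };
	printf("hdr: offset %zu, len %zu\n", s.start, s.len);
	return 0;
}
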
359 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
360 +index d4f5b8209393..30183770132a 100644
361 +--- a/arch/x86/include/asm/msr-index.h
362 ++++ b/arch/x86/include/asm/msr-index.h
363 +@@ -311,6 +311,7 @@
364 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
365 + #define MSR_AMD64_TSC_RATIO 0xc0000104
366 + #define MSR_AMD64_NB_CFG 0xc001001f
367 ++#define MSR_AMD64_CPUID_FN_1 0xc0011004
368 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
369 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
370 + #define MSR_AMD64_OSVW_STATUS 0xc0010141
371 +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
372 +index 5a10ac8c131e..20f822fec8af 100644
373 +--- a/arch/x86/include/asm/msr.h
374 ++++ b/arch/x86/include/asm/msr.h
375 +@@ -32,6 +32,16 @@ struct msr_regs_info {
376 + int err;
377 + };
378 +
379 ++struct saved_msr {
380 ++ bool valid;
381 ++ struct msr_info info;
382 ++};
383 ++
384 ++struct saved_msrs {
385 ++ unsigned int num;
386 ++ struct saved_msr *array;
387 ++};
388 ++
389 + static inline unsigned long long native_read_tscp(unsigned int *aux)
390 + {
391 + unsigned long low, high;
392 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
393 +index e58c078f3d96..c3138ac80db2 100644
394 +--- a/arch/x86/include/asm/nospec-branch.h
395 ++++ b/arch/x86/include/asm/nospec-branch.h
396 +@@ -151,7 +151,7 @@
397 + " lfence;\n" \
398 + " jmp 902b;\n" \
399 + " .align 16\n" \
400 +- "903: addl $4, %%esp;\n" \
401 ++ "903: lea 4(%%esp), %%esp;\n" \
402 + " pushl %[thunk_target];\n" \
403 + " ret;\n" \
404 + " .align 16\n" \
405 +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
406 +index 6271281f947d..0d8e0831b1a0 100644
407 +--- a/arch/x86/include/asm/ptrace.h
408 ++++ b/arch/x86/include/asm/ptrace.h
409 +@@ -121,9 +121,9 @@ static inline int v8086_mode(struct pt_regs *regs)
410 + #endif
411 + }
412 +
413 +-#ifdef CONFIG_X86_64
414 + static inline bool user_64bit_mode(struct pt_regs *regs)
415 + {
416 ++#ifdef CONFIG_X86_64
417 + #ifndef CONFIG_PARAVIRT
418 + /*
419 + * On non-paravirt systems, this is the only long mode CPL 3
420 +@@ -134,8 +134,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
421 + /* Headers are too twisted for this to go in paravirt.h. */
422 + return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
423 + #endif
424 ++#else /* !CONFIG_X86_64 */
425 ++ return false;
426 ++#endif
427 + }
428 +
429 ++#ifdef CONFIG_X86_64
430 + #define current_user_stack_pointer() current_pt_regs()->sp
431 + #define compat_user_stack_pointer() current_pt_regs()->sp
432 + #endif
433 +diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
434 +index d1793f06854d..8e9dbe7b73a1 100644
435 +--- a/arch/x86/include/asm/suspend_32.h
436 ++++ b/arch/x86/include/asm/suspend_32.h
437 +@@ -15,6 +15,7 @@ struct saved_context {
438 + unsigned long cr0, cr2, cr3, cr4;
439 + u64 misc_enable;
440 + bool misc_enable_saved;
441 ++ struct saved_msrs saved_msrs;
442 + struct desc_ptr gdt_desc;
443 + struct desc_ptr idt;
444 + u16 ldt;
445 +diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
446 +index 7ebf0ebe4e68..6136a18152af 100644
447 +--- a/arch/x86/include/asm/suspend_64.h
448 ++++ b/arch/x86/include/asm/suspend_64.h
449 +@@ -24,6 +24,7 @@ struct saved_context {
450 + unsigned long cr0, cr2, cr3, cr4, cr8;
451 + u64 misc_enable;
452 + bool misc_enable_saved;
453 ++ struct saved_msrs saved_msrs;
454 + unsigned long efer;
455 + u16 gdt_pad; /* Unused */
456 + struct desc_ptr gdt_desc;
457 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
458 +index cc6c33249850..80c94fc8ad5a 100644
459 +--- a/arch/x86/kernel/apic/apic.c
460 ++++ b/arch/x86/kernel/apic/apic.c
461 +@@ -593,7 +593,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
462 + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
463 +
464 + /*
465 +- * Temporary interrupt handler.
466 ++ * Temporary interrupt handler and polled calibration function.
467 + */
468 + static void __init lapic_cal_handler(struct clock_event_device *dev)
469 + {
470 +@@ -677,7 +677,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
471 + static int __init calibrate_APIC_clock(void)
472 + {
473 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
474 +- void (*real_handler)(struct clock_event_device *dev);
475 ++ u64 tsc_perj = 0, tsc_start = 0;
476 ++ unsigned long jif_start;
477 + unsigned long deltaj;
478 + long delta, deltatsc;
479 + int pm_referenced = 0;
480 +@@ -706,28 +707,64 @@ static int __init calibrate_APIC_clock(void)
481 + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
482 + "calibrating APIC timer ...\n");
483 +
484 ++ /*
485 ++ * There are platforms w/o global clockevent devices. Instead of
486 ++ * making the calibration conditional on that, use a polling based
487 ++ * approach everywhere.
488 ++ */
489 + local_irq_disable();
490 +
491 +- /* Replace the global interrupt handler */
492 +- real_handler = global_clock_event->event_handler;
493 +- global_clock_event->event_handler = lapic_cal_handler;
494 +-
495 + /*
496 + * Setup the APIC counter to maximum. There is no way the lapic
497 + * can underflow in the 100ms detection time frame
498 + */
499 + __setup_APIC_LVTT(0xffffffff, 0, 0);
500 +
501 +- /* Let the interrupts run */
502 ++ /*
503 ++ * Methods to terminate the calibration loop:
504 ++ * 1) Global clockevent if available (jiffies)
505 ++ * 2) TSC if available and frequency is known
506 ++ */
507 ++ jif_start = READ_ONCE(jiffies);
508 ++
509 ++ if (tsc_khz) {
510 ++ tsc_start = rdtsc();
511 ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
512 ++ }
513 ++
514 ++ /*
515 ++ * Enable interrupts so the tick can fire, if a global
516 ++ * clockevent device is available
517 ++ */
518 + local_irq_enable();
519 +
520 +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
521 +- cpu_relax();
522 ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
523 ++ /* Wait for a tick to elapse */
524 ++ while (1) {
525 ++ if (tsc_khz) {
526 ++ u64 tsc_now = rdtsc();
527 ++ if ((tsc_now - tsc_start) >= tsc_perj) {
528 ++ tsc_start += tsc_perj;
529 ++ break;
530 ++ }
531 ++ } else {
532 ++ unsigned long jif_now = READ_ONCE(jiffies);
533 +
534 +- local_irq_disable();
535 ++ if (time_after(jif_now, jif_start)) {
536 ++ jif_start = jif_now;
537 ++ break;
538 ++ }
539 ++ }
540 ++ cpu_relax();
541 ++ }
542 +
543 +- /* Restore the real event handler */
544 +- global_clock_event->event_handler = real_handler;
545 ++ /* Invoke the calibration routine */
546 ++ local_irq_disable();
547 ++ lapic_cal_handler(NULL);
548 ++ local_irq_enable();
549 ++ }
550 ++
551 ++ local_irq_disable();
552 +
553 + /* Build delta t1-t2 as apic timer counts down */
554 + delta = lapic_cal_t1 - lapic_cal_t2;
555 +@@ -778,10 +815,11 @@ static int __init calibrate_APIC_clock(void)
556 + levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
557 +
558 + /*
559 +- * PM timer calibration failed or not turned on
560 +- * so lets try APIC timer based calibration
561 ++ * PM timer calibration failed or not turned on so lets try APIC
562 ++ * timer based calibration, if a global clockevent device is
563 ++ * available.
564 + */
565 +- if (!pm_referenced) {
566 ++ if (!pm_referenced && global_clock_event) {
567 + apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
568 +
569 + /*
570 +@@ -993,6 +1031,10 @@ void clear_local_APIC(void)
571 + apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
572 + v = apic_read(APIC_LVT1);
573 + apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
574 ++ if (!x2apic_enabled()) {
575 ++ v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
576 ++ apic_write(APIC_LDR, v);
577 ++ }
578 + if (maxlvt >= 4) {
579 + v = apic_read(APIC_LVTPC);
580 + apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
581 +diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
582 +index 971cf8875939..d75f665dd022 100644
583 +--- a/arch/x86/kernel/apic/bigsmp_32.c
584 ++++ b/arch/x86/kernel/apic/bigsmp_32.c
585 +@@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
586 + return early_per_cpu(x86_cpu_to_apicid, cpu);
587 + }
588 +
589 +-static inline unsigned long calculate_ldr(int cpu)
590 +-{
591 +- unsigned long val, id;
592 +-
593 +- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
594 +- id = per_cpu(x86_bios_cpu_apicid, cpu);
595 +- val |= SET_APIC_LOGICAL_ID(id);
596 +-
597 +- return val;
598 +-}
599 +-
600 + /*
601 +- * Set up the logical destination ID.
602 +- *
603 +- * Intel recommends to set DFR, LDR and TPR before enabling
604 +- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
605 +- * document number 292116). So here it goes...
606 ++ * bigsmp enables physical destination mode
607 ++ * and doesn't use LDR and DFR
608 + */
609 + static void bigsmp_init_apic_ldr(void)
610 + {
611 +- unsigned long val;
612 +- int cpu = smp_processor_id();
613 +-
614 +- apic_write(APIC_DFR, APIC_DFR_FLAT);
615 +- val = calculate_ldr(cpu);
616 +- apic_write(APIC_LDR, val);
617 + }
618 +
619 + static void bigsmp_setup_apic_routing(void)
620 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
621 +index 6f2483292de0..424d8a636615 100644
622 +--- a/arch/x86/kernel/cpu/amd.c
623 ++++ b/arch/x86/kernel/cpu/amd.c
624 +@@ -684,6 +684,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
625 + msr_set_bit(MSR_AMD64_DE_CFG, 31);
626 + }
627 +
628 ++static bool rdrand_force;
629 ++
630 ++static int __init rdrand_cmdline(char *str)
631 ++{
632 ++ if (!str)
633 ++ return -EINVAL;
634 ++
635 ++ if (!strcmp(str, "force"))
636 ++ rdrand_force = true;
637 ++ else
638 ++ return -EINVAL;
639 ++
640 ++ return 0;
641 ++}
642 ++early_param("rdrand", rdrand_cmdline);
643 ++
644 ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
645 ++{
646 ++ /*
647 ++ * Saving of the MSR used to hide the RDRAND support during
648 ++ * suspend/resume is done by arch/x86/power/cpu.c, which is
649 ++ * dependent on CONFIG_PM_SLEEP.
650 ++ */
651 ++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
652 ++ return;
653 ++
654 ++ /*
655 ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
656 ++ * RDRAND support using the CPUID function directly.
657 ++ */
658 ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
659 ++ return;
660 ++
661 ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
662 ++
663 ++ /*
664 ++ * Verify that the CPUID change has occurred in case the kernel is
665 ++ * running virtualized and the hypervisor doesn't support the MSR.
666 ++ */
667 ++ if (cpuid_ecx(1) & BIT(30)) {
668 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
669 ++ return;
670 ++ }
671 ++
672 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
673 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
674 ++}
675 ++
676 ++static void init_amd_jg(struct cpuinfo_x86 *c)
677 ++{
678 ++ /*
679 ++ * Some BIOS implementations do not restore proper RDRAND support
680 ++ * across suspend and resume. Check on whether to hide the RDRAND
681 ++ * instruction support via CPUID.
682 ++ */
683 ++ clear_rdrand_cpuid_bit(c);
684 ++}
685 ++
686 + static void init_amd_bd(struct cpuinfo_x86 *c)
687 + {
688 + u64 value;
689 +@@ -711,6 +769,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
690 + wrmsrl_safe(0xc0011021, value);
691 + }
692 + }
693 ++
694 ++ /*
695 ++ * Some BIOS implementations do not restore proper RDRAND support
696 ++ * across suspend and resume. Check on whether to hide the RDRAND
697 ++ * instruction support via CPUID.
698 ++ */
699 ++ clear_rdrand_cpuid_bit(c);
700 + }
701 +
702 + static void init_amd_zn(struct cpuinfo_x86 *c)
703 +@@ -755,6 +820,7 @@ static void init_amd(struct cpuinfo_x86 *c)
704 + case 0x10: init_amd_gh(c); break;
705 + case 0x12: init_amd_ln(c); break;
706 + case 0x15: init_amd_bd(c); break;
707 ++ case 0x16: init_amd_jg(c); break;
708 + case 0x17: init_amd_zn(c); break;
709 + }
710 +
711 +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
712 +index 1ca929767a1b..0b6d27dfc234 100644
713 +--- a/arch/x86/kernel/ptrace.c
714 ++++ b/arch/x86/kernel/ptrace.c
715 +@@ -698,11 +698,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
716 + {
717 + struct thread_struct *thread = &tsk->thread;
718 + unsigned long val = 0;
719 +- int index = n;
720 +
721 + if (n < HBP_NUM) {
722 ++ int index = array_index_nospec(n, HBP_NUM);
723 + struct perf_event *bp = thread->ptrace_bps[index];
724 +- index = array_index_nospec(index, HBP_NUM);
725 +
726 + if (bp)
727 + val = bp->hw.info.address;
728 +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
729 +index b8105289c60b..178d63cac321 100644
730 +--- a/arch/x86/kernel/uprobes.c
731 ++++ b/arch/x86/kernel/uprobes.c
732 +@@ -514,9 +514,12 @@ struct uprobe_xol_ops {
733 + void (*abort)(struct arch_uprobe *, struct pt_regs *);
734 + };
735 +
736 +-static inline int sizeof_long(void)
737 ++static inline int sizeof_long(struct pt_regs *regs)
738 + {
739 +- return is_ia32_task() ? 4 : 8;
740 ++ /*
741 ++ * Check registers for mode as in_xxx_syscall() does not apply here.
742 ++ */
743 ++ return user_64bit_mode(regs) ? 8 : 4;
744 + }
745 +
746 + static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
747 +@@ -527,9 +530,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
748 +
749 + static int push_ret_address(struct pt_regs *regs, unsigned long ip)
750 + {
751 +- unsigned long new_sp = regs->sp - sizeof_long();
752 ++ unsigned long new_sp = regs->sp - sizeof_long(regs);
753 +
754 +- if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
755 ++ if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
756 + return -EFAULT;
757 +
758 + regs->sp = new_sp;
759 +@@ -562,7 +565,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
760 + long correction = utask->vaddr - utask->xol_vaddr;
761 + regs->ip += correction;
762 + } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
763 +- regs->sp += sizeof_long(); /* Pop incorrect return address */
764 ++ regs->sp += sizeof_long(regs); /* Pop incorrect return address */
765 + if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
766 + return -ERESTART;
767 + }
768 +@@ -671,7 +674,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
769 + * "call" insn was executed out-of-line. Just restore ->sp and restart.
770 + * We could also restore ->ip and try to call branch_emulate_op() again.
771 + */
772 +- regs->sp += sizeof_long();
773 ++ regs->sp += sizeof_long(regs);
774 + return -ERESTART;
775 + }
776 +
777 +@@ -962,7 +965,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
778 + unsigned long
779 + arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
780 + {
781 +- int rasize = sizeof_long(), nleft;
782 ++ int rasize = sizeof_long(regs), nleft;
783 + unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
784 +
785 + if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
786 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
787 +index 8613422660b6..9f70de2ca0e2 100644
788 +--- a/arch/x86/kvm/x86.c
789 ++++ b/arch/x86/kvm/x86.c
790 +@@ -5545,12 +5545,13 @@ restart:
791 + unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
792 + toggle_interruptibility(vcpu, ctxt->interruptibility);
793 + vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
794 +- kvm_rip_write(vcpu, ctxt->eip);
795 +- if (r == EMULATE_DONE && ctxt->tf)
796 +- kvm_vcpu_do_singlestep(vcpu, &r);
797 + if (!ctxt->have_exception ||
798 +- exception_type(ctxt->exception.vector) == EXCPT_TRAP)
799 ++ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
800 ++ kvm_rip_write(vcpu, ctxt->eip);
801 ++ if (r == EMULATE_DONE && ctxt->tf)
802 ++ kvm_vcpu_do_singlestep(vcpu, &r);
803 + __kvm_set_rflags(vcpu, ctxt->eflags);
804 ++ }
805 +
806 + /*
807 + * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
808 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
809 +index 9ab52791fed5..2e5052b2d238 100644
810 +--- a/arch/x86/power/cpu.c
811 ++++ b/arch/x86/power/cpu.c
812 +@@ -12,6 +12,7 @@
813 + #include <linux/export.h>
814 + #include <linux/smp.h>
815 + #include <linux/perf_event.h>
816 ++#include <linux/dmi.h>
817 +
818 + #include <asm/pgtable.h>
819 + #include <asm/proto.h>
820 +@@ -23,6 +24,7 @@
821 + #include <asm/debugreg.h>
822 + #include <asm/cpu.h>
823 + #include <asm/mmu_context.h>
824 ++#include <asm/cpu_device_id.h>
825 +
826 + #ifdef CONFIG_X86_32
827 + __visible unsigned long saved_context_ebx;
828 +@@ -32,6 +34,29 @@ __visible unsigned long saved_context_eflags;
829 + #endif
830 + struct saved_context saved_context;
831 +
832 ++static void msr_save_context(struct saved_context *ctxt)
833 ++{
834 ++ struct saved_msr *msr = ctxt->saved_msrs.array;
835 ++ struct saved_msr *end = msr + ctxt->saved_msrs.num;
836 ++
837 ++ while (msr < end) {
838 ++ msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
839 ++ msr++;
840 ++ }
841 ++}
842 ++
843 ++static void msr_restore_context(struct saved_context *ctxt)
844 ++{
845 ++ struct saved_msr *msr = ctxt->saved_msrs.array;
846 ++ struct saved_msr *end = msr + ctxt->saved_msrs.num;
847 ++
848 ++ while (msr < end) {
849 ++ if (msr->valid)
850 ++ wrmsrl(msr->info.msr_no, msr->info.reg.q);
851 ++ msr++;
852 ++ }
853 ++}
854 ++
855 + /**
856 + * __save_processor_state - save CPU registers before creating a
857 + * hibernation image and before restoring the memory state from it
858 +@@ -111,6 +136,7 @@ static void __save_processor_state(struct saved_context *ctxt)
859 + #endif
860 + ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
861 + &ctxt->misc_enable);
862 ++ msr_save_context(ctxt);
863 + }
864 +
865 + /* Needed by apm.c */
866 +@@ -229,6 +255,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
867 + x86_platform.restore_sched_clock_state();
868 + mtrr_bp_restore();
869 + perf_restore_debug_store();
870 ++ msr_restore_context(ctxt);
871 + }
872 +
873 + /* Needed by apm.c */
874 +@@ -320,3 +347,128 @@ static int __init bsp_pm_check_init(void)
875 + }
876 +
877 + core_initcall(bsp_pm_check_init);
878 ++
879 ++static int msr_build_context(const u32 *msr_id, const int num)
880 ++{
881 ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
882 ++ struct saved_msr *msr_array;
883 ++ int total_num;
884 ++ int i, j;
885 ++
886 ++ total_num = saved_msrs->num + num;
887 ++
888 ++ msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
889 ++ if (!msr_array) {
890 ++ pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
891 ++ return -ENOMEM;
892 ++ }
893 ++
894 ++ if (saved_msrs->array) {
895 ++ /*
896 ++ * Multiple callbacks can invoke this function, so copy any
897 ++ * MSR save requests from previous invocations.
898 ++ */
899 ++ memcpy(msr_array, saved_msrs->array,
900 ++ sizeof(struct saved_msr) * saved_msrs->num);
901 ++
902 ++ kfree(saved_msrs->array);
903 ++ }
904 ++
905 ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
906 ++ msr_array[i].info.msr_no = msr_id[j];
907 ++ msr_array[i].valid = false;
908 ++ msr_array[i].info.reg.q = 0;
909 ++ }
910 ++ saved_msrs->num = total_num;
911 ++ saved_msrs->array = msr_array;
912 ++
913 ++ return 0;
914 ++}
915 ++
916 ++/*
917 ++ * The following sections are a quirk framework for problematic BIOSen:
918 ++ * Sometimes MSRs are modified by the BIOSen after suspended to
919 ++ * RAM, this might cause unexpected behavior after wakeup.
920 ++ * Thus we save/restore these specified MSRs across suspend/resume
921 ++ * in order to work around it.
922 ++ *
923 ++ * For any further problematic BIOSen/platforms,
924 ++ * please add your own function similar to msr_initialize_bdw.
925 ++ */
926 ++static int msr_initialize_bdw(const struct dmi_system_id *d)
927 ++{
928 ++ /* Add any extra MSR ids into this array. */
929 ++ u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
930 ++
931 ++ pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
932 ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
933 ++}
934 ++
935 ++static struct dmi_system_id msr_save_dmi_table[] = {
936 ++ {
937 ++ .callback = msr_initialize_bdw,
938 ++ .ident = "BROADWELL BDX_EP",
939 ++ .matches = {
940 ++ DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
941 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
942 ++ },
943 ++ },
944 ++ {}
945 ++};
946 ++
947 ++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
948 ++{
949 ++ u32 cpuid_msr_id[] = {
950 ++ MSR_AMD64_CPUID_FN_1,
951 ++ };
952 ++
953 ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
954 ++ c->family);
955 ++
956 ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
957 ++}
958 ++
959 ++static const struct x86_cpu_id msr_save_cpu_table[] = {
960 ++ {
961 ++ .vendor = X86_VENDOR_AMD,
962 ++ .family = 0x15,
963 ++ .model = X86_MODEL_ANY,
964 ++ .feature = X86_FEATURE_ANY,
965 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
966 ++ },
967 ++ {
968 ++ .vendor = X86_VENDOR_AMD,
969 ++ .family = 0x16,
970 ++ .model = X86_MODEL_ANY,
971 ++ .feature = X86_FEATURE_ANY,
972 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
973 ++ },
974 ++ {}
975 ++};
976 ++
977 ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
978 ++static int pm_cpu_check(const struct x86_cpu_id *c)
979 ++{
980 ++ const struct x86_cpu_id *m;
981 ++ int ret = 0;
982 ++
983 ++ m = x86_match_cpu(msr_save_cpu_table);
984 ++ if (m) {
985 ++ pm_cpu_match_t fn;
986 ++
987 ++ fn = (pm_cpu_match_t)m->driver_data;
988 ++ ret = fn(m);
989 ++ }
990 ++
991 ++ return ret;
992 ++}
993 ++
994 ++static int pm_check_save_msr(void)
995 ++{
996 ++ dmi_check_system(msr_save_dmi_table);
997 ++ pm_cpu_check(msr_save_cpu_table);
998 ++
999 ++ return 0;
1000 ++}
1001 ++
1002 ++device_initcall(pm_check_save_msr);
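
The comment block above invites further quirk callbacks; a hedged sketch of what an additional DMI-matched entry might look like, where the function name, ident, DMI strings, and MSR list are all hypothetical:

/* Hypothetical callback in the style of msr_initialize_bdw() above */
static int msr_initialize_foo(const struct dmi_system_id *d)
{
	u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };	/* MSRs this BIOS clobbers */

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
		d->ident);
	return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
}

/* ...and its msr_save_dmi_table[] entry, placed before the terminating {}: */
	{
		.callback = msr_initialize_foo,
		.ident = "EXAMPLE PLATFORM",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE"),
		},
	},
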
1003 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1004 +index 18de4c457068..1d8901fc0bfa 100644
1005 +--- a/drivers/ata/libata-sff.c
1006 ++++ b/drivers/ata/libata-sff.c
1007 +@@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1008 + unsigned int offset;
1009 + unsigned char *buf;
1010 +
1011 ++ if (!qc->cursg) {
1012 ++ qc->curbytes = qc->nbytes;
1013 ++ return;
1014 ++ }
1015 + if (qc->curbytes == qc->nbytes - qc->sect_size)
1016 + ap->hsm_task_state = HSM_ST_LAST;
1017 +
1018 +@@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
1019 +
1020 + if (qc->cursg_ofs == qc->cursg->length) {
1021 + qc->cursg = sg_next(qc->cursg);
1022 ++ if (!qc->cursg)
1023 ++ ap->hsm_task_state = HSM_ST_LAST;
1024 + qc->cursg_ofs = 0;
1025 + }
1026 + }
1027 +diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
1028 +index dd3e7ba273ad..0fede051f4e1 100644
1029 +--- a/drivers/dma/ste_dma40.c
1030 ++++ b/drivers/dma/ste_dma40.c
1031 +@@ -142,7 +142,7 @@ enum d40_events {
1032 + * when the DMA hw is powered off.
1033 + * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
1034 + */
1035 +-static u32 d40_backup_regs[] = {
1036 ++static __maybe_unused u32 d40_backup_regs[] = {
1037 + D40_DREG_LCPA,
1038 + D40_DREG_LCLA,
1039 + D40_DREG_PRMSE,
1040 +@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
1041 +
1042 + #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
1043 +
1044 +-static u32 d40_backup_regs_chan[] = {
1045 ++static __maybe_unused u32 d40_backup_regs_chan[] = {
1046 + D40_CHAN_REG_SSCFG,
1047 + D40_CHAN_REG_SSELT,
1048 + D40_CHAN_REG_SSPTR,
1049 +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
1050 +index b83376077d72..cfa0cb22c9b3 100644
1051 +--- a/drivers/hid/hid-tmff.c
1052 ++++ b/drivers/hid/hid-tmff.c
1053 +@@ -34,6 +34,8 @@
1054 +
1055 + #include "hid-ids.h"
1056 +
1057 ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
1058 ++
1059 + static const signed short ff_rumble[] = {
1060 + FF_RUMBLE,
1061 + -1
1062 +@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
1063 + struct hid_field *ff_field = tmff->ff_field;
1064 + int x, y;
1065 + int left, right; /* Rumbling */
1066 ++ int motor_swap;
1067 +
1068 + switch (effect->type) {
1069 + case FF_CONSTANT:
1070 +@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
1071 + ff_field->logical_minimum,
1072 + ff_field->logical_maximum);
1073 +
1074 ++ /* 2-in-1 strong motor is left */
1075 ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
1076 ++ motor_swap = left;
1077 ++ left = right;
1078 ++ right = motor_swap;
1079 ++ }
1080 ++
1081 + dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
1082 + ff_field->value[0] = left;
1083 + ff_field->value[1] = right;
1084 +@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
1085 + .driver_data = (unsigned long)ff_rumble },
1086 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
1087 + .driver_data = (unsigned long)ff_rumble },
1088 ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
1089 ++ .driver_data = (unsigned long)ff_rumble },
1090 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
1091 + .driver_data = (unsigned long)ff_rumble },
1092 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
1093 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1094 +index b184956bd430..72a1fdd529be 100644
1095 +--- a/drivers/hid/wacom_wac.c
1096 ++++ b/drivers/hid/wacom_wac.c
1097 +@@ -674,7 +674,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1098 + input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1099 +
1100 + if (data[12] & 0x80)
1101 +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
1102 ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1103 + else
1104 + input_report_abs(input, ABS_WHEEL, 0);
1105 +
1106 +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
1107 +index e880702a3784..5eb9c3bba216 100644
1108 +--- a/drivers/hwtracing/stm/core.c
1109 ++++ b/drivers/hwtracing/stm/core.c
1110 +@@ -1020,7 +1020,6 @@ int stm_source_register_device(struct device *parent,
1111 +
1112 + err:
1113 + put_device(&src->dev);
1114 +- kfree(src);
1115 +
1116 + return err;
1117 + }
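
Context for the deletion above: put_device() drops the last reference on the freshly initialized device, and the stm_source device's release callback already frees src, so the explicit kfree() on this error path was a double free.
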
1118 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1119 +index c60c7998af17..726fba452f5f 100644
1120 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1121 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1122 +@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1123 + printk(KERN_DEBUG
1124 + "%s: %s: alloc urb for fifo %i failed",
1125 + hw->name, __func__, fifo->fifonum);
1126 ++ continue;
1127 + }
1128 + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1129 + fifo->iso[i].indx = i;
1130 +@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1131 + static int
1132 + setup_hfcsusb(struct hfcsusb *hw)
1133 + {
1134 ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1135 + u_char b;
1136 ++ int ret;
1137 +
1138 + if (debug & DBG_HFC_CALL_TRACE)
1139 + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1140 +
1141 ++ if (!dmabuf)
1142 ++ return -ENOMEM;
1143 ++
1144 ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1145 ++
1146 ++ memcpy(&b, dmabuf, sizeof(u_char));
1147 ++ kfree(dmabuf);
1148 ++
1149 + /* check the chip id */
1150 +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1151 ++ if (ret != 1) {
1152 + printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1153 + hw->name, __func__);
1154 + return 1;
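
Context for the change above: USB transfer buffers must be suitable for DMA, and a variable on the kernel stack is not, so the chip-id read now goes through a small kmalloc()'d bounce buffer whose contents are copied into b before being freed.
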
1155 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1156 +index 8a6e7646e1c9..b1d5fa0bc8f7 100644
1157 +--- a/drivers/md/dm-bufio.c
1158 ++++ b/drivers/md/dm-bufio.c
1159 +@@ -1561,7 +1561,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1160 + unsigned long freed;
1161 +
1162 + c = container_of(shrink, struct dm_bufio_client, shrinker);
1163 +- if (!dm_bufio_trylock(c))
1164 ++ if (sc->gfp_mask & __GFP_FS)
1165 ++ dm_bufio_lock(c);
1166 ++ else if (!dm_bufio_trylock(c))
1167 + return SHRINK_STOP;
1168 +
1169 + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1170 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1171 +index cb5d0daf53bb..466158d06ab1 100644
1172 +--- a/drivers/md/dm-table.c
1173 ++++ b/drivers/md/dm-table.c
1174 +@@ -1167,7 +1167,7 @@ void dm_table_event(struct dm_table *t)
1175 + }
1176 + EXPORT_SYMBOL(dm_table_event);
1177 +
1178 +-sector_t dm_table_get_size(struct dm_table *t)
1179 ++inline sector_t dm_table_get_size(struct dm_table *t)
1180 + {
1181 + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1182 + }
1183 +@@ -1192,6 +1192,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1184 + unsigned int l, n = 0, k = 0;
1185 + sector_t *node;
1186 +
1187 ++ if (unlikely(sector >= dm_table_get_size(t)))
1188 ++ return &t->targets[t->num_targets];
1189 ++
1190 + for (l = 0; l < t->depth; l++) {
1191 + n = get_child(n, k);
1192 + node = get_node(t, l, n);
1193 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1194 +index 880b7dee9c52..fa9039a53ae5 100644
1195 +--- a/drivers/md/persistent-data/dm-btree.c
1196 ++++ b/drivers/md/persistent-data/dm-btree.c
1197 +@@ -616,39 +616,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1198 +
1199 + new_parent = shadow_current(s);
1200 +
1201 ++ pn = dm_block_data(new_parent);
1202 ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1203 ++ sizeof(__le64) : s->info->value_type.size;
1204 ++
1205 ++ /* create & init the left block */
1206 + r = new_block(s->info, &left);
1207 + if (r < 0)
1208 + return r;
1209 +
1210 ++ ln = dm_block_data(left);
1211 ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1212 ++
1213 ++ ln->header.flags = pn->header.flags;
1214 ++ ln->header.nr_entries = cpu_to_le32(nr_left);
1215 ++ ln->header.max_entries = pn->header.max_entries;
1216 ++ ln->header.value_size = pn->header.value_size;
1217 ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1218 ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1219 ++
1220 ++ /* create & init the right block */
1221 + r = new_block(s->info, &right);
1222 + if (r < 0) {
1223 + unlock_block(s->info, left);
1224 + return r;
1225 + }
1226 +
1227 +- pn = dm_block_data(new_parent);
1228 +- ln = dm_block_data(left);
1229 + rn = dm_block_data(right);
1230 +-
1231 +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1232 + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1233 +
1234 +- ln->header.flags = pn->header.flags;
1235 +- ln->header.nr_entries = cpu_to_le32(nr_left);
1236 +- ln->header.max_entries = pn->header.max_entries;
1237 +- ln->header.value_size = pn->header.value_size;
1238 +-
1239 + rn->header.flags = pn->header.flags;
1240 + rn->header.nr_entries = cpu_to_le32(nr_right);
1241 + rn->header.max_entries = pn->header.max_entries;
1242 + rn->header.value_size = pn->header.value_size;
1243 +-
1244 +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1245 + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1246 +-
1247 +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1248 +- sizeof(__le64) : s->info->value_type.size;
1249 +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1250 + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1251 + nr_right * size);
1252 +
1253 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1254 +index 20557e2c60c6..1d29771af380 100644
1255 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1256 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1257 +@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
1258 + }
1259 +
1260 + if (smm->recursion_count == 1)
1261 +- apply_bops(smm);
1262 ++ r = apply_bops(smm);
1263 +
1264 + smm->recursion_count--;
1265 +
1266 +diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
1267 +index a8cee33ae8d2..305a3449e946 100644
1268 +--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
1269 ++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
1270 +@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
1271 +
1272 + entry = container_of(resource, struct dbell_entry, resource);
1273 + if (entry->run_delayed) {
1274 +- schedule_work(&entry->work);
1275 ++ if (!schedule_work(&entry->work))
1276 ++ vmci_resource_put(resource);
1277 + } else {
1278 + entry->notify_cb(entry->client_data);
1279 + vmci_resource_put(resource);
1280 +@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
1281 + atomic_read(&dbell->active) == 1) {
1282 + if (dbell->run_delayed) {
1283 + vmci_resource_get(&dbell->resource);
1284 +- schedule_work(&dbell->work);
1285 ++ if (!schedule_work(&dbell->work))
1286 ++ vmci_resource_put(&dbell->resource);
1287 + } else {
1288 + dbell->notify_cb(dbell->client_data);
1289 + }
1290 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
1291 +index fb8741f18c1f..54ba1abb5460 100644
1292 +--- a/drivers/mmc/core/sd.c
1293 ++++ b/drivers/mmc/core/sd.c
1294 +@@ -1232,6 +1232,12 @@ int mmc_attach_sd(struct mmc_host *host)
1295 + goto err;
1296 + }
1297 +
1298 ++ /*
1299 ++ * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
1300 ++ * these bits as invalid, especially also bit7.
1301 ++ */
1302 ++ ocr &= ~0x7FFF;
1303 ++
1304 + rocr = mmc_select_voltage(host, ocr);
1305 +
1306 + /*
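
For reference on the mask above: in the SD OCR register the usable VDD voltage window is advertised in bits 15-23 (2.7-3.6 V), while bits 0-14 are reserved (bit 7 being the reserved low-voltage bit that out-of-spec cards are seen setting), so ocr &= ~0x7FFF drops all of the reserved low bits before voltage selection.
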
1307 +diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1308 +index 06d0b50dfe71..4e374a05319c 100644
1309 +--- a/drivers/mmc/host/sdhci-of-at91.c
1310 ++++ b/drivers/mmc/host/sdhci-of-at91.c
1311 +@@ -144,6 +144,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1312 +
1313 + sdhci_get_of_property(pdev);
1314 +
1315 ++ /* HS200 is broken at this moment */
1316 ++ host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
1317 ++
1318 + ret = sdhci_add_host(host);
1319 + if (ret)
1320 + goto clocks_disable_unprepare;
1321 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1322 +index 168f2331194f..fd6aff9f0052 100644
1323 +--- a/drivers/net/bonding/bond_main.c
1324 ++++ b/drivers/net/bonding/bond_main.c
1325 +@@ -2081,6 +2081,15 @@ static void bond_miimon_commit(struct bonding *bond)
1326 + bond_for_each_slave(bond, slave, iter) {
1327 + switch (slave->new_link) {
1328 + case BOND_LINK_NOCHANGE:
1329 ++ /* For 802.3ad mode, check current slave speed and
1330 ++ * duplex again in case its port was disabled after
1331 ++ * invalid speed/duplex reporting but recovered before
1332 ++ * link monitoring could make a decision on the actual
1333 ++ * link status
1334 ++ */
1335 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1336 ++ slave->link == BOND_LINK_UP)
1337 ++ bond_3ad_adapter_speed_duplex_changed(slave);
1338 + continue;
1339 +
1340 + case BOND_LINK_UP:
1341 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1342 +index 8b7c6425b681..9dd968ee792e 100644
1343 +--- a/drivers/net/can/dev.c
1344 ++++ b/drivers/net/can/dev.c
1345 +@@ -1065,6 +1065,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1346 + int register_candev(struct net_device *dev)
1347 + {
1348 + dev->rtnl_link_ops = &can_link_ops;
1349 ++ netif_carrier_off(dev);
1350 ++
1351 + return register_netdev(dev);
1352 + }
1353 + EXPORT_SYMBOL_GPL(register_candev);
1354 +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1355 +index dd56133cc461..fc9f8b01ecae 100644
1356 +--- a/drivers/net/can/sja1000/peak_pcmcia.c
1357 ++++ b/drivers/net/can/sja1000/peak_pcmcia.c
1358 +@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1359 + if (!netdev)
1360 + continue;
1361 +
1362 +- strncpy(name, netdev->name, IFNAMSIZ);
1363 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1364 +
1365 + unregister_sja1000dev(netdev);
1366 +
1367 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1368 +index e13bc27b4291..b1d68f49b398 100644
1369 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1370 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1371 +@@ -881,7 +881,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1372 +
1373 + dev_prev_siblings = dev->prev_siblings;
1374 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
1375 +- strncpy(name, netdev->name, IFNAMSIZ);
1376 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1377 +
1378 + unregister_netdev(netdev);
1379 +
1380 +diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
1381 +index 9cc5daed13ed..b0285ac203f0 100644
1382 +--- a/drivers/net/ethernet/arc/emac_main.c
1383 ++++ b/drivers/net/ethernet/arc/emac_main.c
1384 +@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
1385 + struct sk_buff *skb = tx_buff->skb;
1386 + unsigned int info = le32_to_cpu(txbd->info);
1387 +
1388 +- if ((info & FOR_EMAC) || !txbd->data)
1389 ++ if ((info & FOR_EMAC) || !txbd->data || !skb)
1390 + break;
1391 +
1392 + if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
1393 +@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
1394 +
1395 + txbd->data = 0;
1396 + txbd->info = 0;
1397 ++ tx_buff->skb = NULL;
1398 +
1399 + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
1400 + }
1401 +@@ -619,7 +620,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
1402 + dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
1403 + dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
1404 +
1405 +- priv->tx_buff[*txbd_curr].skb = skb;
1406 + priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
1407 +
1408 + /* Make sure pointer to data buffer is set */
1409 +@@ -629,6 +629,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
1410 +
1411 + *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
1412 +
1413 ++ /* Make sure info word is set */
1414 ++ wmb();
1415 ++
1416 ++ priv->tx_buff[*txbd_curr].skb = skb;
1417 ++
1418 + /* Increment index to point to the next BD */
1419 + *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
1420 +
1421 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1422 +index 3dd4c39640dc..bee615cddbdd 100644
1423 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1424 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1425 +@@ -3260,7 +3260,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1426 + if (!adapter->regs) {
1427 + dev_err(&pdev->dev, "cannot map device registers\n");
1428 + err = -ENOMEM;
1429 +- goto out_free_adapter;
1430 ++ goto out_free_adapter_nofail;
1431 + }
1432 +
1433 + adapter->pdev = pdev;
1434 +@@ -3378,6 +3378,9 @@ out_free_dev:
1435 + if (adapter->port[i])
1436 + free_netdev(adapter->port[i]);
1437 +
1438 ++out_free_adapter_nofail:
1439 ++ kfree_skb(adapter->nofail_skb);
1440 ++
1441 + out_free_adapter:
1442 + kfree(adapter);
1443 +
1444 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
1445 +index 60c727b0b7ab..def831c89d35 100644
1446 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
1447 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
1448 +@@ -157,6 +157,7 @@ struct hip04_priv {
1449 + unsigned int reg_inten;
1450 +
1451 + struct napi_struct napi;
1452 ++ struct device *dev;
1453 + struct net_device *ndev;
1454 +
1455 + struct tx_desc *tx_desc;
1456 +@@ -185,7 +186,7 @@ struct hip04_priv {
1457 +
1458 + static inline unsigned int tx_count(unsigned int head, unsigned int tail)
1459 + {
1460 +- return (head - tail) % (TX_DESC_NUM - 1);
1461 ++ return (head - tail) % TX_DESC_NUM;
1462 + }
1463 +
1464 + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
1465 +@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
1466 + }
1467 +
1468 + if (priv->tx_phys[tx_tail]) {
1469 +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
1470 ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
1471 + priv->tx_skb[tx_tail]->len,
1472 + DMA_TO_DEVICE);
1473 + priv->tx_phys[tx_tail] = 0;
1474 +@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1475 + return NETDEV_TX_BUSY;
1476 + }
1477 +
1478 +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1479 +- if (dma_mapping_error(&ndev->dev, phys)) {
1480 ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
1481 ++ if (dma_mapping_error(priv->dev, phys)) {
1482 + dev_kfree_skb(skb);
1483 + return NETDEV_TX_OK;
1484 + }
1485 +@@ -497,13 +498,16 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1486 + u16 len;
1487 + u32 err;
1488 +
1489 ++ /* clean up tx descriptors */
1490 ++ tx_remaining = hip04_tx_reclaim(ndev, false);
1491 ++
1492 + while (cnt && !last) {
1493 + buf = priv->rx_buf[priv->rx_head];
1494 + skb = build_skb(buf, priv->rx_buf_size);
1495 + if (unlikely(!skb))
1496 + net_dbg_ratelimited("build_skb failed\n");
1497 +
1498 +- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
1499 ++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
1500 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1501 + priv->rx_phys[priv->rx_head] = 0;
1502 +
1503 +@@ -531,9 +535,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1504 + buf = netdev_alloc_frag(priv->rx_buf_size);
1505 + if (!buf)
1506 + goto done;
1507 +- phys = dma_map_single(&ndev->dev, buf,
1508 ++ phys = dma_map_single(priv->dev, buf,
1509 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1510 +- if (dma_mapping_error(&ndev->dev, phys))
1511 ++ if (dma_mapping_error(priv->dev, phys))
1512 + goto done;
1513 + priv->rx_buf[priv->rx_head] = buf;
1514 + priv->rx_phys[priv->rx_head] = phys;
1515 +@@ -554,8 +558,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1516 + }
1517 + napi_complete(napi);
1518 + done:
1519 +- /* clean up tx descriptors and start a new timer if necessary */
1520 +- tx_remaining = hip04_tx_reclaim(ndev, false);
1521 ++ /* start a new timer if necessary */
1522 + if (rx < budget && tx_remaining)
1523 + hip04_start_tx_timer(priv);
1524 +
1525 +@@ -637,9 +640,9 @@ static int hip04_mac_open(struct net_device *ndev)
1526 + for (i = 0; i < RX_DESC_NUM; i++) {
1527 + dma_addr_t phys;
1528 +
1529 +- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
1530 ++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
1531 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1532 +- if (dma_mapping_error(&ndev->dev, phys))
1533 ++ if (dma_mapping_error(priv->dev, phys))
1534 + return -EIO;
1535 +
1536 + priv->rx_phys[i] = phys;
1537 +@@ -673,7 +676,7 @@ static int hip04_mac_stop(struct net_device *ndev)
1538 +
1539 + for (i = 0; i < RX_DESC_NUM; i++) {
1540 + if (priv->rx_phys[i]) {
1541 +- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
1542 ++ dma_unmap_single(priv->dev, priv->rx_phys[i],
1543 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1544 + priv->rx_phys[i] = 0;
1545 + }
1546 +@@ -824,6 +827,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
1547 + return -ENOMEM;
1548 +
1549 + priv = netdev_priv(ndev);
1550 ++ priv->dev = d;
1551 + priv->ndev = ndev;
1552 + platform_set_drvdata(pdev, ndev);
1553 +
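Several independent defects are addressed in the hip04 hunks. DMA mapping and unmapping now go through priv->dev, the platform device saved at probe time, rather than &ndev->dev: the net_device's embedded struct device is a purely logical one with no DMA configuration, so mappings made against it can fail outright. Moving hip04_tx_reclaim() ahead of napi_complete() also closes a window in which a new poll could start and run the reclaim concurrently with the old one. Finally, tx_count() now reduces the head/tail distance modulo TX_DESC_NUM instead of TX_DESC_NUM - 1: both indices wrap at TX_DESC_NUM, so the old modulus misreports a nearly full ring as empty and lets the xmit path overwrite in-flight descriptors. A standalone check of that arithmetic, using a hypothetical 4-entry ring:

	#include <stdio.h>

	#define N 4u	/* stand-in for TX_DESC_NUM */

	static unsigned int count_fixed(unsigned int head, unsigned int tail)
	{
		return (head - tail) % N;	/* head 0, tail 1 -> 3 in flight */
	}

	static unsigned int count_buggy(unsigned int head, unsigned int tail)
	{
		return (head - tail) % (N - 1);	/* same state -> 0: ring looks empty */
	}

	int main(void)
	{
		printf("fixed=%u buggy=%u\n", count_fixed(0, 1), count_buggy(0, 1));
		return 0;
	}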
1554 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1555 +index ee6fefe92af4..4391430e2527 100644
1556 +--- a/drivers/net/usb/qmi_wwan.c
1557 ++++ b/drivers/net/usb/qmi_wwan.c
1558 +@@ -719,6 +719,7 @@ static const struct usb_device_id products[] = {
1559 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1560 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1561 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1562 ++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1563 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1564 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1565 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1566 +diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
1567 +index dbab722a0654..6f9d9b90ac64 100644
1568 +--- a/drivers/nfc/st-nci/se.c
1569 ++++ b/drivers/nfc/st-nci/se.c
1570 +@@ -346,6 +346,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
1571 +
1572 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1573 + skb->len - 2, GFP_KERNEL);
1574 ++ if (!transaction)
1575 ++ return -ENOMEM;
1576 +
1577 + transaction->aid_len = skb->data[1];
1578 + memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
1579 +diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
1580 +index c79d99b24c96..f1b96b5255e0 100644
1581 +--- a/drivers/nfc/st21nfca/se.c
1582 ++++ b/drivers/nfc/st21nfca/se.c
1583 +@@ -327,6 +327,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
1584 +
1585 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1586 + skb->len - 2, GFP_KERNEL);
1587 ++ if (!transaction)
1588 ++ return -ENOMEM;
1589 +
1590 + transaction->aid_len = skb->data[1];
1591 + memcpy(transaction->aid, &skb->data[2],
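Both NFC hunks (st-nci and st21nfca) add the same missing guard: devm_kzalloc() returns NULL when memory is exhausted, and the very next statements write to and memcpy() into the buffer. The two added lines are the whole fix:

	transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
	if (!transaction)
		return -ENOMEM;

Without the check, an allocation failure in this event handler becomes a NULL-pointer dereference instead of a clean error return.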
1592 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1593 +index b140e81c4f7d..fd8bbd2b5d0e 100644
1594 +--- a/drivers/scsi/ufs/ufshcd.c
1595 ++++ b/drivers/scsi/ufs/ufshcd.c
1596 +@@ -4418,6 +4418,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
1597 + static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
1598 + struct ufs_vreg *vreg)
1599 + {
1600 ++ if (!vreg)
1601 ++ return 0;
1602 ++
1603 + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
1604 + }
1605 +
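The ufshcd guard makes ufshcd_config_vreg_hpm() tolerate an absent optional regulator: vreg is dereferenced for vreg->max_uA on the very next line, and not every host populates every ufs_vreg, so a NULL here must mean "nothing to configure" rather than an oops. A condensed view of the fixed function:

	static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
						 struct ufs_vreg *vreg)
	{
		if (!vreg)	/* optional supply not provided */
			return 0;
		return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
	}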
1606 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1607 +index 61ea87917433..71ad04d54212 100644
1608 +--- a/drivers/usb/class/cdc-wdm.c
1609 ++++ b/drivers/usb/class/cdc-wdm.c
1610 +@@ -577,10 +577,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
1611 + {
1612 + struct wdm_device *desc = file->private_data;
1613 +
1614 +- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
1615 ++ wait_event(desc->wait,
1616 ++ /*
1617 ++ * needs both flags. We cannot do with one
1618 ++ * because resetting it would cause a race
1619 ++ * with write() yet we need to signal
1620 ++ * a disconnect
1621 ++ */
1622 ++ !test_bit(WDM_IN_USE, &desc->flags) ||
1623 ++ test_bit(WDM_DISCONNECTING, &desc->flags));
1624 +
1625 + /* cannot dereference desc->intf if WDM_DISCONNECTING */
1626 +- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
1627 ++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
1628 ++ return -ENODEV;
1629 ++ if (desc->werr < 0)
1630 + dev_err(&desc->intf->dev, "Error in flush path: %d\n",
1631 + desc->werr);
1632 +
1633 +@@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
1634 + spin_lock_irqsave(&desc->iuspin, flags);
1635 + set_bit(WDM_DISCONNECTING, &desc->flags);
1636 + set_bit(WDM_READ, &desc->flags);
1637 +- /* to terminate pending flushes */
1638 +- clear_bit(WDM_IN_USE, &desc->flags);
1639 + spin_unlock_irqrestore(&desc->iuspin, flags);
1640 + wake_up_all(&desc->wait);
1641 + mutex_lock(&desc->rlock);
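The cdc-wdm change reworks how a pending flush learns about disconnect. Previously wdm_disconnect() cleared WDM_IN_USE to kick waiters out of wdm_flush(), but write() also owns that bit, so clearing it from disconnect raced with an in-flight write. The fix leaves the bit alone and instead has the waiter watch both conditions, testing WDM_DISCONNECTING first because desc->intf must not be touched once the device is gone. Condensed, the patched wait is:

	wait_event(desc->wait,
		   !test_bit(WDM_IN_USE, &desc->flags) ||	/* write completed */
		   test_bit(WDM_DISCONNECTING, &desc->flags));	/* device gone */

	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		return -ENODEV;		/* desc->intf is no longer valid */

The ordering of the two checks is the point: only after ruling out disconnect is it safe to report desc->werr via dev_err() on desc->intf.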
1642 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1643 +index 8bf54477f472..351a406b97af 100644
1644 +--- a/drivers/usb/gadget/composite.c
1645 ++++ b/drivers/usb/gadget/composite.c
1646 +@@ -1889,6 +1889,7 @@ void composite_disconnect(struct usb_gadget *gadget)
1647 + * disconnect callbacks?
1648 + */
1649 + spin_lock_irqsave(&cdev->lock, flags);
1650 ++ cdev->suspended = 0;
1651 + if (cdev->config)
1652 + reset_config(cdev);
1653 + if (cdev->driver->disconnect)
1654 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
1655 +index 2341af4f3490..11b3a8c57eab 100644
1656 +--- a/drivers/usb/host/fotg210-hcd.c
1657 ++++ b/drivers/usb/host/fotg210-hcd.c
1658 +@@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1659 + /* see what we found out */
1660 + temp = check_reset_complete(fotg210, wIndex, status_reg,
1661 + fotg210_readl(fotg210, status_reg));
1662 ++
1663 ++ /* restart schedule */
1664 ++ fotg210->command |= CMD_RUN;
1665 ++ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1666 + }
1667 +
1668 + if (!(temp & (PORT_RESUME|PORT_RESET))) {
1669 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
1670 +index 602c6e42c34d..27bd3e49fe8e 100644
1671 +--- a/drivers/usb/host/ohci-hcd.c
1672 ++++ b/drivers/usb/host/ohci-hcd.c
1673 +@@ -415,8 +415,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
1674 + * other cases where the next software may expect clean state from the
1675 + * "firmware". this is bus-neutral, unlike shutdown() methods.
1676 + */
1677 +-static void
1678 +-ohci_shutdown (struct usb_hcd *hcd)
1679 ++static void _ohci_shutdown(struct usb_hcd *hcd)
1680 + {
1681 + struct ohci_hcd *ohci;
1682 +
1683 +@@ -432,6 +431,16 @@ ohci_shutdown (struct usb_hcd *hcd)
1684 + ohci->rh_state = OHCI_RH_HALTED;
1685 + }
1686 +
1687 ++static void ohci_shutdown(struct usb_hcd *hcd)
1688 ++{
1689 ++ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
1690 ++ unsigned long flags;
1691 ++
1692 ++ spin_lock_irqsave(&ohci->lock, flags);
1693 ++ _ohci_shutdown(hcd);
1694 ++ spin_unlock_irqrestore(&ohci->lock, flags);
1695 ++}
1696 ++
1697 + /*-------------------------------------------------------------------------*
1698 + * HC functions
1699 + *-------------------------------------------------------------------------*/
1700 +@@ -750,7 +759,7 @@ static void io_watchdog_func(unsigned long _ohci)
1701 + died:
1702 + usb_hc_died(ohci_to_hcd(ohci));
1703 + ohci_dump(ohci);
1704 +- ohci_shutdown(ohci_to_hcd(ohci));
1705 ++ _ohci_shutdown(ohci_to_hcd(ohci));
1706 + goto done;
1707 + } else {
1708 + /* No write back because the done queue was empty */
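The OHCI change is the standard locked/unlocked split: the body of the shutdown routine becomes _ohci_shutdown(), which assumes the caller holds ohci->lock, and the ohci_shutdown() entry point used by the USB core becomes a thin wrapper that takes the lock. The watchdog path, which already runs under ohci->lock, switches to the bare variant, since calling the locking wrapper from there would self-deadlock on a non-recursive spinlock. The shape of the pattern, following the patch:

	static void _ohci_shutdown(struct usb_hcd *hcd)	/* caller holds ohci->lock */
	{
		/* ... halt the controller ... */
	}

	static void ohci_shutdown(struct usb_hcd *hcd)
	{
		struct ohci_hcd *ohci = hcd_to_ohci(hcd);
		unsigned long flags;

		spin_lock_irqsave(&ohci->lock, flags);
		_ohci_shutdown(hcd);
		spin_unlock_irqrestore(&ohci->lock, flags);
	}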
1709 +diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
1710 +index be432bec0c5b..f0ca9feaea1d 100644
1711 +--- a/drivers/usb/storage/realtek_cr.c
1712 ++++ b/drivers/usb/storage/realtek_cr.c
1713 +@@ -50,7 +50,7 @@ MODULE_VERSION("1.03");
1714 +
1715 + static int auto_delink_en = 1;
1716 + module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
1717 +-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
1718 ++MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
1719 +
1720 + #ifdef CONFIG_REALTEK_AUTOPM
1721 + static int ss_en = 1;
1722 +@@ -1006,12 +1006,15 @@ static int init_realtek_cr(struct us_data *us)
1723 + goto INIT_FAIL;
1724 + }
1725 +
1726 +- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1727 +- CHECK_FW_VER(chip, 0x5901))
1728 +- SET_AUTO_DELINK(chip);
1729 +- if (STATUS_LEN(chip) == 16) {
1730 +- if (SUPPORT_AUTO_DELINK(chip))
1731 ++ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
1732 ++ CHECK_PID(chip, 0x0159)) {
1733 ++ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1734 ++ CHECK_FW_VER(chip, 0x5901))
1735 + SET_AUTO_DELINK(chip);
1736 ++ if (STATUS_LEN(chip) == 16) {
1737 ++ if (SUPPORT_AUTO_DELINK(chip))
1738 ++ SET_AUTO_DELINK(chip);
1739 ++ }
1740 + }
1741 + #ifdef CONFIG_REALTEK_AUTOPM
1742 + if (ss_en)
1743 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1744 +index d92b974f0635..a98259e136dd 100644
1745 +--- a/drivers/usb/storage/unusual_devs.h
1746 ++++ b/drivers/usb/storage/unusual_devs.h
1747 +@@ -2006,7 +2006,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1748 + US_FL_IGNORE_RESIDUE ),
1749 +
1750 + /* Reported by Michael Büsch <m@××××.ch> */
1751 +-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
1752 ++UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
1753 + "JMicron",
1754 + "USB to ATA/ATAPI Bridge",
1755 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1756 +diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1757 +index f46317135224..1459dc9fd701 100644
1758 +--- a/drivers/vhost/net.c
1759 ++++ b/drivers/vhost/net.c
1760 +@@ -39,6 +39,12 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
1761 + * Using this limit prevents one virtqueue from starving others. */
1762 + #define VHOST_NET_WEIGHT 0x80000
1763 +
1764 ++/* Max number of packets transferred before requeueing the job.
1765 ++ * Using this limit prevents one virtqueue from starving others with small
1766 ++ * pkts.
1767 ++ */
1768 ++#define VHOST_NET_PKT_WEIGHT 256
1769 ++
1770 + /* MAX number of TX used buffers for outstanding zerocopy */
1771 + #define VHOST_MAX_PEND 128
1772 + #define VHOST_GOODCOPY_LEN 256
1773 +@@ -308,6 +314,7 @@ static void handle_tx(struct vhost_net *net)
1774 + struct socket *sock;
1775 + struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
1776 + bool zcopy, zcopy_used;
1777 ++ int sent_pkts = 0;
1778 +
1779 + mutex_lock(&vq->mutex);
1780 + sock = vq->private_data;
1781 +@@ -319,7 +326,7 @@ static void handle_tx(struct vhost_net *net)
1782 + hdr_size = nvq->vhost_hlen;
1783 + zcopy = nvq->ubufs;
1784 +
1785 +- for (;;) {
1786 ++ do {
1787 + /* Release DMAs done buffers first */
1788 + if (zcopy)
1789 + vhost_zerocopy_signal_used(net, vq);
1790 +@@ -408,11 +415,7 @@ static void handle_tx(struct vhost_net *net)
1791 + vhost_zerocopy_signal_used(net, vq);
1792 + total_len += len;
1793 + vhost_net_tx_packet(net);
1794 +- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
1795 +- vhost_poll_queue(&vq->poll);
1796 +- break;
1797 +- }
1798 +- }
1799 ++ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
1800 + out:
1801 + mutex_unlock(&vq->mutex);
1802 + }
1803 +@@ -539,6 +542,7 @@ static void handle_rx(struct vhost_net *net)
1804 + struct socket *sock;
1805 + struct iov_iter fixup;
1806 + __virtio16 num_buffers;
1807 ++ int recv_pkts = 0;
1808 +
1809 + mutex_lock(&vq->mutex);
1810 + sock = vq->private_data;
1811 +@@ -553,7 +557,10 @@ static void handle_rx(struct vhost_net *net)
1812 + vq->log : NULL;
1813 + mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
1814 +
1815 +- while ((sock_len = peek_head_len(sock->sk))) {
1816 ++ do {
1817 ++ sock_len = peek_head_len(sock->sk);
1818 ++ if (!sock_len)
1819 ++ break;
1820 + sock_len += sock_hlen;
1821 + vhost_len = sock_len + vhost_hlen;
1822 + headcount = get_rx_bufs(vq, vq->heads, vhost_len,
1823 +@@ -631,11 +638,8 @@ static void handle_rx(struct vhost_net *net)
1824 + if (unlikely(vq_log))
1825 + vhost_log_write(vq, vq_log, log, vhost_len);
1826 + total_len += vhost_len;
1827 +- if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
1828 +- vhost_poll_queue(&vq->poll);
1829 +- break;
1830 +- }
1831 +- }
1832 ++ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
1833 ++
1834 + out:
1835 + mutex_unlock(&vq->mutex);
1836 + }
1837 +@@ -704,7 +708,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1838 + n->vqs[i].vhost_hlen = 0;
1839 + n->vqs[i].sock_hlen = 0;
1840 + }
1841 +- vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
1842 ++ vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1843 ++ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
1844 +
1845 + vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
1846 + vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
1847 +diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
1848 +index 8fc62a03637a..269cfdd2958d 100644
1849 +--- a/drivers/vhost/scsi.c
1850 ++++ b/drivers/vhost/scsi.c
1851 +@@ -58,6 +58,12 @@
1852 + #define VHOST_SCSI_PREALLOC_UPAGES 2048
1853 + #define VHOST_SCSI_PREALLOC_PROT_SGLS 512
1854 +
1855 ++/* Max number of requests before requeueing the job.
1856 ++ * Using this limit prevents one virtqueue from starving others with
1857 ++ * request.
1858 ++ */
1859 ++#define VHOST_SCSI_WEIGHT 256
1860 ++
1861 + struct vhost_scsi_inflight {
1862 + /* Wait for the flush operation to finish */
1863 + struct completion comp;
1864 +@@ -855,7 +861,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1865 + u64 tag;
1866 + u32 exp_data_len, data_direction;
1867 + unsigned out, in;
1868 +- int head, ret, prot_bytes;
1869 ++ int head, ret, prot_bytes, c = 0;
1870 + size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
1871 + size_t out_size, in_size;
1872 + u16 lun;
1873 +@@ -874,7 +880,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1874 +
1875 + vhost_disable_notify(&vs->dev, vq);
1876 +
1877 +- for (;;) {
1878 ++ do {
1879 + head = vhost_get_vq_desc(vq, vq->iov,
1880 + ARRAY_SIZE(vq->iov), &out, &in,
1881 + NULL, NULL);
1882 +@@ -1090,7 +1096,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1883 + */
1884 + INIT_WORK(&cmd->work, vhost_scsi_submission_work);
1885 + queue_work(vhost_scsi_workqueue, &cmd->work);
1886 +- }
1887 ++ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
1888 + out:
1889 + mutex_unlock(&vq->mutex);
1890 + }
1891 +@@ -1443,7 +1449,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1892 + vqs[i] = &vs->vqs[i].vq;
1893 + vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1894 + }
1895 +- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1896 ++ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ,
1897 ++ VHOST_SCSI_WEIGHT, 0);
1898 +
1899 + vhost_scsi_init_inflight(vs, NULL);
1900 +
1901 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1902 +index 2ed0a356d1d3..0f653f314876 100644
1903 +--- a/drivers/vhost/vhost.c
1904 ++++ b/drivers/vhost/vhost.c
1905 +@@ -370,8 +370,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
1906 + vhost_vq_free_iovecs(dev->vqs[i]);
1907 + }
1908 +
1909 ++bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
1910 ++ int pkts, int total_len)
1911 ++{
1912 ++ struct vhost_dev *dev = vq->dev;
1913 ++
1914 ++ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
1915 ++ pkts >= dev->weight) {
1916 ++ vhost_poll_queue(&vq->poll);
1917 ++ return true;
1918 ++ }
1919 ++
1920 ++ return false;
1921 ++}
1922 ++EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
1923 ++
1924 + void vhost_dev_init(struct vhost_dev *dev,
1925 +- struct vhost_virtqueue **vqs, int nvqs)
1926 ++ struct vhost_virtqueue **vqs, int nvqs,
1927 ++ int weight, int byte_weight)
1928 + {
1929 + struct vhost_virtqueue *vq;
1930 + int i;
1931 +@@ -386,6 +402,8 @@ void vhost_dev_init(struct vhost_dev *dev,
1932 + spin_lock_init(&dev->work_lock);
1933 + INIT_LIST_HEAD(&dev->work_list);
1934 + dev->worker = NULL;
1935 ++ dev->weight = weight;
1936 ++ dev->byte_weight = byte_weight;
1937 +
1938 + for (i = 0; i < dev->nvqs; ++i) {
1939 + vq = dev->vqs[i];
1940 +diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
1941 +index d3f767448a72..5ac486970569 100644
1942 +--- a/drivers/vhost/vhost.h
1943 ++++ b/drivers/vhost/vhost.h
1944 +@@ -127,9 +127,13 @@ struct vhost_dev {
1945 + spinlock_t work_lock;
1946 + struct list_head work_list;
1947 + struct task_struct *worker;
1948 ++ int weight;
1949 ++ int byte_weight;
1950 + };
1951 +
1952 +-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
1953 ++bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
1954 ++void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
1955 ++ int nvqs, int weight, int byte_weight);
1956 + long vhost_dev_set_owner(struct vhost_dev *dev);
1957 + bool vhost_dev_has_owner(struct vhost_dev *dev);
1958 + long vhost_dev_check_owner(struct vhost_dev *);
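The vhost hunks backport the generic byte/packet "weight" mechanism: every virtqueue service loop now runs as a do/while whose exit condition is vhost_exceeds_weight(), so a guest that keeps one queue permanently busy (with many small packets, which the old bytes-only VHOST_NET_WEIGHT check barely counted) can no longer starve the other virtqueues or monopolize the worker thread. The loop shape, following the patch:

	int pkts = 0, total_len = 0;

	do {
		/* ... handle one buffer, adding its length to total_len ... */
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

When either budget is exhausted, vhost_exceeds_weight() requeues the poll work and returns true, so processing resumes fairly on the next worker pass. A byte_weight of 0, as vhost-scsi passes, disables the byte limit: the helper only compares total_len when dev->byte_weight is nonzero.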
1959 +diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
1960 +index 8a5ce5b5a0b6..199b1fb3669c 100644
1961 +--- a/drivers/watchdog/bcm2835_wdt.c
1962 ++++ b/drivers/watchdog/bcm2835_wdt.c
1963 +@@ -248,6 +248,7 @@ module_param(nowayout, bool, 0);
1964 + MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
1965 + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
1966 +
1967 ++MODULE_ALIAS("platform:bcm2835-wdt");
1968 + MODULE_AUTHOR("Lubomir Rintel <lkundrak@××.sk>");
1969 + MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
1970 + MODULE_LICENSE("GPL");
1971 +diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1972 +index ef24894edecc..9c159e6ad116 100644
1973 +--- a/fs/gfs2/rgrp.c
1974 ++++ b/fs/gfs2/rgrp.c
1975 +@@ -739,6 +739,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
1976 +
1977 + gfs2_free_clones(rgd);
1978 + kfree(rgd->rd_bits);
1979 ++ rgd->rd_bits = NULL;
1980 + return_all_reservations(rgd);
1981 + kmem_cache_free(gfs2_rgrpd_cachep, rgd);
1982 + }
1983 +@@ -933,10 +934,6 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1984 + if (error)
1985 + goto fail;
1986 +
1987 +- rgd->rd_gl->gl_object = rgd;
1988 +- rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
1989 +- rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
1990 +- rgd->rd_length) * bsize) - 1;
1991 + rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
1992 + rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
1993 + if (rgd->rd_data > sdp->sd_max_rg_data)
1994 +@@ -944,14 +941,20 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1995 + spin_lock(&sdp->sd_rindex_spin);
1996 + error = rgd_insert(rgd);
1997 + spin_unlock(&sdp->sd_rindex_spin);
1998 +- if (!error)
1999 ++ if (!error) {
2000 ++ rgd->rd_gl->gl_object = rgd;
2001 ++ rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
2002 ++ rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
2003 ++ rgd->rd_length) * bsize) - 1;
2004 + return 0;
2005 ++ }
2006 +
2007 + error = 0; /* someone else read in the rgrp; free it and ignore it */
2008 + gfs2_glock_put(rgd->rd_gl);
2009 +
2010 + fail:
2011 + kfree(rgd->rd_bits);
2012 ++ rgd->rd_bits = NULL;
2013 + kmem_cache_free(gfs2_rgrpd_cachep, rgd);
2014 + return error;
2015 + }
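Two defensive moves in the gfs2 hunks. Setting gl_object and the gl_vm range is deferred until rgd_insert() has succeeded, so the glock never points at a resource group that loses the insertion race and is about to be freed. And both free paths now clear rd_bits after kfree(), which keeps a later gfs2_clear_rgrpd() pass from freeing the same buffer twice. Condensed:

	error = rgd_insert(rgd);
	if (!error) {
		rgd->rd_gl->gl_object = rgd;	/* publish only once reachable */
		return 0;
	}
	/* ... */
	kfree(rgd->rd_bits);
	rgd->rd_bits = NULL;			/* a second kfree() becomes a no-op */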
2016 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
2017 +index 4afdee420d25..9f15696f55b9 100644
2018 +--- a/fs/nfs/nfs4_fs.h
2019 ++++ b/fs/nfs/nfs4_fs.h
2020 +@@ -416,7 +416,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
2021 +
2022 + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
2023 + extern void nfs4_put_state_owner(struct nfs4_state_owner *);
2024 +-extern void nfs4_purge_state_owners(struct nfs_server *);
2025 ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
2026 ++extern void nfs4_free_state_owners(struct list_head *head);
2027 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
2028 + extern void nfs4_put_open_state(struct nfs4_state *);
2029 + extern void nfs4_close_state(struct nfs4_state *, fmode_t);
2030 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2031 +index ae91d1e450be..dac20f31f01f 100644
2032 +--- a/fs/nfs/nfs4client.c
2033 ++++ b/fs/nfs/nfs4client.c
2034 +@@ -685,9 +685,12 @@ found:
2035 +
2036 + static void nfs4_destroy_server(struct nfs_server *server)
2037 + {
2038 ++ LIST_HEAD(freeme);
2039 ++
2040 + nfs_server_return_all_delegations(server);
2041 + unset_pnfs_layoutdriver(server);
2042 +- nfs4_purge_state_owners(server);
2043 ++ nfs4_purge_state_owners(server, &freeme);
2044 ++ nfs4_free_state_owners(&freeme);
2045 + }
2046 +
2047 + /*
2048 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2049 +index 5be61affeefd..ef3ed2b1fd27 100644
2050 +--- a/fs/nfs/nfs4state.c
2051 ++++ b/fs/nfs/nfs4state.c
2052 +@@ -611,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
2053 + /**
2054 + * nfs4_purge_state_owners - Release all cached state owners
2055 + * @server: nfs_server with cached state owners to release
2056 ++ * @head: resulting list of state owners
2057 + *
2058 + * Called at umount time. Remaining state owners will be on
2059 + * the LRU with ref count of zero.
2060 ++ * Note that the state owners are not freed, but are added
2061 ++ * to the list @head, which can later be used as an argument
2062 ++ * to nfs4_free_state_owners.
2063 + */
2064 +-void nfs4_purge_state_owners(struct nfs_server *server)
2065 ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
2066 + {
2067 + struct nfs_client *clp = server->nfs_client;
2068 + struct nfs4_state_owner *sp, *tmp;
2069 +- LIST_HEAD(doomed);
2070 +
2071 + spin_lock(&clp->cl_lock);
2072 + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
2073 +- list_move(&sp->so_lru, &doomed);
2074 ++ list_move(&sp->so_lru, head);
2075 + nfs4_remove_state_owner_locked(sp);
2076 + }
2077 + spin_unlock(&clp->cl_lock);
2078 ++}
2079 +
2080 +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
2081 ++/**
2082 ++ * nfs4_free_state_owners - Release all cached state owners
2083 ++ * @head: resulting list of state owners
2084 ++ *
2085 ++ * Frees a list of state owners that was generated by
2086 ++ * nfs4_purge_state_owners
2087 ++ */
2088 ++void nfs4_free_state_owners(struct list_head *head)
2089 ++{
2090 ++ struct nfs4_state_owner *sp, *tmp;
2091 ++
2092 ++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
2093 + list_del(&sp->so_lru);
2094 + nfs4_free_state_owner(sp);
2095 + }
2096 +@@ -1724,12 +1739,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
2097 + struct nfs4_state_owner *sp;
2098 + struct nfs_server *server;
2099 + struct rb_node *pos;
2100 ++ LIST_HEAD(freeme);
2101 + int status = 0;
2102 +
2103 + restart:
2104 + rcu_read_lock();
2105 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2106 +- nfs4_purge_state_owners(server);
2107 ++ nfs4_purge_state_owners(server, &freeme);
2108 + spin_lock(&clp->cl_lock);
2109 + for (pos = rb_first(&server->state_owners);
2110 + pos != NULL;
2111 +@@ -1758,6 +1774,7 @@ restart:
2112 + spin_unlock(&clp->cl_lock);
2113 + }
2114 + rcu_read_unlock();
2115 ++ nfs4_free_state_owners(&freeme);
2116 + return 0;
2117 + }
2118 +
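The NFSv4 change splits "collect" from "free": nfs4_purge_state_owners() now only moves the stale state owners from the LRU onto a caller-provided list while clp->cl_lock is held, and the new nfs4_free_state_owners() does the actual freeing afterwards. That matters in nfs4_do_reclaim(), where the purge used to run inside an rcu_read_lock() section: nfs4_free_state_owner() can sleep, so freeing must wait until both the spinlock and the RCU read section are dropped. A simplified sketch of the resulting shape (the real code also unhashes each entry as it moves it):

	LIST_HEAD(freeme);

	spin_lock(&clp->cl_lock);
	/* move, don't free: list_move() each LRU entry onto freeme */
	spin_unlock(&clp->cl_lock);

	nfs4_free_state_owners(&freeme);	/* sleeping work, no locks held */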
2119 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2120 +index f187e02d267e..fe1c146f4032 100644
2121 +--- a/fs/userfaultfd.c
2122 ++++ b/fs/userfaultfd.c
2123 +@@ -431,6 +431,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2124 + /* len == 0 means wake all */
2125 + struct userfaultfd_wake_range range = { .len = 0, };
2126 + unsigned long new_flags;
2127 ++ bool still_valid;
2128 +
2129 + ACCESS_ONCE(ctx->released) = true;
2130 +
2131 +@@ -446,8 +447,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2132 + * taking the mmap_sem for writing.
2133 + */
2134 + down_write(&mm->mmap_sem);
2135 +- if (!mmget_still_valid(mm))
2136 +- goto skip_mm;
2137 ++ still_valid = mmget_still_valid(mm);
2138 + prev = NULL;
2139 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
2140 + cond_resched();
2141 +@@ -458,19 +458,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2142 + continue;
2143 + }
2144 + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
2145 +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2146 +- new_flags, vma->anon_vma,
2147 +- vma->vm_file, vma->vm_pgoff,
2148 +- vma_policy(vma),
2149 +- NULL_VM_UFFD_CTX);
2150 +- if (prev)
2151 +- vma = prev;
2152 +- else
2153 +- prev = vma;
2154 ++ if (still_valid) {
2155 ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2156 ++ new_flags, vma->anon_vma,
2157 ++ vma->vm_file, vma->vm_pgoff,
2158 ++ vma_policy(vma),
2159 ++ NULL_VM_UFFD_CTX);
2160 ++ if (prev)
2161 ++ vma = prev;
2162 ++ else
2163 ++ prev = vma;
2164 ++ }
2165 + vma->vm_flags = new_flags;
2166 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2167 + }
2168 +-skip_mm:
2169 + up_write(&mm->mmap_sem);
2170 + mmput(mm);
2171 + wakeup:
2172 +diff --git a/include/linux/siphash.h b/include/linux/siphash.h
2173 +new file mode 100644
2174 +index 000000000000..bf21591a9e5e
2175 +--- /dev/null
2176 ++++ b/include/linux/siphash.h
2177 +@@ -0,0 +1,145 @@
2178 ++/* Copyright (C) 2016 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
2179 ++ *
2180 ++ * This file is provided under a dual BSD/GPLv2 license.
2181 ++ *
2182 ++ * SipHash: a fast short-input PRF
2183 ++ * https://131002.net/siphash/
2184 ++ *
2185 ++ * This implementation is specifically for SipHash2-4 for a secure PRF
2186 ++ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
2187 ++ * hashtables.
2188 ++ */
2189 ++
2190 ++#ifndef _LINUX_SIPHASH_H
2191 ++#define _LINUX_SIPHASH_H
2192 ++
2193 ++#include <linux/types.h>
2194 ++#include <linux/kernel.h>
2195 ++
2196 ++#define SIPHASH_ALIGNMENT __alignof__(u64)
2197 ++typedef struct {
2198 ++ u64 key[2];
2199 ++} siphash_key_t;
2200 ++
2201 ++static inline bool siphash_key_is_zero(const siphash_key_t *key)
2202 ++{
2203 ++ return !(key->key[0] | key->key[1]);
2204 ++}
2205 ++
2206 ++u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
2207 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2208 ++u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
2209 ++#endif
2210 ++
2211 ++u64 siphash_1u64(const u64 a, const siphash_key_t *key);
2212 ++u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
2213 ++u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
2214 ++ const siphash_key_t *key);
2215 ++u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
2216 ++ const siphash_key_t *key);
2217 ++u64 siphash_1u32(const u32 a, const siphash_key_t *key);
2218 ++u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
2219 ++ const siphash_key_t *key);
2220 ++
2221 ++static inline u64 siphash_2u32(const u32 a, const u32 b,
2222 ++ const siphash_key_t *key)
2223 ++{
2224 ++ return siphash_1u64((u64)b << 32 | a, key);
2225 ++}
2226 ++static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
2227 ++ const u32 d, const siphash_key_t *key)
2228 ++{
2229 ++ return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
2230 ++}
2231 ++
2232 ++
2233 ++static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
2234 ++ const siphash_key_t *key)
2235 ++{
2236 ++ if (__builtin_constant_p(len) && len == 4)
2237 ++ return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
2238 ++ if (__builtin_constant_p(len) && len == 8)
2239 ++ return siphash_1u64(le64_to_cpu(data[0]), key);
2240 ++ if (__builtin_constant_p(len) && len == 16)
2241 ++ return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
2242 ++ key);
2243 ++ if (__builtin_constant_p(len) && len == 24)
2244 ++ return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
2245 ++ le64_to_cpu(data[2]), key);
2246 ++ if (__builtin_constant_p(len) && len == 32)
2247 ++ return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
2248 ++ le64_to_cpu(data[2]), le64_to_cpu(data[3]),
2249 ++ key);
2250 ++ return __siphash_aligned(data, len, key);
2251 ++}
2252 ++
2253 ++/**
2254 ++ * siphash - compute 64-bit siphash PRF value
2255 ++ * @data: buffer to hash
2256 ++ * @size: size of @data
2257 ++ * @key: the siphash key
2258 ++ */
2259 ++static inline u64 siphash(const void *data, size_t len,
2260 ++ const siphash_key_t *key)
2261 ++{
2262 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2263 ++ if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
2264 ++ return __siphash_unaligned(data, len, key);
2265 ++#endif
2266 ++ return ___siphash_aligned(data, len, key);
2267 ++}
2268 ++
2269 ++#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
2270 ++typedef struct {
2271 ++ unsigned long key[2];
2272 ++} hsiphash_key_t;
2273 ++
2274 ++u32 __hsiphash_aligned(const void *data, size_t len,
2275 ++ const hsiphash_key_t *key);
2276 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2277 ++u32 __hsiphash_unaligned(const void *data, size_t len,
2278 ++ const hsiphash_key_t *key);
2279 ++#endif
2280 ++
2281 ++u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
2282 ++u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
2283 ++u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
2284 ++ const hsiphash_key_t *key);
2285 ++u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
2286 ++ const hsiphash_key_t *key);
2287 ++
2288 ++static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
2289 ++ const hsiphash_key_t *key)
2290 ++{
2291 ++ if (__builtin_constant_p(len) && len == 4)
2292 ++ return hsiphash_1u32(le32_to_cpu(data[0]), key);
2293 ++ if (__builtin_constant_p(len) && len == 8)
2294 ++ return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
2295 ++ key);
2296 ++ if (__builtin_constant_p(len) && len == 12)
2297 ++ return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
2298 ++ le32_to_cpu(data[2]), key);
2299 ++ if (__builtin_constant_p(len) && len == 16)
2300 ++ return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
2301 ++ le32_to_cpu(data[2]), le32_to_cpu(data[3]),
2302 ++ key);
2303 ++ return __hsiphash_aligned(data, len, key);
2304 ++}
2305 ++
2306 ++/**
2307 ++ * hsiphash - compute 32-bit hsiphash PRF value
2308 ++ * @data: buffer to hash
2309 ++ * @size: size of @data
2310 ++ * @key: the hsiphash key
2311 ++ */
2312 ++static inline u32 hsiphash(const void *data, size_t len,
2313 ++ const hsiphash_key_t *key)
2314 ++{
2315 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2316 ++ if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
2317 ++ return __hsiphash_unaligned(data, len, key);
2318 ++#endif
2319 ++ return ___hsiphash_aligned(data, len, key);
2320 ++}
2321 ++
2322 ++#endif /* _LINUX_SIPHASH_H */
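A typical consumer of the new header hashes a small fixed-size key structure with a per-boot secret. A minimal sketch, with hypothetical struct and variable names; aligning the buffer to SIPHASH_ALIGNMENT keeps the fast aligned path in play on platforms without efficient unaligned access:

	#include <linux/siphash.h>

	struct flow_key {
		u32 saddr;
		u32 daddr;
		u16 sport;
		u16 dport;
	} __aligned(SIPHASH_ALIGNMENT);

	static siphash_key_t flow_secret __read_mostly;	/* filled once with random bytes */

	static u64 flow_hash(const struct flow_key *k)
	{
		return siphash(k, sizeof(*k), &flow_secret);
	}

When the input is a handful of integers rather than a buffer, the fixed-width helpers (siphash_1u64() through siphash_4u64(), plus the u32 variants) avoid the length dispatch entirely.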
2323 +diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
2324 +index fde4068eec0b..636e9e11bd5f 100644
2325 +--- a/include/net/netfilter/nf_conntrack.h
2326 ++++ b/include/net/netfilter/nf_conntrack.h
2327 +@@ -297,6 +297,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
2328 + gfp_t flags);
2329 + void nf_ct_tmpl_free(struct nf_conn *tmpl);
2330 +
2331 ++u32 nf_ct_get_id(const struct nf_conn *ct);
2332 ++
2333 + #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
2334 + #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
2335 +
2336 +diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
2337 +index 61c38f87ea07..e6f49f22e006 100644
2338 +--- a/include/net/netns/ipv4.h
2339 ++++ b/include/net/netns/ipv4.h
2340 +@@ -8,6 +8,7 @@
2341 + #include <linux/uidgid.h>
2342 + #include <net/inet_frag.h>
2343 + #include <linux/rcupdate.h>
2344 ++#include <linux/siphash.h>
2345 +
2346 + struct tcpm_hash_bucket;
2347 + struct ctl_table_header;
2348 +@@ -109,5 +110,6 @@ struct netns_ipv4 {
2349 + #endif
2350 + #endif
2351 + atomic_t rt_genid;
2352 ++ siphash_key_t ip_id_key;
2353 + };
2354 + #endif
2355 +diff --git a/include/net/tcp.h b/include/net/tcp.h
2356 +index 0410fd29d569..4447195a0cd4 100644
2357 +--- a/include/net/tcp.h
2358 ++++ b/include/net/tcp.h
2359 +@@ -1540,6 +1540,10 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2360 + {
2361 + struct sk_buff *skb = tcp_send_head(sk);
2362 +
2363 ++ /* empty retransmit queue, for example due to zero window */
2364 ++ if (skb == tcp_write_queue_head(sk))
2365 ++ return NULL;
2366 ++
2367 + return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
2368 + }
2369 +
2370 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2371 +index 5299618d6308..7a7c535f8a2f 100644
2372 +--- a/kernel/cgroup.c
2373 ++++ b/kernel/cgroup.c
2374 +@@ -784,6 +784,8 @@ static void put_css_set_locked(struct css_set *cset)
2375 +
2376 + static void put_css_set(struct css_set *cset)
2377 + {
2378 ++ unsigned long flags;
2379 ++
2380 + /*
2381 + * Ensure that the refcount doesn't hit zero while any readers
2382 + * can see it. Similar to atomic_dec_and_lock(), but for an
2383 +@@ -792,9 +794,9 @@ static void put_css_set(struct css_set *cset)
2384 + if (atomic_add_unless(&cset->refcount, -1, 1))
2385 + return;
2386 +
2387 +- spin_lock_bh(&css_set_lock);
2388 ++ spin_lock_irqsave(&css_set_lock, flags);
2389 + put_css_set_locked(cset);
2390 +- spin_unlock_bh(&css_set_lock);
2391 ++ spin_unlock_irqrestore(&css_set_lock, flags);
2392 + }
2393 +
2394 + /*
2395 +@@ -1017,11 +1019,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
2396 +
2397 + /* First see if we already have a cgroup group that matches
2398 + * the desired set */
2399 +- spin_lock_bh(&css_set_lock);
2400 ++ spin_lock_irq(&css_set_lock);
2401 + cset = find_existing_css_set(old_cset, cgrp, template);
2402 + if (cset)
2403 + get_css_set(cset);
2404 +- spin_unlock_bh(&css_set_lock);
2405 ++ spin_unlock_irq(&css_set_lock);
2406 +
2407 + if (cset)
2408 + return cset;
2409 +@@ -1049,7 +1051,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
2410 + * find_existing_css_set() */
2411 + memcpy(cset->subsys, template, sizeof(cset->subsys));
2412 +
2413 +- spin_lock_bh(&css_set_lock);
2414 ++ spin_lock_irq(&css_set_lock);
2415 + /* Add reference counts and links from the new css_set. */
2416 + list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
2417 + struct cgroup *c = link->cgrp;
2418 +@@ -1075,7 +1077,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
2419 + css_get(css);
2420 + }
2421 +
2422 +- spin_unlock_bh(&css_set_lock);
2423 ++ spin_unlock_irq(&css_set_lock);
2424 +
2425 + return cset;
2426 + }
2427 +@@ -1139,7 +1141,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
2428 + * Release all the links from cset_links to this hierarchy's
2429 + * root cgroup
2430 + */
2431 +- spin_lock_bh(&css_set_lock);
2432 ++ spin_lock_irq(&css_set_lock);
2433 +
2434 + list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
2435 + list_del(&link->cset_link);
2436 +@@ -1147,7 +1149,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
2437 + kfree(link);
2438 + }
2439 +
2440 +- spin_unlock_bh(&css_set_lock);
2441 ++ spin_unlock_irq(&css_set_lock);
2442 +
2443 + if (!list_empty(&root->root_list)) {
2444 + list_del(&root->root_list);
2445 +@@ -1551,11 +1553,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
2446 + ss->root = dst_root;
2447 + css->cgroup = dcgrp;
2448 +
2449 +- spin_lock_bh(&css_set_lock);
2450 ++ spin_lock_irq(&css_set_lock);
2451 + hash_for_each(css_set_table, i, cset, hlist)
2452 + list_move_tail(&cset->e_cset_node[ss->id],
2453 + &dcgrp->e_csets[ss->id]);
2454 +- spin_unlock_bh(&css_set_lock);
2455 ++ spin_unlock_irq(&css_set_lock);
2456 +
2457 + src_root->subsys_mask &= ~(1 << ssid);
2458 + scgrp->subtree_control &= ~(1 << ssid);
2459 +@@ -1832,7 +1834,7 @@ static void cgroup_enable_task_cg_lists(void)
2460 + {
2461 + struct task_struct *p, *g;
2462 +
2463 +- spin_lock_bh(&css_set_lock);
2464 ++ spin_lock_irq(&css_set_lock);
2465 +
2466 + if (use_task_css_set_links)
2467 + goto out_unlock;
2468 +@@ -1857,8 +1859,12 @@ static void cgroup_enable_task_cg_lists(void)
2469 + * entry won't be deleted though the process has exited.
2470 + * Do it while holding siglock so that we don't end up
2471 + * racing against cgroup_exit().
2472 ++ *
2473 ++ * Interrupts were already disabled while acquiring
2474 ++ * the css_set_lock, so we do not need to disable it
2475 ++ * again when acquiring the sighand->siglock here.
2476 + */
2477 +- spin_lock_irq(&p->sighand->siglock);
2478 ++ spin_lock(&p->sighand->siglock);
2479 + if (!(p->flags & PF_EXITING)) {
2480 + struct css_set *cset = task_css_set(p);
2481 +
2482 +@@ -1867,11 +1873,11 @@ static void cgroup_enable_task_cg_lists(void)
2483 + list_add_tail(&p->cg_list, &cset->tasks);
2484 + get_css_set(cset);
2485 + }
2486 +- spin_unlock_irq(&p->sighand->siglock);
2487 ++ spin_unlock(&p->sighand->siglock);
2488 + } while_each_thread(g, p);
2489 + read_unlock(&tasklist_lock);
2490 + out_unlock:
2491 +- spin_unlock_bh(&css_set_lock);
2492 ++ spin_unlock_irq(&css_set_lock);
2493 + }
2494 +
2495 + static void init_cgroup_housekeeping(struct cgroup *cgrp)
2496 +@@ -1976,13 +1982,13 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
2497 + * Link the root cgroup in this hierarchy into all the css_set
2498 + * objects.
2499 + */
2500 +- spin_lock_bh(&css_set_lock);
2501 ++ spin_lock_irq(&css_set_lock);
2502 + hash_for_each(css_set_table, i, cset, hlist) {
2503 + link_css_set(&tmp_links, cset, root_cgrp);
2504 + if (css_set_populated(cset))
2505 + cgroup_update_populated(root_cgrp, true);
2506 + }
2507 +- spin_unlock_bh(&css_set_lock);
2508 ++ spin_unlock_irq(&css_set_lock);
2509 +
2510 + BUG_ON(!list_empty(&root_cgrp->self.children));
2511 + BUG_ON(atomic_read(&root->nr_cgrps) != 1);
2512 +@@ -2215,7 +2221,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2513 + char *path = NULL;
2514 +
2515 + mutex_lock(&cgroup_mutex);
2516 +- spin_lock_bh(&css_set_lock);
2517 ++ spin_lock_irq(&css_set_lock);
2518 +
2519 + root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2520 +
2521 +@@ -2228,7 +2234,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2522 + path = buf;
2523 + }
2524 +
2525 +- spin_unlock_bh(&css_set_lock);
2526 ++ spin_unlock_irq(&css_set_lock);
2527 + mutex_unlock(&cgroup_mutex);
2528 + return path;
2529 + }
2530 +@@ -2403,7 +2409,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2531 + * the new cgroup. There are no failure cases after here, so this
2532 + * is the commit point.
2533 + */
2534 +- spin_lock_bh(&css_set_lock);
2535 ++ spin_lock_irq(&css_set_lock);
2536 + list_for_each_entry(cset, &tset->src_csets, mg_node) {
2537 + list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2538 + struct css_set *from_cset = task_css_set(task);
2539 +@@ -2414,7 +2420,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2540 + put_css_set_locked(from_cset);
2541 + }
2542 + }
2543 +- spin_unlock_bh(&css_set_lock);
2544 ++ spin_unlock_irq(&css_set_lock);
2545 +
2546 + /*
2547 + * Migration is committed, all target tasks are now on dst_csets.
2548 +@@ -2443,13 +2449,13 @@ out_cancel_attach:
2549 + }
2550 + }
2551 + out_release_tset:
2552 +- spin_lock_bh(&css_set_lock);
2553 ++ spin_lock_irq(&css_set_lock);
2554 + list_splice_init(&tset->dst_csets, &tset->src_csets);
2555 + list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2556 + list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2557 + list_del_init(&cset->mg_node);
2558 + }
2559 +- spin_unlock_bh(&css_set_lock);
2560 ++ spin_unlock_irq(&css_set_lock);
2561 + return ret;
2562 + }
2563 +
2564 +@@ -2466,14 +2472,14 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2565 +
2566 + lockdep_assert_held(&cgroup_mutex);
2567 +
2568 +- spin_lock_bh(&css_set_lock);
2569 ++ spin_lock_irq(&css_set_lock);
2570 + list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
2571 + cset->mg_src_cgrp = NULL;
2572 + cset->mg_dst_cset = NULL;
2573 + list_del_init(&cset->mg_preload_node);
2574 + put_css_set_locked(cset);
2575 + }
2576 +- spin_unlock_bh(&css_set_lock);
2577 ++ spin_unlock_irq(&css_set_lock);
2578 + }
2579 +
2580 + /**
2581 +@@ -2623,7 +2629,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2582 + * already PF_EXITING could be freed from underneath us unless we
2583 + * take an rcu_read_lock.
2584 + */
2585 +- spin_lock_bh(&css_set_lock);
2586 ++ spin_lock_irq(&css_set_lock);
2587 + rcu_read_lock();
2588 + task = leader;
2589 + do {
2590 +@@ -2632,7 +2638,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2591 + break;
2592 + } while_each_thread(leader, task);
2593 + rcu_read_unlock();
2594 +- spin_unlock_bh(&css_set_lock);
2595 ++ spin_unlock_irq(&css_set_lock);
2596 +
2597 + return cgroup_taskset_migrate(&tset, cgrp);
2598 + }
2599 +@@ -2653,7 +2659,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
2600 + int ret;
2601 +
2602 + /* look up all src csets */
2603 +- spin_lock_bh(&css_set_lock);
2604 ++ spin_lock_irq(&css_set_lock);
2605 + rcu_read_lock();
2606 + task = leader;
2607 + do {
2608 +@@ -2663,7 +2669,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
2609 + break;
2610 + } while_each_thread(leader, task);
2611 + rcu_read_unlock();
2612 +- spin_unlock_bh(&css_set_lock);
2613 ++ spin_unlock_irq(&css_set_lock);
2614 +
2615 + /* prepare dst csets and commit */
2616 + ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
2617 +@@ -2696,9 +2702,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
2618 + struct cgroup *cgrp;
2619 + struct inode *inode;
2620 +
2621 +- spin_lock_bh(&css_set_lock);
2622 ++ spin_lock_irq(&css_set_lock);
2623 + cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2624 +- spin_unlock_bh(&css_set_lock);
2625 ++ spin_unlock_irq(&css_set_lock);
2626 +
2627 + while (!cgroup_is_descendant(dst_cgrp, cgrp))
2628 + cgrp = cgroup_parent(cgrp);
2629 +@@ -2800,9 +2806,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2630 + if (root == &cgrp_dfl_root)
2631 + continue;
2632 +
2633 +- spin_lock_bh(&css_set_lock);
2634 ++ spin_lock_irq(&css_set_lock);
2635 + from_cgrp = task_cgroup_from_root(from, root);
2636 +- spin_unlock_bh(&css_set_lock);
2637 ++ spin_unlock_irq(&css_set_lock);
2638 +
2639 + retval = cgroup_attach_task(from_cgrp, tsk, false);
2640 + if (retval)
2641 +@@ -2927,7 +2933,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2642 + percpu_down_write(&cgroup_threadgroup_rwsem);
2643 +
2644 + /* look up all csses currently attached to @cgrp's subtree */
2645 +- spin_lock_bh(&css_set_lock);
2646 ++ spin_lock_irq(&css_set_lock);
2647 + css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
2648 + struct cgrp_cset_link *link;
2649 +
2650 +@@ -2939,14 +2945,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2651 + cgroup_migrate_add_src(link->cset, cgrp,
2652 + &preloaded_csets);
2653 + }
2654 +- spin_unlock_bh(&css_set_lock);
2655 ++ spin_unlock_irq(&css_set_lock);
2656 +
2657 + /* NULL dst indicates self on default hierarchy */
2658 + ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
2659 + if (ret)
2660 + goto out_finish;
2661 +
2662 +- spin_lock_bh(&css_set_lock);
2663 ++ spin_lock_irq(&css_set_lock);
2664 + list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
2665 + struct task_struct *task, *ntask;
2666 +
2667 +@@ -2958,7 +2964,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2668 + list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
2669 + cgroup_taskset_add(task, &tset);
2670 + }
2671 +- spin_unlock_bh(&css_set_lock);
2672 ++ spin_unlock_irq(&css_set_lock);
2673 +
2674 + ret = cgroup_taskset_migrate(&tset, cgrp);
2675 + out_finish:
2676 +@@ -3641,10 +3647,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
2677 + int count = 0;
2678 + struct cgrp_cset_link *link;
2679 +
2680 +- spin_lock_bh(&css_set_lock);
2681 ++ spin_lock_irq(&css_set_lock);
2682 + list_for_each_entry(link, &cgrp->cset_links, cset_link)
2683 + count += atomic_read(&link->cset->refcount);
2684 +- spin_unlock_bh(&css_set_lock);
2685 ++ spin_unlock_irq(&css_set_lock);
2686 + return count;
2687 + }
2688 +
2689 +@@ -3982,7 +3988,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
2690 +
2691 + memset(it, 0, sizeof(*it));
2692 +
2693 +- spin_lock_bh(&css_set_lock);
2694 ++ spin_lock_irq(&css_set_lock);
2695 +
2696 + it->ss = css->ss;
2697 +
2698 +@@ -3995,7 +4001,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
2699 +
2700 + css_task_iter_advance_css_set(it);
2701 +
2702 +- spin_unlock_bh(&css_set_lock);
2703 ++ spin_unlock_irq(&css_set_lock);
2704 + }
2705 +
2706 + /**
2707 +@@ -4013,7 +4019,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
2708 + it->cur_task = NULL;
2709 + }
2710 +
2711 +- spin_lock_bh(&css_set_lock);
2712 ++ spin_lock_irq(&css_set_lock);
2713 +
2714 + if (it->task_pos) {
2715 + it->cur_task = list_entry(it->task_pos, struct task_struct,
2716 +@@ -4022,7 +4028,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
2717 + css_task_iter_advance(it);
2718 + }
2719 +
2720 +- spin_unlock_bh(&css_set_lock);
2721 ++ spin_unlock_irq(&css_set_lock);
2722 +
2723 + return it->cur_task;
2724 + }
2725 +@@ -4036,10 +4042,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
2726 + void css_task_iter_end(struct css_task_iter *it)
2727 + {
2728 + if (it->cur_cset) {
2729 +- spin_lock_bh(&css_set_lock);
2730 ++ spin_lock_irq(&css_set_lock);
2731 + list_del(&it->iters_node);
2732 + put_css_set_locked(it->cur_cset);
2733 +- spin_unlock_bh(&css_set_lock);
2734 ++ spin_unlock_irq(&css_set_lock);
2735 + }
2736 +
2737 + if (it->cur_task)
2738 +@@ -4068,10 +4074,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
2739 + mutex_lock(&cgroup_mutex);
2740 +
2741 + /* all tasks in @from are being moved, all csets are source */
2742 +- spin_lock_bh(&css_set_lock);
2743 ++ spin_lock_irq(&css_set_lock);
2744 + list_for_each_entry(link, &from->cset_links, cset_link)
2745 + cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
2746 +- spin_unlock_bh(&css_set_lock);
2747 ++ spin_unlock_irq(&css_set_lock);
2748 +
2749 + ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
2750 + if (ret)
2751 +@@ -5180,10 +5186,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
2752 + */
2753 + cgrp->self.flags &= ~CSS_ONLINE;
2754 +
2755 +- spin_lock_bh(&css_set_lock);
2756 ++ spin_lock_irq(&css_set_lock);
2757 + list_for_each_entry(link, &cgrp->cset_links, cset_link)
2758 + link->cset->dead = true;
2759 +- spin_unlock_bh(&css_set_lock);
2760 ++ spin_unlock_irq(&css_set_lock);
2761 +
2762 + /* initiate massacre of all css's */
2763 + for_each_css(css, ssid, cgrp)
2764 +@@ -5436,7 +5442,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
2765 + goto out;
2766 +
2767 + mutex_lock(&cgroup_mutex);
2768 +- spin_lock_bh(&css_set_lock);
2769 ++ spin_lock_irq(&css_set_lock);
2770 +
2771 + for_each_root(root) {
2772 + struct cgroup_subsys *ss;
2773 +@@ -5488,7 +5494,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
2774 +
2775 + retval = 0;
2776 + out_unlock:
2777 +- spin_unlock_bh(&css_set_lock);
2778 ++ spin_unlock_irq(&css_set_lock);
2779 + mutex_unlock(&cgroup_mutex);
2780 + kfree(buf);
2781 + out:
2782 +@@ -5649,13 +5655,13 @@ void cgroup_post_fork(struct task_struct *child,
2783 + if (use_task_css_set_links) {
2784 + struct css_set *cset;
2785 +
2786 +- spin_lock_bh(&css_set_lock);
2787 ++ spin_lock_irq(&css_set_lock);
2788 + cset = task_css_set(current);
2789 + if (list_empty(&child->cg_list)) {
2790 + get_css_set(cset);
2791 + css_set_move_task(child, NULL, cset, false);
2792 + }
2793 +- spin_unlock_bh(&css_set_lock);
2794 ++ spin_unlock_irq(&css_set_lock);
2795 + }
2796 +
2797 + /*
2798 +@@ -5699,9 +5705,9 @@ void cgroup_exit(struct task_struct *tsk)
2799 + cset = task_css_set(tsk);
2800 +
2801 + if (!list_empty(&tsk->cg_list)) {
2802 +- spin_lock_bh(&css_set_lock);
2803 ++ spin_lock_irq(&css_set_lock);
2804 + css_set_move_task(tsk, cset, NULL, false);
2805 +- spin_unlock_bh(&css_set_lock);
2806 ++ spin_unlock_irq(&css_set_lock);
2807 + } else {
2808 + get_css_set(cset);
2809 + }
2810 +@@ -5914,7 +5920,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
2811 + if (!name_buf)
2812 + return -ENOMEM;
2813 +
2814 +- spin_lock_bh(&css_set_lock);
2815 ++ spin_lock_irq(&css_set_lock);
2816 + rcu_read_lock();
2817 + cset = rcu_dereference(current->cgroups);
2818 + list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
2819 +@@ -5925,7 +5931,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
2820 + c->root->hierarchy_id, name_buf);
2821 + }
2822 + rcu_read_unlock();
2823 +- spin_unlock_bh(&css_set_lock);
2824 ++ spin_unlock_irq(&css_set_lock);
2825 + kfree(name_buf);
2826 + return 0;
2827 + }
2828 +@@ -5936,7 +5942,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
2829 + struct cgroup_subsys_state *css = seq_css(seq);
2830 + struct cgrp_cset_link *link;
2831 +
2832 +- spin_lock_bh(&css_set_lock);
2833 ++ spin_lock_irq(&css_set_lock);
2834 + list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
2835 + struct css_set *cset = link->cset;
2836 + struct task_struct *task;
2837 +@@ -5959,7 +5965,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
2838 + overflow:
2839 + seq_puts(seq, " ...\n");
2840 + }
2841 +- spin_unlock_bh(&css_set_lock);
2842 ++ spin_unlock_irq(&css_set_lock);
2843 + return 0;
2844 + }
2845 +
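The long run of css_set_lock conversions all implement one rule change: the lock is now irq-safe instead of merely bh-safe. put_css_set() uses the irqsave/irqrestore form because it can be called with interrupts already disabled, while the many paths that know interrupts are enabled take the cheaper _irq form; the siglock nesting inside cgroup_enable_task_cg_lists() drops its own _irq suffix for the same reason, as the added comment explains. The two forms side by side:

	unsigned long flags;

	spin_lock_irqsave(&css_set_lock, flags);	/* caller's IRQ state unknown */
	/* ... */
	spin_unlock_irqrestore(&css_set_lock, flags);

	spin_lock_irq(&css_set_lock);			/* IRQs known to be enabled */
	/* ... */
	spin_unlock_irq(&css_set_lock);

Picking the irqsave form in put_css_set() is what lets css_set reference drops happen safely from contexts that already have interrupts disabled.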
2846 +diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2847 +index f0602beeba26..fd1205a3dbdb 100644
2848 +--- a/lib/Kconfig.debug
2849 ++++ b/lib/Kconfig.debug
2850 +@@ -1706,6 +1706,16 @@ config TEST_RHASHTABLE
2851 +
2852 + If unsure, say N.
2853 +
2854 ++config TEST_HASH
2855 ++ tristate "Perform selftest on hash functions"
2856 ++ default n
2857 ++ help
2858 ++ Enable this option to test the kernel's siphash (<linux/siphash.h>)
2859 ++ hash functions on boot (or module load).
2860 ++
2861 ++ This is intended to help people writing architecture-specific
2862 ++ optimized versions. If unsure, say N.
2863 ++
2864 + endmenu # runtime tests
2865 +
2866 + config PROVIDE_OHCI1394_DMA_INIT
2867 +diff --git a/lib/Makefile b/lib/Makefile
2868 +index cb4f6aa95013..6c6c1fb2fa04 100644
2869 +--- a/lib/Makefile
2870 ++++ b/lib/Makefile
2871 +@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
2872 + sha1.o md5.o irq_regs.o argv_split.o \
2873 + proportions.o flex_proportions.o ratelimit.o show_mem.o \
2874 + is_single_threaded.o plist.o decompress.o kobject_uevent.o \
2875 +- earlycpio.o seq_buf.o nmi_backtrace.o
2876 ++ earlycpio.o seq_buf.o siphash.o nmi_backtrace.o
2877 +
2878 + obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
2879 + lib-$(CONFIG_MMU) += ioremap.o
2880 +@@ -35,6 +35,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o
2881 + obj-y += kstrtox.o
2882 + obj-$(CONFIG_TEST_BPF) += test_bpf.o
2883 + obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
2884 ++obj-$(CONFIG_TEST_HASH) += test_siphash.o
2885 + obj-$(CONFIG_TEST_KASAN) += test_kasan.o
2886 + obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
2887 + obj-$(CONFIG_TEST_LKM) += test_module.o
2888 +diff --git a/lib/siphash.c b/lib/siphash.c
2889 +new file mode 100644
2890 +index 000000000000..3ae58b4edad6
2891 +--- /dev/null
2892 ++++ b/lib/siphash.c
2893 +@@ -0,0 +1,551 @@
2894 ++/* Copyright (C) 2016 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
2895 ++ *
2896 ++ * This file is provided under a dual BSD/GPLv2 license.
2897 ++ *
2898 ++ * SipHash: a fast short-input PRF
2899 ++ * https://131002.net/siphash/
2900 ++ *
2901 ++ * This implementation is specifically for SipHash2-4 for a secure PRF
2902 ++ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
2903 ++ * hashtables.
2904 ++ */
2905 ++
2906 ++#include <linux/siphash.h>
2907 ++#include <asm/unaligned.h>
2908 ++
2909 ++#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
2910 ++#include <linux/dcache.h>
2911 ++#include <asm/word-at-a-time.h>
2912 ++#endif
2913 ++
2914 ++#define SIPROUND \
2915 ++ do { \
2916 ++ v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
2917 ++ v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
2918 ++ v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
2919 ++ v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
2920 ++ } while (0)
2921 ++
2922 ++#define PREAMBLE(len) \
2923 ++ u64 v0 = 0x736f6d6570736575ULL; \
2924 ++ u64 v1 = 0x646f72616e646f6dULL; \
2925 ++ u64 v2 = 0x6c7967656e657261ULL; \
2926 ++ u64 v3 = 0x7465646279746573ULL; \
2927 ++ u64 b = ((u64)(len)) << 56; \
2928 ++ v3 ^= key->key[1]; \
2929 ++ v2 ^= key->key[0]; \
2930 ++ v1 ^= key->key[1]; \
2931 ++ v0 ^= key->key[0];
2932 ++
2933 ++#define POSTAMBLE \
2934 ++ v3 ^= b; \
2935 ++ SIPROUND; \
2936 ++ SIPROUND; \
2937 ++ v0 ^= b; \
2938 ++ v2 ^= 0xff; \
2939 ++ SIPROUND; \
2940 ++ SIPROUND; \
2941 ++ SIPROUND; \
2942 ++ SIPROUND; \
2943 ++ return (v0 ^ v1) ^ (v2 ^ v3);
2944 ++
2945 ++u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
2946 ++{
2947 ++ const u8 *end = data + len - (len % sizeof(u64));
2948 ++ const u8 left = len & (sizeof(u64) - 1);
2949 ++ u64 m;
2950 ++ PREAMBLE(len)
2951 ++ for (; data != end; data += sizeof(u64)) {
2952 ++ m = le64_to_cpup(data);
2953 ++ v3 ^= m;
2954 ++ SIPROUND;
2955 ++ SIPROUND;
2956 ++ v0 ^= m;
2957 ++ }
2958 ++#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
2959 ++ if (left)
2960 ++ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
2961 ++ bytemask_from_count(left)));
2962 ++#else
2963 ++ switch (left) {
2964 ++ case 7: b |= ((u64)end[6]) << 48;
2965 ++ case 6: b |= ((u64)end[5]) << 40;
2966 ++ case 5: b |= ((u64)end[4]) << 32;
2967 ++ case 4: b |= le32_to_cpup(data); break;
2968 ++ case 3: b |= ((u64)end[2]) << 16;
2969 ++ case 2: b |= le16_to_cpup(data); break;
2970 ++ case 1: b |= end[0];
2971 ++ }
2972 ++#endif
2973 ++ POSTAMBLE
2974 ++}
2975 ++EXPORT_SYMBOL(__siphash_aligned);
2976 ++
2977 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2978 ++u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
2979 ++{
2980 ++ const u8 *end = data + len - (len % sizeof(u64));
2981 ++ const u8 left = len & (sizeof(u64) - 1);
2982 ++ u64 m;
2983 ++ PREAMBLE(len)
2984 ++ for (; data != end; data += sizeof(u64)) {
2985 ++ m = get_unaligned_le64(data);
2986 ++ v3 ^= m;
2987 ++ SIPROUND;
2988 ++ SIPROUND;
2989 ++ v0 ^= m;
2990 ++ }
2991 ++#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
2992 ++ if (left)
2993 ++ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
2994 ++ bytemask_from_count(left)));
2995 ++#else
2996 ++ switch (left) {
2997 ++ case 7: b |= ((u64)end[6]) << 48;
2998 ++ case 6: b |= ((u64)end[5]) << 40;
2999 ++ case 5: b |= ((u64)end[4]) << 32;
3000 ++ case 4: b |= get_unaligned_le32(end); break;
3001 ++ case 3: b |= ((u64)end[2]) << 16;
3002 ++ case 2: b |= get_unaligned_le16(end); break;
3003 ++ case 1: b |= end[0];
3004 ++ }
3005 ++#endif
3006 ++ POSTAMBLE
3007 ++}
3008 ++EXPORT_SYMBOL(__siphash_unaligned);
3009 ++#endif
3010 ++
3011 ++/**
3012 ++ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
3013 ++ * @first: first u64
3014 ++ * @key: the siphash key
3015 ++ */
3016 ++u64 siphash_1u64(const u64 first, const siphash_key_t *key)
3017 ++{
3018 ++ PREAMBLE(8)
3019 ++ v3 ^= first;
3020 ++ SIPROUND;
3021 ++ SIPROUND;
3022 ++ v0 ^= first;
3023 ++ POSTAMBLE
3024 ++}
3025 ++EXPORT_SYMBOL(siphash_1u64);
3026 ++
3027 ++/**
3028 ++ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
3029 ++ * @first: first u64
3030 ++ * @second: second u64
3031 ++ * @key: the siphash key
3032 ++ */
3033 ++u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
3034 ++{
3035 ++ PREAMBLE(16)
3036 ++ v3 ^= first;
3037 ++ SIPROUND;
3038 ++ SIPROUND;
3039 ++ v0 ^= first;
3040 ++ v3 ^= second;
3041 ++ SIPROUND;
3042 ++ SIPROUND;
3043 ++ v0 ^= second;
3044 ++ POSTAMBLE
3045 ++}
3046 ++EXPORT_SYMBOL(siphash_2u64);
3047 ++
3048 ++/**
3049 ++ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
3050 ++ * @first: first u64
3051 ++ * @second: second u64
3052 ++ * @third: third u64
3053 ++ * @key: the siphash key
3054 ++ */
3055 ++u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
3056 ++ const siphash_key_t *key)
3057 ++{
3058 ++ PREAMBLE(24)
3059 ++ v3 ^= first;
3060 ++ SIPROUND;
3061 ++ SIPROUND;
3062 ++ v0 ^= first;
3063 ++ v3 ^= second;
3064 ++ SIPROUND;
3065 ++ SIPROUND;
3066 ++ v0 ^= second;
3067 ++ v3 ^= third;
3068 ++ SIPROUND;
3069 ++ SIPROUND;
3070 ++ v0 ^= third;
3071 ++ POSTAMBLE
3072 ++}
3073 ++EXPORT_SYMBOL(siphash_3u64);
3074 ++
3075 ++/**
3076 ++ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
3077 ++ * @first: first u64
3078 ++ * @second: second u64
3079 ++ * @third: third u64
3080 ++ * @forth: fourth u64
3081 ++ * @key: the siphash key
3082 ++ */
3083 ++u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
3084 ++ const u64 forth, const siphash_key_t *key)
3085 ++{
3086 ++ PREAMBLE(32)
3087 ++ v3 ^= first;
3088 ++ SIPROUND;
3089 ++ SIPROUND;
3090 ++ v0 ^= first;
3091 ++ v3 ^= second;
3092 ++ SIPROUND;
3093 ++ SIPROUND;
3094 ++ v0 ^= second;
3095 ++ v3 ^= third;
3096 ++ SIPROUND;
3097 ++ SIPROUND;
3098 ++ v0 ^= third;
3099 ++ v3 ^= forth;
3100 ++ SIPROUND;
3101 ++ SIPROUND;
3102 ++ v0 ^= forth;
3103 ++ POSTAMBLE
3104 ++}
3105 ++EXPORT_SYMBOL(siphash_4u64);
3106 ++
3107 ++u64 siphash_1u32(const u32 first, const siphash_key_t *key)
3108 ++{
3109 ++ PREAMBLE(4)
3110 ++ b |= first;
3111 ++ POSTAMBLE
3112 ++}
3113 ++EXPORT_SYMBOL(siphash_1u32);
3114 ++
3115 ++u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
3116 ++ const siphash_key_t *key)
3117 ++{
3118 ++ u64 combined = (u64)second << 32 | first;
3119 ++ PREAMBLE(12)
3120 ++ v3 ^= combined;
3121 ++ SIPROUND;
3122 ++ SIPROUND;
3123 ++ v0 ^= combined;
3124 ++ b |= third;
3125 ++ POSTAMBLE
3126 ++}
3127 ++EXPORT_SYMBOL(siphash_3u32);
3128 ++
3129 ++#if BITS_PER_LONG == 64
3130 ++/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
3131 ++ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
3132 ++ */
3133 ++
3134 ++#define HSIPROUND SIPROUND
3135 ++#define HPREAMBLE(len) PREAMBLE(len)
3136 ++#define HPOSTAMBLE \
3137 ++ v3 ^= b; \
3138 ++ HSIPROUND; \
3139 ++ v0 ^= b; \
3140 ++ v2 ^= 0xff; \
3141 ++ HSIPROUND; \
3142 ++ HSIPROUND; \
3143 ++ HSIPROUND; \
3144 ++ return (v0 ^ v1) ^ (v2 ^ v3);
3145 ++
3146 ++u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
3147 ++{
3148 ++ const u8 *end = data + len - (len % sizeof(u64));
3149 ++ const u8 left = len & (sizeof(u64) - 1);
3150 ++ u64 m;
3151 ++ HPREAMBLE(len)
3152 ++ for (; data != end; data += sizeof(u64)) {
3153 ++ m = le64_to_cpup(data);
3154 ++ v3 ^= m;
3155 ++ HSIPROUND;
3156 ++ v0 ^= m;
3157 ++ }
3158 ++#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
3159 ++ if (left)
3160 ++ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
3161 ++ bytemask_from_count(left)));
3162 ++#else
3163 ++ switch (left) {
3164 ++ case 7: b |= ((u64)end[6]) << 48;
3165 ++ case 6: b |= ((u64)end[5]) << 40;
3166 ++ case 5: b |= ((u64)end[4]) << 32;
3167 ++ case 4: b |= le32_to_cpup(data); break;
3168 ++ case 3: b |= ((u64)end[2]) << 16;
3169 ++ case 2: b |= le16_to_cpup(data); break;
3170 ++ case 1: b |= end[0];
3171 ++ }
3172 ++#endif
3173 ++ HPOSTAMBLE
3174 ++}
3175 ++EXPORT_SYMBOL(__hsiphash_aligned);
3176 ++
3177 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3178 ++u32 __hsiphash_unaligned(const void *data, size_t len,
3179 ++ const hsiphash_key_t *key)
3180 ++{
3181 ++ const u8 *end = data + len - (len % sizeof(u64));
3182 ++ const u8 left = len & (sizeof(u64) - 1);
3183 ++ u64 m;
3184 ++ HPREAMBLE(len)
3185 ++ for (; data != end; data += sizeof(u64)) {
3186 ++ m = get_unaligned_le64(data);
3187 ++ v3 ^= m;
3188 ++ HSIPROUND;
3189 ++ v0 ^= m;
3190 ++ }
3191 ++#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
3192 ++ if (left)
3193 ++ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
3194 ++ bytemask_from_count(left)));
3195 ++#else
3196 ++ switch (left) {
3197 ++ case 7: b |= ((u64)end[6]) << 48;
3198 ++ case 6: b |= ((u64)end[5]) << 40;
3199 ++ case 5: b |= ((u64)end[4]) << 32;
3200 ++ case 4: b |= get_unaligned_le32(end); break;
3201 ++ case 3: b |= ((u64)end[2]) << 16;
3202 ++ case 2: b |= get_unaligned_le16(end); break;
3203 ++ case 1: b |= end[0];
3204 ++ }
3205 ++#endif
3206 ++ HPOSTAMBLE
3207 ++}
3208 ++EXPORT_SYMBOL(__hsiphash_unaligned);
3209 ++#endif
3210 ++
3211 ++/**
3212 ++ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
3213 ++ * @first: first u32
3214 ++ * @key: the hsiphash key
3215 ++ */
3216 ++u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
3217 ++{
3218 ++ HPREAMBLE(4)
3219 ++ b |= first;
3220 ++ HPOSTAMBLE
3221 ++}
3222 ++EXPORT_SYMBOL(hsiphash_1u32);
3223 ++
3224 ++/**
3225 ++ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
3226 ++ * @first: first u32
3227 ++ * @second: second u32
3228 ++ * @key: the hsiphash key
3229 ++ */
3230 ++u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
3231 ++{
3232 ++ u64 combined = (u64)second << 32 | first;
3233 ++ HPREAMBLE(8)
3234 ++ v3 ^= combined;
3235 ++ HSIPROUND;
3236 ++ v0 ^= combined;
3237 ++ HPOSTAMBLE
3238 ++}
3239 ++EXPORT_SYMBOL(hsiphash_2u32);
3240 ++
3241 ++/**
3242 ++ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
3243 ++ * @first: first u32
3244 ++ * @second: second u32
3245 ++ * @third: third u32
3246 ++ * @key: the hsiphash key
3247 ++ */
3248 ++u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
3249 ++ const hsiphash_key_t *key)
3250 ++{
3251 ++ u64 combined = (u64)second << 32 | first;
3252 ++ HPREAMBLE(12)
3253 ++ v3 ^= combined;
3254 ++ HSIPROUND;
3255 ++ v0 ^= combined;
3256 ++ b |= third;
3257 ++ HPOSTAMBLE
3258 ++}
3259 ++EXPORT_SYMBOL(hsiphash_3u32);
3260 ++
3261 ++/**
3262 ++ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
3263 ++ * @first: first u32
3264 ++ * @second: second u32
3265 ++ * @third: third u32
3266 ++ * @forth: fourth u32
3267 ++ * @key: the hsiphash key
3268 ++ */
3269 ++u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
3270 ++ const u32 forth, const hsiphash_key_t *key)
3271 ++{
3272 ++ u64 combined = (u64)second << 32 | first;
3273 ++ HPREAMBLE(16)
3274 ++ v3 ^= combined;
3275 ++ HSIPROUND;
3276 ++ v0 ^= combined;
3277 ++ combined = (u64)forth << 32 | third;
3278 ++ v3 ^= combined;
3279 ++ HSIPROUND;
3280 ++ v0 ^= combined;
3281 ++ HPOSTAMBLE
3282 ++}
3283 ++EXPORT_SYMBOL(hsiphash_4u32);
3284 ++#else
3285 ++#define HSIPROUND \
3286 ++ do { \
3287 ++ v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
3288 ++ v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
3289 ++ v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
3290 ++ v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
3291 ++ } while (0)
3292 ++
3293 ++#define HPREAMBLE(len) \
3294 ++ u32 v0 = 0; \
3295 ++ u32 v1 = 0; \
3296 ++ u32 v2 = 0x6c796765U; \
3297 ++ u32 v3 = 0x74656462U; \
3298 ++ u32 b = ((u32)(len)) << 24; \
3299 ++ v3 ^= key->key[1]; \
3300 ++ v2 ^= key->key[0]; \
3301 ++ v1 ^= key->key[1]; \
3302 ++ v0 ^= key->key[0];
3303 ++
3304 ++#define HPOSTAMBLE \
3305 ++ v3 ^= b; \
3306 ++ HSIPROUND; \
3307 ++ v0 ^= b; \
3308 ++ v2 ^= 0xff; \
3309 ++ HSIPROUND; \
3310 ++ HSIPROUND; \
3311 ++ HSIPROUND; \
3312 ++ return v1 ^ v3;
3313 ++
3314 ++u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
3315 ++{
3316 ++ const u8 *end = data + len - (len % sizeof(u32));
3317 ++ const u8 left = len & (sizeof(u32) - 1);
3318 ++ u32 m;
3319 ++ HPREAMBLE(len)
3320 ++ for (; data != end; data += sizeof(u32)) {
3321 ++ m = le32_to_cpup(data);
3322 ++ v3 ^= m;
3323 ++ HSIPROUND;
3324 ++ v0 ^= m;
3325 ++ }
3326 ++ switch (left) {
3327 ++ case 3: b |= ((u32)end[2]) << 16;
3328 ++ case 2: b |= le16_to_cpup(data); break;
3329 ++ case 1: b |= end[0];
3330 ++ }
3331 ++ HPOSTAMBLE
3332 ++}
3333 ++EXPORT_SYMBOL(__hsiphash_aligned);
3334 ++
3335 ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3336 ++u32 __hsiphash_unaligned(const void *data, size_t len,
3337 ++ const hsiphash_key_t *key)
3338 ++{
3339 ++ const u8 *end = data + len - (len % sizeof(u32));
3340 ++ const u8 left = len & (sizeof(u32) - 1);
3341 ++ u32 m;
3342 ++ HPREAMBLE(len)
3343 ++ for (; data != end; data += sizeof(u32)) {
3344 ++ m = get_unaligned_le32(data);
3345 ++ v3 ^= m;
3346 ++ HSIPROUND;
3347 ++ v0 ^= m;
3348 ++ }
3349 ++ switch (left) {
3350 ++ case 3: b |= ((u32)end[2]) << 16;
3351 ++ case 2: b |= get_unaligned_le16(end); break;
3352 ++ case 1: b |= end[0];
3353 ++ }
3354 ++ HPOSTAMBLE
3355 ++}
3356 ++EXPORT_SYMBOL(__hsiphash_unaligned);
3357 ++#endif
3358 ++
3359 ++/**
3360 ++ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
3361 ++ * @first: first u32
3362 ++ * @key: the hsiphash key
3363 ++ */
3364 ++u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
3365 ++{
3366 ++ HPREAMBLE(4)
3367 ++ v3 ^= first;
3368 ++ HSIPROUND;
3369 ++ v0 ^= first;
3370 ++ HPOSTAMBLE
3371 ++}
3372 ++EXPORT_SYMBOL(hsiphash_1u32);
3373 ++
3374 ++/**
3375 ++ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
3376 ++ * @first: first u32
3377 ++ * @second: second u32
3378 ++ * @key: the hsiphash key
3379 ++ */
3380 ++u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
3381 ++{
3382 ++ HPREAMBLE(8)
3383 ++ v3 ^= first;
3384 ++ HSIPROUND;
3385 ++ v0 ^= first;
3386 ++ v3 ^= second;
3387 ++ HSIPROUND;
3388 ++ v0 ^= second;
3389 ++ HPOSTAMBLE
3390 ++}
3391 ++EXPORT_SYMBOL(hsiphash_2u32);
3392 ++
3393 ++/**
3394 ++ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
3395 ++ * @first: first u32
3396 ++ * @second: second u32
3397 ++ * @third: third u32
3398 ++ * @key: the hsiphash key
3399 ++ */
3400 ++u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
3401 ++ const hsiphash_key_t *key)
3402 ++{
3403 ++ HPREAMBLE(12)
3404 ++ v3 ^= first;
3405 ++ HSIPROUND;
3406 ++ v0 ^= first;
3407 ++ v3 ^= second;
3408 ++ HSIPROUND;
3409 ++ v0 ^= second;
3410 ++ v3 ^= third;
3411 ++ HSIPROUND;
3412 ++ v0 ^= third;
3413 ++ HPOSTAMBLE
3414 ++}
3415 ++EXPORT_SYMBOL(hsiphash_3u32);
3416 ++
3417 ++/**
3418 ++ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
3419 ++ * @first: first u32
3420 ++ * @second: second u32
3421 ++ * @third: third u32
3422 ++ * @forth: fourth u32
3423 ++ * @key: the hsiphash key
3424 ++ */
3425 ++u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
3426 ++ const u32 forth, const hsiphash_key_t *key)
3427 ++{
3428 ++ HPREAMBLE(16)
3429 ++ v3 ^= first;
3430 ++ HSIPROUND;
3431 ++ v0 ^= first;
3432 ++ v3 ^= second;
3433 ++ HSIPROUND;
3434 ++ v0 ^= second;
3435 ++ v3 ^= third;
3436 ++ HSIPROUND;
3437 ++ v0 ^= third;
3438 ++ v3 ^= forth;
3439 ++ HSIPROUND;
3440 ++ v0 ^= forth;
3441 ++ HPOSTAMBLE
3442 ++}
3443 ++EXPORT_SYMBOL(hsiphash_4u32);
3444 ++#endif
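
A minimal kernel-style sketch (not part of the patch) of the hash-table use this 64-bit/32-bit split targets: the keyed PRF output is reduced to a bucket index, with the key seeded lazily on first use. The names flow_key, flow_bucket and HT_BITS are hypothetical; siphash() and net_get_random_once() are the helpers this patch and the 4.4 tree already provide.

	#include <linux/net.h>		/* net_get_random_once() */
	#include <linux/siphash.h>

	#define HT_BITS 10

	struct flow_key {
		u32 saddr, daddr;
	} __aligned(SIPHASH_ALIGNMENT);

	static u32 flow_bucket(const struct flow_key *k)
	{
		static siphash_key_t ht_key __read_mostly;

		net_get_random_once(&ht_key, sizeof(ht_key));
		/* Reduce the PRF output to a bucket index. */
		return (u32)(siphash(k, sizeof(*k), &ht_key) &
			     ((1U << HT_BITS) - 1));
	}
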
3445 +diff --git a/lib/test_siphash.c b/lib/test_siphash.c
3446 +new file mode 100644
3447 +index 000000000000..a6d854d933bf
3448 +--- /dev/null
3449 ++++ b/lib/test_siphash.c
3450 +@@ -0,0 +1,223 @@
3451 ++/* Test cases for siphash.c
3452 ++ *
3453 ++ * Copyright (C) 2016 Jason A. Donenfeld <Jason@×××××.com>. All Rights Reserved.
3454 ++ *
3455 ++ * This file is provided under a dual BSD/GPLv2 license.
3456 ++ *
3457 ++ * SipHash: a fast short-input PRF
3458 ++ * https://131002.net/siphash/
3459 ++ *
3460 ++ * This implementation is specifically for SipHash2-4 for a secure PRF
3461 ++ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
3462 ++ * hashtables.
3463 ++ */
3464 ++
3465 ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3466 ++
3467 ++#include <linux/siphash.h>
3468 ++#include <linux/kernel.h>
3469 ++#include <linux/string.h>
3470 ++#include <linux/errno.h>
3471 ++#include <linux/module.h>
3472 ++
3473 ++/* Test vectors taken from reference source available at:
3474 ++ * https://github.com/veorq/SipHash
3475 ++ */
3476 ++
3477 ++static const siphash_key_t test_key_siphash =
3478 ++ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
3479 ++
3480 ++static const u64 test_vectors_siphash[64] = {
3481 ++ 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
3482 ++ 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
3483 ++ 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
3484 ++ 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
3485 ++ 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
3486 ++ 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
3487 ++ 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
3488 ++ 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
3489 ++ 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
3490 ++ 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
3491 ++ 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
3492 ++ 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
3493 ++ 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
3494 ++ 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
3495 ++ 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
3496 ++ 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
3497 ++ 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
3498 ++ 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
3499 ++ 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
3500 ++ 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
3501 ++ 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
3502 ++ 0x958a324ceb064572ULL
3503 ++};
3504 ++
3505 ++#if BITS_PER_LONG == 64
3506 ++static const hsiphash_key_t test_key_hsiphash =
3507 ++ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
3508 ++
3509 ++static const u32 test_vectors_hsiphash[64] = {
3510 ++ 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
3511 ++ 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
3512 ++ 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
3513 ++ 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
3514 ++ 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
3515 ++ 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
3516 ++ 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
3517 ++ 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
3518 ++ 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
3519 ++ 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
3520 ++ 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
3521 ++ 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
3522 ++ 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
3523 ++ 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
3524 ++ 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
3525 ++ 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
3526 ++ 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
3527 ++ 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
3528 ++ 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
3529 ++ 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
3530 ++ 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
3531 ++ 0xb7bbb3a8U
3532 ++};
3533 ++#else
3534 ++static const hsiphash_key_t test_key_hsiphash =
3535 ++ {{ 0x03020100U, 0x07060504U }};
3536 ++
3537 ++static const u32 test_vectors_hsiphash[64] = {
3538 ++ 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
3539 ++ 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
3540 ++ 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
3541 ++ 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
3542 ++ 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
3543 ++ 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
3544 ++ 0x06712339U, 0x522aca67U, 0x911bb605U,
3545 ++ 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
3546 ++ 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
3547 ++ 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
3548 ++ 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
3549 ++ 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
3550 ++ 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
3551 ++ 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
3552 ++ 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
3553 ++ 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
3554 ++ 0x007783ebU, 0x95766243U, 0xab639262U,
3555 ++ 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
3556 ++ 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
3557 ++ 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
3558 ++ 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
3559 ++ 0x87178304U
3560 ++};
3561 ++#endif
3562 ++
3563 ++static int __init siphash_test_init(void)
3564 ++{
3565 ++ u8 in[64] __aligned(SIPHASH_ALIGNMENT);
3566 ++ u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
3567 ++ u8 i;
3568 ++ int ret = 0;
3569 ++
3570 ++ for (i = 0; i < 64; ++i) {
3571 ++ in[i] = i;
3572 ++ in_unaligned[i + 1] = i;
3573 ++ if (siphash(in, i, &test_key_siphash) !=
3574 ++ test_vectors_siphash[i]) {
3575 ++ pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
3576 ++ ret = -EINVAL;
3577 ++ }
3578 ++ if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
3579 ++ test_vectors_siphash[i]) {
3580 ++ pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
3581 ++ ret = -EINVAL;
3582 ++ }
3583 ++ if (hsiphash(in, i, &test_key_hsiphash) !=
3584 ++ test_vectors_hsiphash[i]) {
3585 ++ pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
3586 ++ ret = -EINVAL;
3587 ++ }
3588 ++ if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
3589 ++ test_vectors_hsiphash[i]) {
3590 ++ pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
3591 ++ ret = -EINVAL;
3592 ++ }
3593 ++ }
3594 ++ if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
3595 ++ test_vectors_siphash[8]) {
3596 ++ pr_info("siphash self-test 1u64: FAIL\n");
3597 ++ ret = -EINVAL;
3598 ++ }
3599 ++ if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
3600 ++ &test_key_siphash) != test_vectors_siphash[16]) {
3601 ++ pr_info("siphash self-test 2u64: FAIL\n");
3602 ++ ret = -EINVAL;
3603 ++ }
3604 ++ if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
3605 ++ 0x1716151413121110ULL, &test_key_siphash) !=
3606 ++ test_vectors_siphash[24]) {
3607 ++ pr_info("siphash self-test 3u64: FAIL\n");
3608 ++ ret = -EINVAL;
3609 ++ }
3610 ++ if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
3611 ++ 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
3612 ++ &test_key_siphash) != test_vectors_siphash[32]) {
3613 ++ pr_info("siphash self-test 4u64: FAIL\n");
3614 ++ ret = -EINVAL;
3615 ++ }
3616 ++ if (siphash_1u32(0x03020100U, &test_key_siphash) !=
3617 ++ test_vectors_siphash[4]) {
3618 ++ pr_info("siphash self-test 1u32: FAIL\n");
3619 ++ ret = -EINVAL;
3620 ++ }
3621 ++ if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
3622 ++ test_vectors_siphash[8]) {
3623 ++ pr_info("siphash self-test 2u32: FAIL\n");
3624 ++ ret = -EINVAL;
3625 ++ }
3626 ++ if (siphash_3u32(0x03020100U, 0x07060504U,
3627 ++ 0x0b0a0908U, &test_key_siphash) !=
3628 ++ test_vectors_siphash[12]) {
3629 ++ pr_info("siphash self-test 3u32: FAIL\n");
3630 ++ ret = -EINVAL;
3631 ++ }
3632 ++ if (siphash_4u32(0x03020100U, 0x07060504U,
3633 ++ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
3634 ++ test_vectors_siphash[16]) {
3635 ++ pr_info("siphash self-test 4u32: FAIL\n");
3636 ++ ret = -EINVAL;
3637 ++ }
3638 ++ if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
3639 ++ test_vectors_hsiphash[4]) {
3640 ++ pr_info("hsiphash self-test 1u32: FAIL\n");
3641 ++ ret = -EINVAL;
3642 ++ }
3643 ++ if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
3644 ++ test_vectors_hsiphash[8]) {
3645 ++ pr_info("hsiphash self-test 2u32: FAIL\n");
3646 ++ ret = -EINVAL;
3647 ++ }
3648 ++ if (hsiphash_3u32(0x03020100U, 0x07060504U,
3649 ++ 0x0b0a0908U, &test_key_hsiphash) !=
3650 ++ test_vectors_hsiphash[12]) {
3651 ++ pr_info("hsiphash self-test 3u32: FAIL\n");
3652 ++ ret = -EINVAL;
3653 ++ }
3654 ++ if (hsiphash_4u32(0x03020100U, 0x07060504U,
3655 ++ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
3656 ++ test_vectors_hsiphash[16]) {
3657 ++ pr_info("hsiphash self-test 4u32: FAIL\n");
3658 ++ ret = -EINVAL;
3659 ++ }
3660 ++ if (!ret)
3661 ++ pr_info("self-tests: pass\n");
3662 ++ return ret;
3663 ++}
3664 ++
3665 ++static void __exit siphash_test_exit(void)
3666 ++{
3667 ++}
3668 ++
3669 ++module_init(siphash_test_init);
3670 ++module_exit(siphash_test_exit);
3671 ++
3672 ++MODULE_AUTHOR("Jason A. Donenfeld <Jason@×××××.com>");
3673 ++MODULE_LICENSE("Dual BSD/GPL");
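
For cross-checking outside the kernel, the vector the self-test uses for siphash_1u64 (test_vectors_siphash[8], the official SipHash-2-4 vector for an 8-byte input) can be reproduced with a tiny freestanding implementation. A userspace sketch, not part of the patch, following the same PREAMBLE/POSTAMBLE structure as lib/siphash.c above; it builds with any C99 compiler:

	#include <stdint.h>
	#include <stdio.h>

	#define ROTL64(x, b) (((x) << (b)) | ((x) >> (64 - (b))))

	#define SIPROUND do { \
		v0 += v1; v1 = ROTL64(v1, 13); v1 ^= v0; v0 = ROTL64(v0, 32); \
		v2 += v3; v3 = ROTL64(v3, 16); v3 ^= v2; \
		v0 += v3; v3 = ROTL64(v3, 21); v3 ^= v0; \
		v2 += v1; v1 = ROTL64(v1, 17); v1 ^= v2; v2 = ROTL64(v2, 32); \
	} while (0)

	static uint64_t siphash24_1u64(uint64_t m, uint64_t k0, uint64_t k1)
	{
		uint64_t v0 = 0x736f6d6570736575ULL ^ k0;
		uint64_t v1 = 0x646f72616e646f6dULL ^ k1;
		uint64_t v2 = 0x6c7967656e657261ULL ^ k0;
		uint64_t v3 = 0x7465646279746573ULL ^ k1;
		uint64_t b = 8ULL << 56;	/* message length in the top byte */

		v3 ^= m; SIPROUND; SIPROUND; v0 ^= m;	/* one compression block */
		v3 ^= b; SIPROUND; SIPROUND; v0 ^= b;	/* length block */
		v2 ^= 0xff;				/* finalization */
		SIPROUND; SIPROUND; SIPROUND; SIPROUND;
		return (v0 ^ v1) ^ (v2 ^ v3);
	}

	int main(void)
	{
		uint64_t out = siphash24_1u64(0x0706050403020100ULL, /* bytes 00..07 */
					      0x0706050403020100ULL, /* key[0] */
					      0x0f0e0d0c0b0a0908ULL); /* key[1] */

		printf("%s\n", out == 0x93f5f5799a932462ULL ? "ok" : "MISMATCH");
		return out == 0x93f5f5799a932462ULL ? 0 : 1;
	}
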
3674 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3675 +index 1a87cf78fadc..d9471e3ef216 100644
3676 +--- a/net/bridge/netfilter/ebtables.c
3677 ++++ b/net/bridge/netfilter/ebtables.c
3678 +@@ -2280,8 +2280,10 @@ static int compat_do_replace(struct net *net, void __user *user,
3679 + state.buf_kern_len = size64;
3680 +
3681 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
3682 +- if (WARN_ON(ret < 0))
3683 ++ if (WARN_ON(ret < 0)) {
3684 ++ vfree(entries_tmp);
3685 + goto out_unlock;
3686 ++ }
3687 +
3688 + vfree(entries_tmp);
3689 + tmp.entries_size = size64;
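
The ebtables change above is a leak fix: previously a failed compat_copy_entries() jumped to out_unlock with entries_tmp still allocated. A reduced sketch of the shape being restored, with do_copy() as a hypothetical fallible helper:

	#include <linux/errno.h>
	#include <linux/vmalloc.h>

	int do_copy(void *buf, size_t size);	/* hypothetical, may fail */

	static int copy_with_cleanup(size_t size)
	{
		void *tmp = vmalloc(size);
		int ret;

		if (!tmp)
			return -ENOMEM;
		ret = do_copy(tmp, size);
		if (ret < 0) {
			vfree(tmp);	/* the release the fix adds */
			return ret;
		}
		vfree(tmp);		/* success path frees it as before */
		return 0;
	}
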
3690 +diff --git a/net/core/stream.c b/net/core/stream.c
3691 +index b96f7a79e544..3089b014bb53 100644
3692 +--- a/net/core/stream.c
3693 ++++ b/net/core/stream.c
3694 +@@ -119,7 +119,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
3695 + int err = 0;
3696 + long vm_wait = 0;
3697 + long current_timeo = *timeo_p;
3698 +- bool noblock = (*timeo_p ? false : true);
3699 + DEFINE_WAIT(wait);
3700 +
3701 + if (sk_stream_memory_free(sk))
3702 +@@ -132,11 +131,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
3703 +
3704 + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
3705 + goto do_error;
3706 +- if (!*timeo_p) {
3707 +- if (noblock)
3708 +- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
3709 +- goto do_nonblock;
3710 +- }
3711 ++ if (!*timeo_p)
3712 ++ goto do_eagain;
3713 + if (signal_pending(current))
3714 + goto do_interrupted;
3715 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
3716 +@@ -168,7 +164,13 @@ out:
3717 + do_error:
3718 + err = -EPIPE;
3719 + goto out;
3720 +-do_nonblock:
3721 ++do_eagain:
3722 ++ /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
3723 ++ * be generated later.
3724 ++ * When TCP receives ACK packets that make room, tcp_check_space()
3725 ++ * only calls tcp_new_space() if SOCK_NOSPACE is set.
3726 ++ */
3727 ++ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
3728 + err = -EAGAIN;
3729 + goto out;
3730 + do_interrupted:
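
The SOCK_NOSPACE comment above is what makes the common userspace pattern below safe. A hedged sketch, assuming fd is a connected TCP socket already registered in epfd with EPOLLOUT via epoll_ctl(): after a nonblocking send() hits EAGAIN, the application parks in epoll and relies on the kernel raising EPOLLOUT once ACKs free send-queue space.

	#include <errno.h>
	#include <sys/epoll.h>
	#include <sys/socket.h>
	#include <sys/types.h>

	static ssize_t send_all(int fd, int epfd, const char *buf, size_t len)
	{
		size_t off = 0;

		while (off < len) {
			ssize_t n = send(fd, buf + off, len - off, MSG_DONTWAIT);

			if (n > 0) {
				off += (size_t)n;
				continue;
			}
			if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
				struct epoll_event ev;

				/* Hinges on the kernel setting SOCK_NOSPACE:
				 * otherwise EPOLLOUT may never fire and this
				 * blocks forever despite space freeing up. */
				if (epoll_wait(epfd, &ev, 1, -1) < 0)
					return -1;
				continue;
			}
			return -1;
		}
		return (ssize_t)off;
	}
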
3731 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3732 +index a58effba760a..3c605a788ba1 100644
3733 +--- a/net/ipv4/route.c
3734 ++++ b/net/ipv4/route.c
3735 +@@ -490,15 +490,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
3736 +
3737 + void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
3738 + {
3739 +- static u32 ip_idents_hashrnd __read_mostly;
3740 + u32 hash, id;
3741 +
3742 +- net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
3743 ++	/* Note the following lockless key init is racy, but this is okay. */
3744 ++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
3745 ++ get_random_bytes(&net->ipv4.ip_id_key,
3746 ++ sizeof(net->ipv4.ip_id_key));
3747 +
3748 +- hash = jhash_3words((__force u32)iph->daddr,
3749 ++ hash = siphash_3u32((__force u32)iph->daddr,
3750 + (__force u32)iph->saddr,
3751 +- iph->protocol ^ net_hash_mix(net),
3752 +- ip_idents_hashrnd);
3753 ++ iph->protocol,
3754 ++ &net->ipv4.ip_id_key);
3755 + id = ip_idents_reserve(hash, segs);
3756 + iph->id = htons(id);
3757 + }
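
The per-netns key above is seeded lazily rather than at namespace creation. A sketch of that idiom in isolation, assuming the siphash_key_is_zero() helper this patch relies on: an all-zero key doubles as "not yet seeded", and the tolerated race is bounded.

	#include <linux/random.h>
	#include <linux/siphash.h>

	static void example_key_init(siphash_key_t *key)
	{
		/* If two CPUs race here, both write fresh random bytes;
		 * the worst case is an extra reseed, which is harmless. */
		if (unlikely(siphash_key_is_zero(key)))
			get_random_bytes(key, sizeof(*key));
	}
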
3758 +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
3759 +index f99a04674419..6b896cc9604e 100644
3760 +--- a/net/ipv6/output_core.c
3761 ++++ b/net/ipv6/output_core.c
3762 +@@ -10,15 +10,25 @@
3763 + #include <net/secure_seq.h>
3764 + #include <linux/netfilter.h>
3765 +
3766 +-static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
3767 ++static u32 __ipv6_select_ident(struct net *net,
3768 + const struct in6_addr *dst,
3769 + const struct in6_addr *src)
3770 + {
3771 ++ const struct {
3772 ++ struct in6_addr dst;
3773 ++ struct in6_addr src;
3774 ++ } __aligned(SIPHASH_ALIGNMENT) combined = {
3775 ++ .dst = *dst,
3776 ++ .src = *src,
3777 ++ };
3778 + u32 hash, id;
3779 +
3780 +- hash = __ipv6_addr_jhash(dst, hashrnd);
3781 +- hash = __ipv6_addr_jhash(src, hash);
3782 +- hash ^= net_hash_mix(net);
3783 ++	/* Note the following lockless key init is racy, but this is okay. */
3784 ++ if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
3785 ++ get_random_bytes(&net->ipv4.ip_id_key,
3786 ++ sizeof(net->ipv4.ip_id_key));
3787 ++
3788 ++ hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
3789 +
3790 + /* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
3791 + 	 * set the high order instead thus minimizing possible future
3792 +@@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
3793 + */
3794 + void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
3795 + {
3796 +- static u32 ip6_proxy_idents_hashrnd __read_mostly;
3797 + struct in6_addr buf[2];
3798 + struct in6_addr *addrs;
3799 + u32 id;
3800 +@@ -53,11 +62,7 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
3801 + if (!addrs)
3802 + return;
3803 +
3804 +- net_get_random_once(&ip6_proxy_idents_hashrnd,
3805 +- sizeof(ip6_proxy_idents_hashrnd));
3806 +-
3807 +- id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
3808 +- &addrs[1], &addrs[0]);
3809 ++ id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
3810 + skb_shinfo(skb)->ip6_frag_id = htonl(id);
3811 + }
3812 + EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
3813 +@@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
3814 + const struct in6_addr *daddr,
3815 + const struct in6_addr *saddr)
3816 + {
3817 +- static u32 ip6_idents_hashrnd __read_mostly;
3818 + u32 id;
3819 +
3820 +- net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
3821 +-
3822 +- id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
3823 ++ id = __ipv6_select_ident(net, daddr, saddr);
3824 + return htonl(id);
3825 + }
3826 + EXPORT_SYMBOL(ipv6_select_ident);
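
Restating the composite-key pattern from __ipv6_select_ident() in isolation: related fields are packed into one stack struct aligned for the fast aligned-siphash path, then hashed in a single call. A sketch with a hypothetical addr_pair_hash():

	#include <linux/in6.h>
	#include <linux/siphash.h>

	static u32 addr_pair_hash(const struct in6_addr *dst,
				  const struct in6_addr *src,
				  const siphash_key_t *key)
	{
		struct {
			struct in6_addr dst;
			struct in6_addr src;
		} __aligned(SIPHASH_ALIGNMENT) combined = {
			.dst = *dst,
			.src = *src,
		};

		return (u32)siphash(&combined, sizeof(combined), key);
	}
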
3827 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
3828 +index 7349bf26ae7b..1999a7eaa692 100644
3829 +--- a/net/mac80211/cfg.c
3830 ++++ b/net/mac80211/cfg.c
3831 +@@ -1211,6 +1211,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
3832 + if (is_multicast_ether_addr(mac))
3833 + return -EINVAL;
3834 +
3835 ++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
3836 ++ sdata->vif.type == NL80211_IFTYPE_STATION &&
3837 ++ !sdata->u.mgd.associated)
3838 ++ return -EINVAL;
3839 ++
3840 + sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
3841 + if (!sta)
3842 + return -ENOMEM;
3843 +@@ -1228,10 +1233,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
3844 + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3845 + sta->sta.tdls = true;
3846 +
3847 +- if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
3848 +- !sdata->u.mgd.associated)
3849 +- return -EINVAL;
3850 +-
3851 + err = sta_apply_parameters(local, sta, params);
3852 + if (err) {
3853 + sta_info_free(local, sta);
3854 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
3855 +index 5f747089024f..de0aad12b91d 100644
3856 +--- a/net/netfilter/nf_conntrack_core.c
3857 ++++ b/net/netfilter/nf_conntrack_core.c
3858 +@@ -23,6 +23,7 @@
3859 + #include <linux/slab.h>
3860 + #include <linux/random.h>
3861 + #include <linux/jhash.h>
3862 ++#include <linux/siphash.h>
3863 + #include <linux/err.h>
3864 + #include <linux/percpu.h>
3865 + #include <linux/moduleparam.h>
3866 +@@ -234,6 +235,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
3867 + }
3868 + EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
3869 +
3870 ++/* Generate an almost-unique pseudo-id for a given conntrack.
3871 ++ *
3872 ++ * It intentionally doesn't re-use any of the seeds used for hash
3873 ++ * table location; we assume the id gets exposed to userspace.
3874 ++ *
3875 ++ * The following nf_conn items do not change throughout the lifetime
3876 ++ * of the nf_conn:
3877 ++ *
3878 ++ * 1. nf_conn address
3879 ++ * 2. nf_conn->master address (normally NULL)
3880 ++ * 3. the associated net namespace
3881 ++ * 4. the original direction tuple
3882 ++ */
3883 ++u32 nf_ct_get_id(const struct nf_conn *ct)
3884 ++{
3885 ++ static __read_mostly siphash_key_t ct_id_seed;
3886 ++ unsigned long a, b, c, d;
3887 ++
3888 ++ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
3889 ++
3890 ++ a = (unsigned long)ct;
3891 ++ b = (unsigned long)ct->master;
3892 ++ c = (unsigned long)nf_ct_net(ct);
3893 ++ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3894 ++ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
3895 ++ &ct_id_seed);
3896 ++#ifdef CONFIG_64BIT
3897 ++ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
3898 ++#else
3899 ++ return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
3900 ++#endif
3901 ++}
3902 ++EXPORT_SYMBOL_GPL(nf_ct_get_id);
3903 ++
3904 + static void
3905 + clean_from_lists(struct nf_conn *ct)
3906 + {
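
The CONFIG_64BIT branch in nf_ct_get_id() exists because the inputs are pointer-sized. A reduced sketch of the same word-size dispatch, with a hypothetical mix_two_pointers(): picking the siphash helper whose argument width matches unsigned long avoids truncating pointers on 64-bit kernels.

	#include <linux/siphash.h>

	static u32 mix_two_pointers(const void *a, const void *b,
				    const siphash_key_t *seed)
	{
	#ifdef CONFIG_64BIT
		return (u32)siphash_2u64((u64)(unsigned long)a,
					 (u64)(unsigned long)b, seed);
	#else
		return (u32)siphash_2u32((u32)(unsigned long)a,
					 (u32)(unsigned long)b, seed);
	#endif
	}
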
3907 +diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
3908 +index c68e020427ab..3a24c01cb909 100644
3909 +--- a/net/netfilter/nf_conntrack_netlink.c
3910 ++++ b/net/netfilter/nf_conntrack_netlink.c
3911 +@@ -29,6 +29,7 @@
3912 + #include <linux/spinlock.h>
3913 + #include <linux/interrupt.h>
3914 + #include <linux/slab.h>
3915 ++#include <linux/siphash.h>
3916 +
3917 + #include <linux/netfilter.h>
3918 + #include <net/netlink.h>
3919 +@@ -451,7 +452,9 @@ ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
3920 + static inline int
3921 + ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
3922 + {
3923 +- if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
3924 ++ __be32 id = (__force __be32)nf_ct_get_id(ct);
3925 ++
3926 ++ if (nla_put_be32(skb, CTA_ID, id))
3927 + goto nla_put_failure;
3928 + return 0;
3929 +
3930 +@@ -1159,8 +1162,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
3931 + ct = nf_ct_tuplehash_to_ctrack(h);
3932 +
3933 + if (cda[CTA_ID]) {
3934 +- u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
3935 +- if (id != (u32)(unsigned long)ct) {
3936 ++ __be32 id = nla_get_be32(cda[CTA_ID]);
3937 ++
3938 ++ if (id != (__force __be32)nf_ct_get_id(ct)) {
3939 + nf_ct_put(ct);
3940 + return -ENOENT;
3941 + }
3942 +@@ -2480,6 +2484,25 @@ nla_put_failure:
3943 +
3944 + static const union nf_inet_addr any_addr;
3945 +
3946 ++static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
3947 ++{
3948 ++ static __read_mostly siphash_key_t exp_id_seed;
3949 ++ unsigned long a, b, c, d;
3950 ++
3951 ++ net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
3952 ++
3953 ++ a = (unsigned long)exp;
3954 ++ b = (unsigned long)exp->helper;
3955 ++ c = (unsigned long)exp->master;
3956 ++ d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
3957 ++
3958 ++#ifdef CONFIG_64BIT
3959 ++ return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
3960 ++#else
3961 ++ return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
3962 ++#endif
3963 ++}
3964 ++
3965 + static int
3966 + ctnetlink_exp_dump_expect(struct sk_buff *skb,
3967 + const struct nf_conntrack_expect *exp)
3968 +@@ -2527,7 +2550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
3969 + }
3970 + #endif
3971 + if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
3972 +- nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
3973 ++ nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
3974 + nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
3975 + nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
3976 + goto nla_put_failure;
3977 +@@ -2824,7 +2847,8 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
3978 +
3979 + if (cda[CTA_EXPECT_ID]) {
3980 + __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
3981 +- if (ntohl(id) != (u32)(unsigned long)exp) {
3982 ++
3983 ++ if (id != nf_expect_get_id(exp)) {
3984 + nf_ct_expect_put(exp);
3985 + return -ENOENT;
3986 + }
3987 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3988 +index 429abf421906..6a670a373e29 100644
3989 +--- a/net/wireless/reg.c
3990 ++++ b/net/wireless/reg.c
3991 +@@ -2234,7 +2234,7 @@ static void reg_process_pending_hints(void)
3992 +
3993 + /* When last_request->processed becomes true this will be rescheduled */
3994 + if (lr && !lr->processed) {
3995 +- reg_process_hint(lr);
3996 ++ pr_debug("Pending regulatory request, waiting for it to be processed...\n");
3997 + return;
3998 + }
3999 +
4000 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
4001 +index 7fa0219c9758..331a2b00e53f 100644
4002 +--- a/sound/core/seq/seq_clientmgr.c
4003 ++++ b/sound/core/seq/seq_clientmgr.c
4004 +@@ -1906,8 +1906,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
4005 + if (cptr->type == USER_CLIENT) {
4006 + info.input_pool = cptr->data.user.fifo_pool_size;
4007 + info.input_free = info.input_pool;
4008 +- if (cptr->data.user.fifo)
4009 +- info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
4010 ++ info.input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
4011 + } else {
4012 + info.input_pool = 0;
4013 + info.input_free = 0;
4014 +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
4015 +index 9acbed1ac982..d9f5428ee995 100644
4016 +--- a/sound/core/seq/seq_fifo.c
4017 ++++ b/sound/core/seq/seq_fifo.c
4018 +@@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
4019 +
4020 + return 0;
4021 + }
4022 ++
4023 ++/* get the number of unused cells safely */
4024 ++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
4025 ++{
4026 ++ unsigned long flags;
4027 ++ int cells;
4028 ++
4029 ++ if (!f)
4030 ++ return 0;
4031 ++
4032 ++ snd_use_lock_use(&f->use_lock);
4033 ++ spin_lock_irqsave(&f->lock, flags);
4034 ++ cells = snd_seq_unused_cells(f->pool);
4035 ++ spin_unlock_irqrestore(&f->lock, flags);
4036 ++ snd_use_lock_free(&f->use_lock);
4037 ++ return cells;
4038 ++}
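
The helper just added wraps the raw cell count in two guards: snd_use_lock_use() pins the fifo so it cannot be freed mid-read, and the spinlock makes the read consistent with concurrent updates. A reduced, NULL-tolerant sketch of that shape with hypothetical names:

	#include <linux/spinlock.h>

	struct guarded_counter {
		spinlock_t lock;	/* protects value */
		int value;
	};

	static int guarded_read(struct guarded_counter *g)
	{
		unsigned long flags;
		int v;

		if (!g)			/* tolerate a not-yet-created object */
			return 0;
		spin_lock_irqsave(&g->lock, flags);
		v = g->value;
		spin_unlock_irqrestore(&g->lock, flags);
		return v;
	}
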
4039 +diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
4040 +index 062c446e7867..5d38a0d7f0cd 100644
4041 +--- a/sound/core/seq/seq_fifo.h
4042 ++++ b/sound/core/seq/seq_fifo.h
4043 +@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
4044 + /* resize pool in fifo */
4045 + int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
4046 +
4047 ++/* get the number of unused cells safely */
4048 ++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
4049 +
4050 + #endif
4051 +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
4052 +index 512ec25c9ead..2f7be6cee98e 100644
4053 +--- a/sound/soc/davinci/davinci-mcasp.c
4054 ++++ b/sound/soc/davinci/davinci-mcasp.c
4055 +@@ -1128,6 +1128,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
4056 + return ret;
4057 + }
4058 +
4059 ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
4060 ++ struct snd_pcm_hw_rule *rule)
4061 ++{
4062 ++ struct davinci_mcasp_ruledata *rd = rule->private;
4063 ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
4064 ++ struct snd_mask nfmt;
4065 ++ int i, slot_width;
4066 ++
4067 ++ snd_mask_none(&nfmt);
4068 ++ slot_width = rd->mcasp->slot_width;
4069 ++
4070 ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
4071 ++ if (snd_mask_test(fmt, i)) {
4072 ++ if (snd_pcm_format_width(i) <= slot_width) {
4073 ++ snd_mask_set(&nfmt, i);
4074 ++ }
4075 ++ }
4076 ++ }
4077 ++
4078 ++ return snd_mask_refine(fmt, &nfmt);
4079 ++}
4080 ++
4081 + static const unsigned int davinci_mcasp_dai_rates[] = {
4082 + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
4083 + 88200, 96000, 176400, 192000,
4084 +@@ -1219,7 +1241,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4085 + struct davinci_mcasp_ruledata *ruledata =
4086 + &mcasp->ruledata[substream->stream];
4087 + u32 max_channels = 0;
4088 +- int i, dir;
4089 ++ int i, dir, ret;
4090 + int tdm_slots = mcasp->tdm_slots;
4091 +
4092 + if (mcasp->tdm_mask[substream->stream])
4093 +@@ -1244,6 +1266,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4094 + max_channels++;
4095 + }
4096 + ruledata->serializers = max_channels;
4097 ++ ruledata->mcasp = mcasp;
4098 + max_channels *= tdm_slots;
4099 + /*
4100 + * If the already active stream has less channels than the calculated
4101 +@@ -1269,20 +1292,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4102 + 0, SNDRV_PCM_HW_PARAM_CHANNELS,
4103 + &mcasp->chconstr[substream->stream]);
4104 +
4105 +- if (mcasp->slot_width)
4106 +- snd_pcm_hw_constraint_minmax(substream->runtime,
4107 +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
4108 +- 8, mcasp->slot_width);
4109 ++ if (mcasp->slot_width) {
4110 ++	/* Only allow formats that require <= slot_width bits on the bus */
4111 ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4112 ++ SNDRV_PCM_HW_PARAM_FORMAT,
4113 ++ davinci_mcasp_hw_rule_slot_width,
4114 ++ ruledata,
4115 ++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
4116 ++ if (ret)
4117 ++ return ret;
4118 ++ }
4119 +
4120 + /*
4121 + * If we rely on implicit BCLK divider setting we should
4122 + * set constraints based on what we can provide.
4123 + */
4124 + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
4125 +- int ret;
4126 +-
4127 +- ruledata->mcasp = mcasp;
4128 +-
4129 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4130 + SNDRV_PCM_HW_PARAM_RATE,
4131 + davinci_mcasp_hw_rule_rate,
4132 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4133 +index 1f7eb3816cd7..e24572fd6e30 100644
4134 +--- a/sound/usb/mixer.c
4135 ++++ b/sound/usb/mixer.c
4136 +@@ -81,6 +81,7 @@ struct mixer_build {
4137 + unsigned char *buffer;
4138 + unsigned int buflen;
4139 + DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
4140 ++ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
4141 + struct usb_audio_term oterm;
4142 + const struct usbmix_name_map *map;
4143 + const struct usbmix_selector_map *selector_map;
4144 +@@ -709,15 +710,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
4145 + * parse the source unit recursively until it reaches to a terminal
4146 + * or a branched unit.
4147 + */
4148 +-static int check_input_term(struct mixer_build *state, int id,
4149 ++static int __check_input_term(struct mixer_build *state, int id,
4150 + struct usb_audio_term *term)
4151 + {
4152 + int err;
4153 + void *p1;
4154 ++ unsigned char *hdr;
4155 +
4156 + memset(term, 0, sizeof(*term));
4157 +- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
4158 +- unsigned char *hdr = p1;
4159 ++ for (;;) {
4160 ++ /* a loop in the terminal chain? */
4161 ++ if (test_and_set_bit(id, state->termbitmap))
4162 ++ return -EINVAL;
4163 ++
4164 ++ p1 = find_audio_control_unit(state, id);
4165 ++ if (!p1)
4166 ++ break;
4167 ++
4168 ++ hdr = p1;
4169 + term->id = id;
4170 + switch (hdr[2]) {
4171 + case UAC_INPUT_TERMINAL:
4172 +@@ -732,7 +742,7 @@ static int check_input_term(struct mixer_build *state, int id,
4173 +
4174 + /* call recursively to verify that the
4175 + * referenced clock entity is valid */
4176 +- err = check_input_term(state, d->bCSourceID, term);
4177 ++ err = __check_input_term(state, d->bCSourceID, term);
4178 + if (err < 0)
4179 + return err;
4180 +
4181 +@@ -764,7 +774,7 @@ static int check_input_term(struct mixer_build *state, int id,
4182 + case UAC2_CLOCK_SELECTOR: {
4183 + struct uac_selector_unit_descriptor *d = p1;
4184 + /* call recursively to retrieve the channel info */
4185 +- err = check_input_term(state, d->baSourceID[0], term);
4186 ++ err = __check_input_term(state, d->baSourceID[0], term);
4187 + if (err < 0)
4188 + return err;
4189 + term->type = d->bDescriptorSubtype << 16; /* virtual type */
4190 +@@ -811,6 +821,15 @@ static int check_input_term(struct mixer_build *state, int id,
4191 + return -ENODEV;
4192 + }
4193 +
4194 ++
4195 ++static int check_input_term(struct mixer_build *state, int id,
4196 ++ struct usb_audio_term *term)
4197 ++{
4198 ++ memset(term, 0, sizeof(*term));
4199 ++ memset(state->termbitmap, 0, sizeof(state->termbitmap));
4200 ++ return __check_input_term(state, id, term);
4201 ++}
4202 ++
4203 + /*
4204 + * Feature Unit
4205 + */
4206 +@@ -1628,6 +1647,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
4207 + int pin, ich, err;
4208 +
4209 + if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
4210 ++ desc->bLength < sizeof(*desc) + desc->bNrInPins ||
4211 + !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
4212 + usb_audio_err(state->chip,
4213 + "invalid MIXER UNIT descriptor %d\n",
4214 +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
4215 +index 177480066816..fffc7c418459 100644
4216 +--- a/tools/hv/hv_kvp_daemon.c
4217 ++++ b/tools/hv/hv_kvp_daemon.c
4218 +@@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
4219 + daemonize = 0;
4220 + break;
4221 + case 'h':
4222 ++ print_usage(argv);
4223 ++ exit(0);
4224 + default:
4225 + print_usage(argv);
4226 + exit(EXIT_FAILURE);
4227 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
4228 +index 5d51d6ff08e6..b5465f92ed50 100644
4229 +--- a/tools/hv/hv_vss_daemon.c
4230 ++++ b/tools/hv/hv_vss_daemon.c
4231 +@@ -164,6 +164,8 @@ int main(int argc, char *argv[])
4232 + daemonize = 0;
4233 + break;
4234 + case 'h':
4235 ++ print_usage(argv);
4236 ++ exit(0);
4237 + default:
4238 + print_usage(argv);
4239 + exit(EXIT_FAILURE);
4240 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
4241 +index df41deed0320..3bfba81d1911 100644
4242 +--- a/tools/perf/bench/numa.c
4243 ++++ b/tools/perf/bench/numa.c
4244 +@@ -370,8 +370,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
4245 +
4246 + /* Allocate and initialize all memory on CPU#0: */
4247 + if (init_cpu0) {
4248 +- orig_mask = bind_to_node(0);
4249 +- bind_to_memnode(0);
4250 ++ int node = numa_node_of_cpu(0);
4251 ++
4252 ++ orig_mask = bind_to_node(node);
4253 ++ bind_to_memnode(node);
4254 + }
4255 +
4256 + bytes = bytes0 + HPSIZE;
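
The numa.c fix drops the assumption that CPU 0 lives on NUMA node 0 and resolves the node at runtime instead. A standalone libnuma sketch of that lookup (build with -lnuma):

	#include <numa.h>
	#include <stdio.h>

	int main(void)
	{
		int node;

		if (numa_available() < 0) {
			fprintf(stderr, "no NUMA support\n");
			return 1;
		}
		node = numa_node_of_cpu(0);
		printf("CPU 0 is on NUMA node %d\n", node);
		return 0;
	}
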
4257 +diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
4258 +index 1a35ab044c11..54af2f2e2ee4 100644
4259 +--- a/tools/perf/tests/parse-events.c
4260 ++++ b/tools/perf/tests/parse-events.c
4261 +@@ -12,32 +12,6 @@
4262 + #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
4263 + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
4264 +
4265 +-#if defined(__s390x__)
4266 +-/* Return true if kvm module is available and loaded. Test this
4267 +- * and retun success when trace point kvm_s390_create_vm
4268 +- * exists. Otherwise this test always fails.
4269 +- */
4270 +-static bool kvm_s390_create_vm_valid(void)
4271 +-{
4272 +- char *eventfile;
4273 +- bool rc = false;
4274 +-
4275 +- eventfile = get_events_file("kvm-s390");
4276 +-
4277 +- if (eventfile) {
4278 +- DIR *mydir = opendir(eventfile);
4279 +-
4280 +- if (mydir) {
4281 +- rc = true;
4282 +- closedir(mydir);
4283 +- }
4284 +- put_events_file(eventfile);
4285 +- }
4286 +-
4287 +- return rc;
4288 +-}
4289 +-#endif
4290 +-
4291 + static int test__checkevent_tracepoint(struct perf_evlist *evlist)
4292 + {
4293 + struct perf_evsel *evsel = perf_evlist__first(evlist);
4294 +@@ -1587,7 +1561,6 @@ static struct evlist_test test__events[] = {
4295 + {
4296 + .name = "kvm-s390:kvm_s390_create_vm",
4297 + .check = test__checkevent_tracepoint,
4298 +- .valid = kvm_s390_create_vm_valid,
4299 + .id = 100,
4300 + },
4301 + #endif
4302 +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
4303 +new file mode 100644
4304 +index 000000000000..63ed533f73d6
4305 +--- /dev/null
4306 ++++ b/tools/testing/selftests/kvm/config
4307 +@@ -0,0 +1,3 @@
4308 ++CONFIG_KVM=y
4309 ++CONFIG_KVM_INTEL=y
4310 ++CONFIG_KVM_AMD=y